diff --git a/.asf.yaml b/.asf.yaml index 5a7f5ff722117..5d88fe28742fb 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -29,16 +29,16 @@ notifications: # Read more here: https://github.com/apache/infrastructure-asfyaml github: collaborators: - - brandboat - - FrankYang0529 - - gongxuanzhang - m1a2st - - mingyen066 - - ShivsundarR - smjn - TaiJuWu - - xijiu + - brandboat - Yunyung + - xijiu + - chirag-wadhwa5 + - mingyen066 + - ShivsundarR + - Rancho-7 enabled_merge_buttons: squash: true squash_commit_message: PR_TITLE_AND_DESC diff --git a/.github/actions/run-gradle/action.yml b/.github/actions/run-gradle/action.yml index 8422defb273cf..9c8e0945184bb 100644 --- a/.github/actions/run-gradle/action.yml +++ b/.github/actions/run-gradle/action.yml @@ -83,6 +83,9 @@ runs: RUN_FLAKY_TESTS: ${{ inputs.run-flaky-tests }} TEST_XML_OUTPUT_DIR: ${{ inputs.test-xml-output }} TEST_VERBOSE: ${{ inputs.test-verbose }} + # This build step is invoked by build.yml to run junit tests only, + # Spotbugs is being run by that workflow via the "check" task and does not need to also be run here, + # since that is redundant. run: | set +e ./.github/scripts/thread-dump.sh & @@ -97,6 +100,8 @@ runs: -Pkafka.cluster.test.repeat=$TEST_REPEAT \ -Pkafka.test.verbose=$TEST_VERBOSE \ -PcommitId=xxxxxxxxxxxxxxxx \ + -x spotbugsMain \ + -x spotbugsTest \ $TEST_TASK exitcode="$?" echo "exitcode=$exitcode" >> $GITHUB_OUTPUT diff --git a/.github/actions/setup-gradle/action.yml b/.github/actions/setup-gradle/action.yml index 1a5b0902703ab..57d363471dd29 100644 --- a/.github/actions/setup-gradle/action.yml +++ b/.github/actions/setup-gradle/action.yml @@ -37,12 +37,12 @@ runs: using: "composite" steps: - name: Setup Java - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: distribution: temurin java-version: ${{ inputs.java-version }} - name: Setup Gradle - uses: gradle/actions/setup-gradle@94baf225fe0a508e581a564467443d0e2379123b # v4.3.0 + uses: gradle/actions/setup-gradle@4d9f0ba0025fe599b4ebab900eb7f3a1d93ef4c2 # v5.0.0 env: GRADLE_BUILD_ACTION_CACHE_DEBUG_ENABLED: true with: diff --git a/.github/actions/setup-python/action.yml b/.github/actions/setup-python/action.yml index d7e326314c1be..10c55f6e083b5 100644 --- a/.github/actions/setup-python/action.yml +++ b/.github/actions/setup-python/action.yml @@ -22,7 +22,7 @@ runs: using: "composite" steps: - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.12 - name: Pip install diff --git a/.github/configs/labeler.yml b/.github/configs/labeler.yml index 24a7a643c9042..b988967f33788 100644 --- a/.github/configs/labeler.yml +++ b/.github/configs/labeler.yml @@ -92,6 +92,12 @@ transactions: - any-glob-to-any-file: - 'transaction-coordinator/**' +group-coordinator: + - changed-files: + - any-glob-to-any-file: + - 'group-coordinator/**' + - 'coordinator-common/**' + kip-932: - changed-files: - any-glob-to-any-file: diff --git a/.github/scripts/junit.py b/.github/scripts/junit.py index 95b5d4e4610de..550ea4935116d 100644 --- a/.github/scripts/junit.py +++ b/.github/scripts/junit.py @@ -361,7 +361,7 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: failure_messages.append(f"Gradle task had a failure exit code. Failing this script.") if thread_dump_url: - failure_messages.append(f"Thread dump available at {thread_dump_url}. 
Failing this script.") + failure_messages.append(f"Thread dump available at {thread_dump_url} and the script will now fail.") if junit_report_url: report_md = f"Download [JUnit HTML report]({junit_report_url})" diff --git a/.github/scripts/pr-format.py b/.github/scripts/pr-format.py index 7793f8c8378db..d2da5e3e5bff3 100644 --- a/.github/scripts/pr-format.py +++ b/.github/scripts/pr-format.py @@ -97,6 +97,8 @@ def split_paragraphs(text: str): else: if line[0] in ("#", "*", "-", "=") or line[0].isdigit(): markdown = True + if "```" in line: + markdown = True paragraph.append(line) yield paragraph, markdown diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt index d59455f79dac6..d3fcf50bb7400 100644 --- a/.github/scripts/requirements.txt +++ b/.github/scripts/requirements.txt @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +# Note: Ensure the 'requests' version here matches the version in tests/setup.py PyYAML~=6.0 pytz==2024.2 -requests==2.32.3 +requests==2.32.4 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fc5ee1bd7558b..59c2fabaaed25 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -66,12 +66,12 @@ jobs: name: Load Test Catalog steps: - name: Checkout main - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false - name: Checkout test-catalog - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: 'test-catalog' persist-credentials: false @@ -118,7 +118,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false ref: ${{ github.sha }} # this is the default, just being explicit. @@ -127,7 +127,7 @@ jobs: - name: Setup Gradle uses: ./.github/actions/setup-gradle with: - java-version: 23 + java-version: 17 gradle-cache-read-only: ${{ !inputs.is-trunk }} gradle-cache-write-only: ${{ inputs.is-trunk }} develocity-access-key: ${{ secrets.DEVELOCITY_ACCESS_KEY }} @@ -181,7 +181,7 @@ jobs: fail-fast: false matrix: # If we change these, make sure to adjust ci-complete.yml - java: [ 23, 17 ] + java: [ 25, 17 ] run-flaky: [ true, false ] run-new: [ true, false ] exclude: @@ -192,7 +192,7 @@ jobs: name: JUnit tests Java ${{ matrix.java }}${{ matrix.run-flaky == true && ' (flaky)' || '' }}${{ matrix.run-new == true && ' (new)' || '' }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false ref: ${{ needs.configure.outputs.sha }} @@ -210,7 +210,7 @@ jobs: # the overall workflow, so we'll continue here without a test catalog. - name: Load Test Catalog id: load-test-catalog - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 continue-on-error: true with: name: combined-test-catalog @@ -270,7 +270,7 @@ jobs: python .github/scripts/junit.py \ --path build/junit-xml >> $GITHUB_STEP_SUMMARY - # This job downloads all the JUnit XML files and thread dumps from the JDK 23 test runs. + # This job downloads all the JUnit XML files and thread dumps from the JDK 25 test runs. # If any test job fails, we will not run this job. Also, if any thread dump artifacts # are present, this means there was a timeout in the tests and so we will not proceed # with catalog creation. 
@@ -282,13 +282,13 @@ jobs: uploaded-test-catalog: ${{ steps.archive-test-catalog.outcome == 'success' }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false - name: Download Thread Dumps - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: - pattern: junit-thread-dumps-23-* + pattern: junit-thread-dumps-25-* path: thread-dumps merge-multiple: true - name: Check For Thread Dump @@ -300,9 +300,9 @@ jobs: exit 1; fi - name: Download JUnit XMLs - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: - pattern: junit-xml-23-* # Only look at JDK 23 tests for the test catalog + pattern: junit-xml-25-* # Only look at JDK 25 tests for the test catalog path: junit-xml merge-multiple: true - name: Collate Test Catalog @@ -334,7 +334,7 @@ jobs: contents: write steps: - name: Checkout Test Catalog - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: true # Needed to commit and push later ref: test-catalog @@ -342,7 +342,7 @@ jobs: run: | rm -rf test-catalog - name: Download Test Catalog - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: test-catalog path: test-catalog diff --git a/.github/workflows/ci-complete.yml b/.github/workflows/ci-complete.yml index 8855c998df28b..1dbd0871e9d1d 100644 --- a/.github/workflows/ci-complete.yml +++ b/.github/workflows/ci-complete.yml @@ -38,13 +38,13 @@ run-name: Build Scans for ${{ github.event.workflow_run.display_title}} jobs: upload-build-scan: # Skip this workflow if the CI run was skipped or cancelled - if: (github.event.workflow_run.conclusion == 'success' || github.event.workflow_run.conclusion == 'failure') + if: (github.event.workflow_run.conclusion == 'success' || github.event.workflow_run.conclusion == 'failure') && github.event.workflow_run.head_branch != '4.0' runs-on: ubuntu-latest strategy: fail-fast: false matrix: - # Make sure these match build.yml - java: [ 23, 17 ] + # Make sure these match build.yml and also keep in mind that GitHub Actions build will always use this file from the trunk branch. 
+ java: [ 25, 17 ] run-flaky: [ true, false ] run-new: [ true, false ] exclude: @@ -61,7 +61,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false @@ -72,7 +72,7 @@ jobs: develocity-access-key: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Download build scan archive id: download-build-scan - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 continue-on-error: true # Don't want this step to fail the overall workflow with: github-token: ${{ github.token }} diff --git a/.github/workflows/deflake.yml b/.github/workflows/deflake.yml index f58408a04739d..3a2fbb56345b2 100644 --- a/.github/workflows/deflake.yml +++ b/.github/workflows/deflake.yml @@ -42,7 +42,7 @@ jobs: name: Deflake JUnit tests steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false diff --git a/.github/workflows/docker_build_and_test.yml b/.github/workflows/docker_build_and_test.yml index 67acdf9fb7424..8358f10433a42 100644 --- a/.github/workflows/docker_build_and_test.yml +++ b/.github/workflows/docker_build_and_test.yml @@ -32,9 +32,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Set up Python 3.10 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.10" - name: Setup Docker Compose @@ -54,7 +54,7 @@ jobs: run: | python docker_build_test.py kafka/test -tag=test -type=$IMAGE_TYPE -u=$KAFKA_URL - name: Run CVE scan - uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 with: image-ref: 'kafka/test:test' format: 'table' diff --git a/.github/workflows/docker_official_image_build_and_test.yml b/.github/workflows/docker_official_image_build_and_test.yml index 58866a19d6cab..1c67ef584720c 100644 --- a/.github/workflows/docker_official_image_build_and_test.yml +++ b/.github/workflows/docker_official_image_build_and_test.yml @@ -31,9 +31,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Set up Python 3.10 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.10" - name: Setup Docker Compose @@ -53,7 +53,7 @@ jobs: run: | python docker_official_image_build_test.py kafka/test -tag=test -type=$IMAGE_TYPE -v=$KAFKA_VERSION - name: Run CVE scan - uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 with: image-ref: 'kafka/test:test' format: 'table' diff --git a/.github/workflows/docker_promote.yml b/.github/workflows/docker_promote.yml index 3b9a6f1d4fb53..e6f8779de6976 100644 --- a/.github/workflows/docker_promote.yml +++ b/.github/workflows/docker_promote.yml @@ -31,11 +31,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up QEMU - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Login to Docker Hub - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + uses: 
docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker_rc_release.yml b/.github/workflows/docker_rc_release.yml index 1f824b39b977a..8ec489fb12b0b 100644 --- a/.github/workflows/docker_rc_release.yml +++ b/.github/workflows/docker_rc_release.yml @@ -37,9 +37,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Set up Python 3.10 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.10" - name: Install dependencies @@ -47,11 +47,11 @@ jobs: python -m pip install --upgrade pip pip install -r docker/requirements.txt - name: Set up QEMU - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Login to Docker Hub - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker_scan.yml b/.github/workflows/docker_scan.yml index db729c5c3ae01..ea8cc95a30346 100644 --- a/.github/workflows/docker_scan.yml +++ b/.github/workflows/docker_scan.yml @@ -26,10 +26,10 @@ jobs: strategy: matrix: # This is an array of supported tags. Make sure this array only contains the supported tags - supported_image_tag: ['latest', '3.7.2', '3.8.1', '3.9.0', '4.0.0'] + supported_image_tag: ['latest', '3.9.1', '4.0.0', '4.1.0'] steps: - name: Run CVE scan - uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 if: always() with: image-ref: apache/kafka:${{ matrix.supported_image_tag }} diff --git a/.github/workflows/generate-reports.yml b/.github/workflows/generate-reports.yml index a3ae34fcecb80..dee7094c27c15 100644 --- a/.github/workflows/generate-reports.yml +++ b/.github/workflows/generate-reports.yml @@ -32,7 +32,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Setup Python uses: ./.github/actions/setup-python - name: Run Report diff --git a/.github/workflows/pr-labeled.yml b/.github/workflows/pr-labeled.yml index 87b39a659ec0c..b5695825861ff 100644 --- a/.github/workflows/pr-labeled.yml +++ b/.github/workflows/pr-labeled.yml @@ -35,7 +35,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false diff --git a/.github/workflows/pr-labels-cron.yml b/.github/workflows/pr-labels-cron.yml index 5faaca72ed36b..420d80498baa6 100644 --- a/.github/workflows/pr-labels-cron.yml +++ b/.github/workflows/pr-labels-cron.yml @@ -35,7 +35,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Remove label - uses: actions/github-script@v7 + uses: actions/github-script@v8 continue-on-error: true with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -77,7 +77,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v9 + - uses: 
actions/stale@v10 with: debug-only: ${{ inputs.dryRun || false }} operations-per-run: ${{ inputs.operationsPerRun || 500 }} diff --git a/.github/workflows/pr-linter.yml b/.github/workflows/pr-linter.yml index f19efbfabe2c1..d38a9659a01f9 100644 --- a/.github/workflows/pr-linter.yml +++ b/.github/workflows/pr-linter.yml @@ -31,12 +31,12 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false - name: Load PR Number id: load-pr-number - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: github-token: ${{ github.token }} run-id: ${{ github.event.workflow_run.id }} diff --git a/.github/workflows/pr-update.yml b/.github/workflows/pr-update.yml index e1cd7214d6c36..7b45a15d19126 100644 --- a/.github/workflows/pr-update.yml +++ b/.github/workflows/pr-update.yml @@ -37,8 +37,8 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v4 - - uses: actions/labeler@v5 + uses: actions/checkout@v5 + - uses: actions/labeler@v6 with: configuration-path: .github/configs/labeler.yml - name: check small label diff --git a/.github/workflows/prepare_docker_official_image_source.yml b/.github/workflows/prepare_docker_official_image_source.yml index 32f21a0afd0bf..82204b9b93597 100644 --- a/.github/workflows/prepare_docker_official_image_source.yml +++ b/.github/workflows/prepare_docker_official_image_source.yml @@ -31,9 +31,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Set up Python 3.10 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.10" - name: Install dependencies diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9382d4173e94c..74de2a967b59f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -38,7 +38,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v9 + - uses: actions/stale@v10 with: debug-only: ${{ inputs.dryRun || false }} operations-per-run: ${{ inputs.operationsPerRun || 500 }} diff --git a/.github/workflows/workflow-requested.yml b/.github/workflows/workflow-requested.yml index 6463c72ab0acc..aae00f9ece7b0 100644 --- a/.github/workflows/workflow-requested.yml +++ b/.github/workflows/workflow-requested.yml @@ -38,7 +38,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false diff --git a/LICENSE-binary b/LICENSE-binary index 6175d3ed7d479..91c8865931ea2 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -205,53 +205,54 @@ This project bundles some components that are also licensed under the Apache License Version 2.0: -- caffeine-3.1.8 -- commons-beanutils-1.9.4 +- caffeine-3.2.0 +- commons-beanutils-1.11.0 - commons-collections-3.2.2 - commons-digester-2.1 -- commons-lang3-3.12.0 -- commons-logging-1.3.2 -- commons-validator-1.9.0 -- error_prone_annotations-2.21.1 -- jackson-annotations-2.16.2 -- jackson-core-2.16.2 -- jackson-databind-2.16.2 -- jackson-dataformat-csv-2.16.2 -- jackson-dataformat-yaml-2.16.2 -- jackson-datatype-jdk8-2.16.2 -- jackson-jakarta-rs-base-2.16.2 -- jackson-jakarta-rs-json-provider-2.16.2 -- jackson-module-blackbird-2.16.2 -- jackson-module-jakarta-xmlbind-annotations-2.16.2 +- commons-lang3-3.18.0 +- commons-logging-1.3.5 +- commons-validator-1.10.0 +- hash4j-0.22.0 +- jackson-annotations-2.19.0 +- jackson-core-2.19.0 +- 
jackson-databind-2.19.0 +- jackson-dataformat-csv-2.19.0 +- jackson-dataformat-yaml-2.19.0 +- jackson-datatype-jdk8-2.19.0 +- jackson-jakarta-rs-base-2.19.0 +- jackson-jakarta-rs-json-provider-2.19.0 +- jackson-module-blackbird-2.19.0 +- jackson-module-jakarta-xmlbind-annotations-2.19.0 - jakarta.inject-api-2.0.1 - jakarta.validation-api-3.0.2 -- javassist-3.29.2-GA -- jetty-alpn-client-12.0.15 -- jetty-client-12.0.15 -- jetty-ee10-servlet-12.0.15 -- jetty-ee10-servlets-12.0.15 -- jetty-http-12.0.15 -- jetty-io-12.0.15 -- jetty-security-12.0.15 -- jetty-server-12.0.15 -- jetty-session-12.0.15 -- jetty-util-12.0.15 -- jose4j-0.9.4 -- log4j-api-2.24.3 -- log4j-core-2.24.3 -- log4j-slf4j-impl-2.24.3 -- log4j-1.2-api-2.24.3 +- javassist-3.30.2-GA +- jetty-alpn-client-12.0.22 +- jetty-client-12.0.22 +- jetty-ee10-servlet-12.0.22 +- jetty-ee10-servlets-12.0.22 +- jetty-http-12.0.22 +- jetty-io-12.0.22 +- jetty-security-12.0.22 +- jetty-server-12.0.22 +- jetty-session-12.0.22 +- jetty-util-12.0.22 +- jose4j-0.9.6 +- jspecify-1.0.0 +- log4j-api-2.25.1 +- log4j-core-2.25.1 +- log4j-slf4j-impl-2.25.1 +- log4j-1.2-api-2.25.1 - lz4-java-1.8.0 - maven-artifact-3.9.6 - metrics-core-2.2.0 -- opentelemetry-proto-1.0.0-alpha +- opentelemetry-proto-1.3.2-alpha - plexus-utils-3.5.1 -- rocksdbjni-9.7.3 -- scala-library-2.13.15 +- rocksdbjni-10.1.3 +- scala-library-2.13.17 - scala-logging_2.13-3.9.5 -- scala-reflect-2.13.15 -- snappy-java-1.1.10.5 -- snakeyaml-2.2 +- scala-reflect-2.13.17 +- snappy-java-1.1.10.7 +- snakeyaml-2.4 - swagger-annotations-2.2.25 =============================================================================== @@ -277,12 +278,12 @@ see: licenses/eclipse-public-license-2.0 - hk2-utils-3.0.6 - osgi-resource-locator-1.0.3 - aopalliance-repackaged-3.0.6 -- jersey-client-3.1.9 -- jersey-common-3.1.9 -- jersey-container-servlet-3.1.9 -- jersey-container-servlet-core-3.1.9 -- jersey-hk2-3.1.9 -- jersey-server-3.1.9 +- jersey-client-3.1.10 +- jersey-common-3.1.10 +- jersey-container-servlet-3.1.10 +- jersey-container-servlet-core-3.1.10 +- jersey-hk2-3.1.10 +- jersey-server-3.1.10 --------------------------------------- CDDL 1.1 + GPLv2 with classpath exception @@ -298,10 +299,10 @@ see: licenses/CDDL+GPL-1.1 MIT License - argparse4j-0.7.0, see: licenses/argparse-MIT -- classgraph-4.8.173, see: licenses/classgraph-MIT +- classgraph-4.8.179, see: licenses/classgraph-MIT - jopt-simple-5.0.4, see: licenses/jopt-simple-MIT - slf4j-api-1.7.36, see: licenses/slf4j-MIT -- pcollections-4.0.1, see: licenses/pcollections-MIT +- pcollections-4.0.2, see: licenses/pcollections-MIT --------------------------------------- BSD 2-Clause @@ -312,11 +313,11 @@ BSD 2-Clause --------------------------------------- BSD 3-Clause -- jline-3.25.1, see: licenses/jline-BSD-3-clause +- jline-3.30.4, see: licenses/jline-BSD-3-clause - protobuf-java-3.25.5, see: licenses/protobuf-java-BSD-3-clause - jakarta.activation-2.0.1, see: licenses/jakarta-BSD-3-clause --------------------------------------- Go License -- re2j-1.7, see: licenses/re2j-GO +- re2j-1.8, see: licenses/re2j-GO diff --git a/README.md b/README.md index 4c2fc4e1f160b..28da38abe1dab 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed. -We build and test Apache Kafka with 17 and 23. The `release` parameter in javac is set to `11` for the clients +We build and test Apache Kafka with 17 and 25. 
The `release` parameter in javac is set to `11` for the clients and streams modules, and `17` for the rest, ensuring compatibility with their respective minimum Java versions. Similarly, the `release` parameter in scalac is set to `11` for the streams modules and `17` for the rest. @@ -52,6 +52,7 @@ Follow instructions in https://kafka.apache.org/quickstart ### Running a particular unit/integration test ### ./gradlew clients:test --tests RequestResponseTest + ./gradlew streams:integration-tests:test --tests RestoreIntegrationTest ### Repeatedly running a particular unit/integration test with specific times by setting N ### N=500; I=0; while [ $I -lt $N ] && ./gradlew clients:test --tests RequestResponseTest --rerun --fail-fast; do (( I=$I+1 )); echo "Completed run: $I"; sleep 1; done @@ -59,6 +60,7 @@ Follow instructions in https://kafka.apache.org/quickstart ### Running a particular test method within a unit/integration test ### ./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testTimeToNextUpdate + ./gradlew streams:integration-tests:test --tests org.apache.kafka.streams.integration.RestoreIntegrationTest.shouldRestoreNullRecord ### Running a particular unit/integration test with log4j output ### By default, there will be only small number of logs output while testing. You can adjust it by changing the `log4j2.yaml` file in the module's `src/test/resources` directory. @@ -230,7 +232,7 @@ Alternatively, use the `allDeps` or `allDepInsight` tasks for recursively iterat These take the same arguments as the builtin variants. ### Determining if any dependencies could be updated ### - ./gradlew dependencyUpdates + ./gradlew dependencyUpdates --no-parallel ### Common build options ### diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh index 3463389d3c005..012f9c27f0aca 100755 --- a/bin/kafka-run-class.sh +++ b/bin/kafka-run-class.sh @@ -49,7 +49,7 @@ should_include_file() { base_dir=$(dirname $0)/.. 
if [ -z "$SCALA_VERSION" ]; then - SCALA_VERSION=2.13.15 + SCALA_VERSION=2.13.17 if [[ -f "$base_dir/gradle.properties" ]]; then SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2` fi @@ -282,7 +282,7 @@ fi # JVM performance options # MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then - KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true" + KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15" fi while [ $# -gt 0 ]; do diff --git a/bin/windows/kafka-run-class.bat b/bin/windows/kafka-run-class.bat index ca151e5df96ed..394269a4294e2 100755 --- a/bin/windows/kafka-run-class.bat +++ b/bin/windows/kafka-run-class.bat @@ -27,7 +27,7 @@ set BASE_DIR=%CD% popd IF ["%SCALA_VERSION%"] EQU [""] ( - set SCALA_VERSION=2.13.15 + set SCALA_VERSION=2.13.17 ) IF ["%SCALA_BINARY_VERSION%"] EQU [""] ( @@ -177,7 +177,7 @@ IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( rem JVM performance options IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] ( - set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true + set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent ) IF not defined CLASSPATH ( diff --git a/build.gradle b/build.gradle index 2e35057165c53..dc3bf215ec88f 100644 --- a/build.gradle +++ b/build.gradle @@ -14,8 +14,6 @@ // limitations under the License. 
import org.ajoberstar.grgit.Grgit -import org.gradle.api.JavaVersion - import java.nio.charset.StandardCharsets buildscript { @@ -31,25 +29,24 @@ buildscript { } plugins { - id 'com.github.ben-manes.versions' version '0.48.0' + id 'com.github.ben-manes.versions' version '0.53.0' id 'idea' id 'jacoco' id 'java-library' - id 'org.owasp.dependencycheck' version '8.2.1' + id 'org.owasp.dependencycheck' version '12.1.3' id 'org.nosphere.apache.rat' version "0.8.1" id "io.swagger.core.v3.swagger-gradle-plugin" version "${swaggerVersion}" - id "com.github.spotbugs" version '6.0.25' apply false + id "com.github.spotbugs" version '6.2.5' apply false id 'org.scoverage' version '8.0.3' apply false - id 'com.gradleup.shadow' version '8.3.6' apply false - id 'com.diffplug.spotless' version "6.25.0" + id 'com.gradleup.shadow' version '8.3.9' apply false + id 'com.diffplug.spotless' version "7.2.1" } ext { - gradleVersion = versions.gradle minClientJavaVersion = 11 minNonClientJavaVersion = 17 - modulesNeedingJava11 = [":clients", ":generator", ":streams", ":streams:test-utils", ":streams-scala", ":test-common:test-common-util"] + modulesNeedingJava11 = [":clients", ":generator", ":streams", ":streams:test-utils", ":streams:examples", ":streams-scala", ":test-common:test-common-util"] buildVersionFileName = "kafka-version.properties" @@ -74,6 +71,13 @@ ext { "--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED" ) + if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_25)) { + // Spotbugs is not compatible with Java 25+ so Gradle related tasks are disabled + // until version can be upgraded: https://github.com/spotbugs/spotbugs/issues/3564 + project.gradle.startParameter.excludedTaskNames.add("spotbugsMain") + project.gradle.startParameter.excludedTaskNames.add("spotbugsTest") + } + maxTestForks = project.hasProperty('maxParallelForks') ? maxParallelForks.toInteger() : Runtime.runtime.availableProcessors() maxScalacThreads = project.hasProperty('maxScalacThreads') ? maxScalacThreads.toInteger() : Math.min(Runtime.runtime.availableProcessors(), 8) @@ -159,7 +163,7 @@ ext { libs.log4j2Api, libs.log4j2Core ] - + } allprojects { @@ -195,7 +199,8 @@ allprojects { // ensure we have a single version in the classpath despite transitive dependencies libs.scalaLibrary, libs.scalaReflect, - libs.jacksonAnnotations + libs.jacksonAnnotations, + libs.commonsLang ) } } @@ -298,7 +303,7 @@ if (repo != null) { } else { rat.enabled = false } -println("Starting build with version $version (commit id ${commitId == null ? "null" : commitId.take(8)}) using Gradle $gradleVersion, Java ${JavaVersion.current()} and Scala ${versions.scala}") +println("Starting build with version $version (commit id ${commitId == null ? "null" : commitId.take(8)}) using Gradle $versions.gradle, Java ${JavaVersion.current()} and Scala ${versions.scala}") println("Build properties: ignoreFailures=$userIgnoreFailures, maxParallelForks=$maxTestForks, maxScalacThreads=$maxScalacThreads, maxTestRetries=$userMaxTestRetries") subprojects { @@ -329,6 +334,16 @@ subprojects { tasks.register('uploadArchives').configure { dependsOn(publish) } } + tasks.withType(AbstractArchiveTask).configureEach { + reproducibleFileOrder = false + preserveFileTimestamps = true + useFileSystemPermissions() + } + + tasks.withType(AbstractTestTask).configureEach { + failOnNoDiscoveredTests = false + } + // apply the eclipse plugin only to subprojects that hold code. 'connect' is just a folder. 
if (!project.name.equals('connect')) { apply plugin: 'eclipse' @@ -369,7 +384,6 @@ subprojects { // Fix for avoiding inclusion of runtime dependencies marked as 'shadow' in MANIFEST Class-Path. // https://github.com/GradleUp/shadow/issues/324 - afterEvaluate { pom.withXml { xml -> def dependenciesNode = xml.asNode().get('dependencies') ?: xml.asNode().appendNode('dependencies') project.configurations.shadowed.allDependencies.each { @@ -380,7 +394,6 @@ subprojects { dependencyNode.appendNode('scope', 'runtime') } } - } } afterEvaluate { @@ -485,11 +498,36 @@ subprojects { } } + // Workaround for Mockito Java Agent restrictions in Java 21+ + // Starting with Java 21, the JDK restricts libraries from attaching a Java agent + // to their own JVM. As a result, Mockito’s inline mock maker (mockito-core) + // fails without explicit instrumentation, and the JVM consistently emits warnings. + // See also: https://javadoc.io/doc/org.mockito/mockito-core/latest/org.mockito/org/mockito/Mockito.html#mockito-instrumentation + afterEvaluate { subproject -> + def hasMockitoCore = subproject.configurations.findAll { + it.canBeResolved + }.any { config -> + config.incoming.dependencies.any { dependency -> + "$dependency" == libs.mockitoCore + } + } + + if (hasMockitoCore) { + subproject.configurations { + mockitoAgent { + transitive = false + } + } + subproject.dependencies { + mockitoAgent libs.mockitoCore + } + } + } + // The suites are for running sets of tests in IDEs. // Gradle will run each test class, so we exclude the suites to avoid redundantly running the tests twice. def testsToExclude = ['**/*Suite.class'] - // This task will copy JUnit XML files out of the sub-project's build directory and into // a top-level build/junit-xml directory. This is necessary to avoid reporting on tests which // were not run, but instead were restored via FROM-CACHE. See KAFKA-17479 for more details. @@ -519,6 +557,14 @@ subprojects { } test { + + doFirst { + def mockitoAgentConfig = configurations.findByName('mockitoAgent') + if (mockitoAgentConfig) { + jvmArgs("-javaagent:${mockitoAgentConfig.asPath}") + } + } + maxParallelForks = maxTestForks ignoreFailures = userIgnoreFailures @@ -552,7 +598,7 @@ subprojects { maxFailures = userMaxTestRetryFailures } } - + finalizedBy("copyTestXml") } @@ -894,6 +940,9 @@ project(':server') { } dependencies { + compileOnly libs.bndlib + compileOnly libs.spotbugs + implementation project(':clients') implementation project(':metadata') implementation project(':server-common') @@ -906,6 +955,7 @@ project(':server') { implementation libs.jacksonDatabind implementation libs.metrics implementation libs.slf4jApi + implementation log4j2Libs testImplementation project(':clients').sourceSets.test.output @@ -915,6 +965,7 @@ project(':server') { testImplementation project(':test-common:test-common-internal-api') testImplementation project(':test-common:test-common-runtime') testImplementation project(':storage:storage-api').sourceSets.test.output + testImplementation project(':server-common').sourceSets.test.output testRuntimeOnly runtimeTestLibs } @@ -1004,6 +1055,8 @@ project(':core') { implementation project(':transaction-coordinator') implementation project(':metadata') implementation project(':storage:storage-api') + // tools-api is automatically included in releaseTarGz via core's runtimeClasspath. + // If removed from here, remember to explicitly add it back in the releaseTarGz task. 
implementation project(':tools:tools-api') implementation project(':raft') implementation project(':storage') @@ -1024,6 +1077,7 @@ project(':core') { implementation libs.scalaReflect implementation libs.scalaLogging implementation libs.slf4jApi + implementation libs.re2j testImplementation project(':clients').sourceSets.test.output testImplementation project(':group-coordinator').sourceSets.test.output @@ -1056,6 +1110,7 @@ project(':core') { testImplementation libs.junitJupiter testImplementation libs.caffeine testImplementation testLog4j2Libs + testImplementation libs.mockOAuth2Server testRuntimeOnly runtimeTestLibs } @@ -1143,6 +1198,13 @@ project(':core') { standardOutput = new File(generatedDocsDir, "topic_config.html").newOutputStream() } + task genGroupConfigDocs(type: JavaExec) { + classpath = sourceSets.main.runtimeClasspath + mainClass = 'org.apache.kafka.coordinator.group.GroupConfig' + if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() } + standardOutput = new File(generatedDocsDir, "group_config.html").newOutputStream() + } + task genConsumerMetricsDocs(type: JavaExec) { classpath = sourceSets.test.runtimeClasspath mainClass = 'org.apache.kafka.clients.consumer.internals.ConsumerMetrics' @@ -1159,7 +1221,7 @@ project(':core') { task siteDocsTar(dependsOn: ['genProtocolErrorDocs', 'genProtocolTypesDocs', 'genProtocolApiKeyDocs', 'genProtocolMessageDocs', 'genAdminClientConfigDocs', 'genProducerConfigDocs', 'genConsumerConfigDocs', - 'genKafkaConfigDocs', 'genTopicConfigDocs', + 'genKafkaConfigDocs', 'genTopicConfigDocs', 'genGroupConfigDocs', ':connect:runtime:genConnectConfigDocs', ':connect:runtime:genConnectTransformationDocs', ':connect:runtime:genConnectPredicateDocs', ':connect:runtime:genSinkConnectorConfigDocs', ':connect:runtime:genSourceConnectorConfigDocs', @@ -1217,8 +1279,6 @@ project(':core') { from(project(':streams:test-utils').configurations.runtimeClasspath) { into("libs/") } from(project(':streams:examples').jar) { into("libs/") } from(project(':streams:examples').configurations.runtimeClasspath) { into("libs/") } - from(project(':tools:tools-api').jar) { into("libs/") } - from(project(':tools:tools-api').configurations.runtimeClasspath) { into("libs/") } duplicatesStrategy 'exclude' } @@ -1420,6 +1480,7 @@ project(':group-coordinator') { implementation libs.hdrHistogram implementation libs.re2j implementation libs.slf4jApi + implementation libs.hash4j testImplementation project(':clients').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output @@ -1762,11 +1823,11 @@ project(':generator') { implementation libs.jacksonJDK8Datatypes implementation libs.jacksonJakartarsJsonProvider - implementation 'org.eclipse.jgit:org.eclipse.jgit:6.4.0.202211300538-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit:7.2.0.202503040940-r' // SSH support for JGit based on Apache MINA sshd - implementation 'org.eclipse.jgit:org.eclipse.jgit.ssh.apache:6.4.0.202211300538-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit.ssh.apache:7.2.0.202503040940-r' // GPG support for JGit based on BouncyCastle (commit signing) - implementation 'org.eclipse.jgit:org.eclipse.jgit.gpg.bc:6.4.0.202211300538-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit.gpg.bc:7.2.0.202503040940-r' testImplementation libs.junitJupiter @@ -1973,6 +2034,7 @@ project(':clients:clients-integration-tests') { } dependencies { + testImplementation libs.metrics testImplementation libs.slf4jApi testImplementation 
project(':test-common:test-common-internal-api') testImplementation project(':test-common:test-common-runtime') @@ -1982,9 +2044,12 @@ project(':clients:clients-integration-tests') { testImplementation project(':core').sourceSets.test.output testImplementation project(':clients').sourceSets.test.output implementation project(':server-common') + testImplementation project(':server-common').sourceSets.test.output testImplementation project(':metadata') implementation project(':group-coordinator') + implementation project(':group-coordinator:group-coordinator-api') implementation project(':transaction-coordinator') + testImplementation project(':test-common:test-common-util') testImplementation libs.junitJupiter testImplementation libs.junitPlatformSuiteEngine @@ -2230,11 +2295,13 @@ project(':storage') { } dependencies { + implementation project(':metadata') implementation project(':storage:storage-api') implementation project(':server-common') implementation project(':clients') implementation(libs.caffeine) { exclude group: 'org.checkerframework', module: 'checker-qual' + exclude group: 'com.google.errorprone', module: 'error_prone_annotations' } implementation libs.slf4jApi implementation libs.jacksonDatabind @@ -2897,10 +2964,10 @@ project(':streams:examples') { } dependencies { - // this dependency should be removed after we unify data API - implementation(project(':connect:json')) implementation project(':streams') implementation libs.slf4jApi + implementation libs.jacksonDatabind + implementation libs.jacksonAnnotations testImplementation project(':streams:test-utils') testImplementation project(':clients').sourceSets.test.output // for org.apache.kafka.test.IntegrationTest @@ -2936,6 +3003,7 @@ project(':streams:upgrade-system-tests-0110') { dependencies { testImplementation libs.kafkaStreams_0110 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -2951,6 +3019,7 @@ project(':streams:upgrade-system-tests-10') { dependencies { testImplementation libs.kafkaStreams_10 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -2966,6 +3035,7 @@ project(':streams:upgrade-system-tests-11') { dependencies { testImplementation libs.kafkaStreams_11 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -2981,6 +3051,7 @@ project(':streams:upgrade-system-tests-20') { dependencies { testImplementation libs.kafkaStreams_20 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -2996,6 +3067,7 @@ project(':streams:upgrade-system-tests-21') { dependencies { testImplementation libs.kafkaStreams_21 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3011,6 +3083,7 @@ project(':streams:upgrade-system-tests-22') { dependencies { testImplementation libs.kafkaStreams_22 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3026,6 +3099,7 @@ project(':streams:upgrade-system-tests-23') { dependencies { testImplementation libs.kafkaStreams_23 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3041,6 +3115,7 @@ project(':streams:upgrade-system-tests-24') { dependencies { testImplementation libs.kafkaStreams_24 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3056,6 +3131,7 @@ project(':streams:upgrade-system-tests-25') { dependencies { testImplementation libs.kafkaStreams_25 testRuntimeOnly libs.junitJupiter + 
testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3071,6 +3147,7 @@ project(':streams:upgrade-system-tests-26') { dependencies { testImplementation libs.kafkaStreams_26 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3086,6 +3163,7 @@ project(':streams:upgrade-system-tests-27') { dependencies { testImplementation libs.kafkaStreams_27 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3101,6 +3179,7 @@ project(':streams:upgrade-system-tests-28') { dependencies { testImplementation libs.kafkaStreams_28 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3116,6 +3195,7 @@ project(':streams:upgrade-system-tests-30') { dependencies { testImplementation libs.kafkaStreams_30 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3131,6 +3211,7 @@ project(':streams:upgrade-system-tests-31') { dependencies { testImplementation libs.kafkaStreams_31 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3146,6 +3227,7 @@ project(':streams:upgrade-system-tests-32') { dependencies { testImplementation libs.kafkaStreams_32 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3161,6 +3243,7 @@ project(':streams:upgrade-system-tests-33') { dependencies { testImplementation libs.kafkaStreams_33 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3176,6 +3259,7 @@ project(':streams:upgrade-system-tests-34') { dependencies { testImplementation libs.kafkaStreams_34 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3191,6 +3275,7 @@ project(':streams:upgrade-system-tests-35') { dependencies { testImplementation libs.kafkaStreams_35 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3206,6 +3291,7 @@ project(':streams:upgrade-system-tests-36') { dependencies { testImplementation libs.kafkaStreams_36 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3221,6 +3307,7 @@ project(':streams:upgrade-system-tests-37') { dependencies { testImplementation libs.kafkaStreams_37 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3236,6 +3323,7 @@ project(':streams:upgrade-system-tests-38') { dependencies { testImplementation libs.kafkaStreams_38 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3251,6 +3339,7 @@ project(':streams:upgrade-system-tests-39') { dependencies { testImplementation libs.kafkaStreams_39 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3266,6 +3355,23 @@ project(':streams:upgrade-system-tests-40') { dependencies { testImplementation libs.kafkaStreams_40 testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs + } + + systemTestLibs { + dependsOn testJar + } +} + +project(':streams:upgrade-system-tests-41') { + base { + archivesName = "kafka-streams-upgrade-system-tests-41" + } + + dependencies { + testImplementation libs.kafkaStreams_41 + testRuntimeOnly libs.junitJupiter + testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3291,6 +3397,7 @@ project(':jmh-benchmarks') { implementation project(':raft') implementation project(':clients') implementation project(':coordinator-common') + implementation project(':coordinator-common').sourceSets.test.output implementation 
project(':group-coordinator') implementation project(':group-coordinator:group-coordinator-api') implementation project(':metadata') @@ -3339,7 +3446,7 @@ project(':jmh-benchmarks') { if (System.getProperty("jmhArgs")) { args System.getProperty("jmhArgs").split(' ') } - args = [shadowJar.archivePath, *args] + args = [shadowJar.archiveFile.get().asFile, *args] } } diff --git a/checkstyle/import-control-clients-integration-tests.xml b/checkstyle/import-control-clients-integration-tests.xml index 44cf0dba1fbec..3c6f50890d2b8 100644 --- a/checkstyle/import-control-clients-integration-tests.xml +++ b/checkstyle/import-control-clients-integration-tests.xml @@ -21,12 +21,14 @@ - + + + diff --git a/checkstyle/import-control-coordinator-common.xml b/checkstyle/import-control-coordinator-common.xml index bafffe8069746..7841697cf892a 100644 --- a/checkstyle/import-control-coordinator-common.xml +++ b/checkstyle/import-control-coordinator-common.xml @@ -58,9 +58,11 @@ + + diff --git a/checkstyle/import-control-group-coordinator.xml b/checkstyle/import-control-group-coordinator.xml index 8b6a8d99f5eaa..1f0e91de144bc 100644 --- a/checkstyle/import-control-group-coordinator.xml +++ b/checkstyle/import-control-group-coordinator.xml @@ -51,6 +51,7 @@ + @@ -76,6 +77,7 @@ + diff --git a/checkstyle/import-control-jmh-benchmarks.xml b/checkstyle/import-control-jmh-benchmarks.xml index 4469ccf3bbeb9..4c11bc3acb42e 100644 --- a/checkstyle/import-control-jmh-benchmarks.xml +++ b/checkstyle/import-control-jmh-benchmarks.xml @@ -42,7 +42,6 @@ - @@ -53,7 +52,7 @@ - + diff --git a/checkstyle/import-control-metadata.xml b/checkstyle/import-control-metadata.xml index c2660674e3259..773635cec8e9c 100644 --- a/checkstyle/import-control-metadata.xml +++ b/checkstyle/import-control-metadata.xml @@ -83,7 +83,6 @@ - @@ -108,6 +107,7 @@ + @@ -160,9 +160,9 @@ - + @@ -198,18 +198,4 @@ - - - - - - - - - - - - - - diff --git a/checkstyle/import-control-server-common.xml b/checkstyle/import-control-server-common.xml index 8d85dffa341f0..95a014b87e4ec 100644 --- a/checkstyle/import-control-server-common.xml +++ b/checkstyle/import-control-server-common.xml @@ -33,6 +33,7 @@ + @@ -49,6 +50,9 @@ + + + @@ -60,6 +64,7 @@ + diff --git a/checkstyle/import-control-server.xml b/checkstyle/import-control-server.xml index 30a7f5fbe761f..b3d1b928cc6db 100644 --- a/checkstyle/import-control-server.xml +++ b/checkstyle/import-control-server.xml @@ -86,10 +86,15 @@ + + + + + @@ -100,6 +105,10 @@ + + + + diff --git a/checkstyle/import-control-storage.xml b/checkstyle/import-control-storage.xml index 639cb6dc1d04c..2a0f74126859a 100644 --- a/checkstyle/import-control-storage.xml +++ b/checkstyle/import-control-storage.xml @@ -94,6 +94,8 @@ + + @@ -156,4 +158,13 @@ + + + + + + + + + diff --git a/checkstyle/import-control-transaction-coordinator.xml b/checkstyle/import-control-transaction-coordinator.xml index bf2157750c3a6..810c127c95c32 100644 --- a/checkstyle/import-control-transaction-coordinator.xml +++ b/checkstyle/import-control-transaction-coordinator.xml @@ -38,7 +38,9 @@ + + diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index ead4111389bb3..c7f9eaad7ea08 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -145,6 +145,7 @@ + @@ -199,6 +200,8 @@ + + @@ -232,6 +235,7 @@ + @@ -428,6 +432,7 @@ + @@ -492,6 +497,7 @@ + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index 4e0f338af5d46..d363b9ed9c0d0 100644 --- a/checkstyle/suppressions.xml +++ 
b/checkstyle/suppressions.xml @@ -41,7 +41,7 @@ files="(KafkaClusterTestKit).java"/> + files="(SharePartitionManagerTest|SharePartitionTest|ShareConsumerTest).java"/> @@ -49,6 +49,7 @@ + + files="(KafkaConsumer|ConsumerCoordinator|AbstractFetch|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient|MockAdminClient|KafkaNetworkChannelTest|ClientTelemetryReporterTest).java"/> @@ -98,7 +99,7 @@ files="(AbstractFetch|ClientTelemetryReporter|ConsumerCoordinator|CommitRequestManager|FetchCollector|OffsetFetcherUtils|KafkaProducer|Sender|ConfigDef|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager|AbstractStickyAssignor|DefaultSslEngineFactory|Authorizer|RecordAccumulator|MemoryRecords|FetchSessionHandler|MockAdminClient).java"/> + files="(AbstractRequest|AbstractResponse|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest|KafkaAdminClientTest|KafkaProducerTest).java"/> diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientRebootstrapTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientRebootstrapTest.java index 2f11e13377d07..b388744b711ab 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientRebootstrapTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientRebootstrapTest.java @@ -24,11 +24,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.Type; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.test.TestUtils; import java.time.Duration; import java.util.List; diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java new file mode 100644 index 0000000000000..a007035e860a8 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/ClientsTestUtils.java @@ -0,0 +1,539 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetCommitCallback; +import org.apache.kafka.clients.consumer.RetriableCommitFailedException; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.ClusterResource; +import org.apache.kafka.common.ClusterResourceListener; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.internals.Topic; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.test.TestUtils; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +import static org.apache.kafka.clients.ClientsTestUtils.TestClusterResourceListenerDeserializer.UPDATE_CONSUMER_COUNT; +import static org.apache.kafka.clients.ClientsTestUtils.TestClusterResourceListenerSerializer.UPDATE_PRODUCER_COUNT; +import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ClientsTestUtils { + + private static final String KEY_PREFIX = "key "; + private static final String VALUE_PREFIX = "value "; + + private ClientsTestUtils() {} + + public static List> consumeRecords( + Consumer consumer, + int numRecords + ) throws InterruptedException { + return consumeRecords(consumer, numRecords, Integer.MAX_VALUE); + } + + public static List> consumeRecords( + Consumer consumer, + int numRecords, + int maxPollRecords + ) throws InterruptedException { + List> consumedRecords = new ArrayList<>(); + TestUtils.waitForCondition(() -> { + var records = consumer.poll(Duration.ofMillis(100)); + records.forEach(consumedRecords::add); + assertTrue(records.count() <= maxPollRecords); + return consumedRecords.size() >= numRecords; + }, 60000, "Timed out before consuming expected " + numRecords + " records."); + + return consumedRecords; + } + + public static void consumeAndVerifyRecords( + Consumer consumer, + TopicPartition tp, + int numRecords, + int startingOffset, + int startingKeyAndValueIndex, + long startingTimestamp, + long timestampIncrement + ) throws InterruptedException { + consumeAndVerifyRecords( + consumer, + tp, + numRecords, + Integer.MAX_VALUE, + startingOffset, + startingKeyAndValueIndex, + startingTimestamp, + timestampIncrement + ); + } + + public static void 
pollUntilTrue( + Consumer consumer, + Supplier testCondition, + String msg + ) throws InterruptedException { + pollUntilTrue(consumer, Duration.ofMillis(100), testCondition, 15_000L, msg); + } + + public static void pollUntilTrue( + Consumer consumer, + Supplier testCondition, + long waitTimeMs, + String msg + ) throws InterruptedException { + pollUntilTrue(consumer, Duration.ofMillis(100), testCondition, waitTimeMs, msg); + } + + public static void pollUntilTrue( + Consumer consumer, + Duration timeout, + Supplier testCondition, + long waitTimeMs, + String msg + ) throws InterruptedException { + TestUtils.waitForCondition(() -> { + consumer.poll(timeout); + return testCondition.get(); + }, waitTimeMs, msg); + } + + public static void consumeAndVerifyRecordsWithTimeTypeLogAppend( + Consumer consumer, + TopicPartition tp, + int numRecords, + long startingTimestamp + ) throws InterruptedException { + var records = consumeRecords(consumer, numRecords, Integer.MAX_VALUE); + var now = System.currentTimeMillis(); + for (var i = 0; i < numRecords; i++) { + var record = records.get(i); + assertEquals(tp.topic(), record.topic()); + assertEquals(tp.partition(), record.partition()); + + assertTrue(record.timestamp() >= startingTimestamp && record.timestamp() <= now, + "Got unexpected timestamp " + record.timestamp() + ". Timestamp should be between [" + startingTimestamp + ", " + now + "]"); + + assertEquals(i, record.offset()); + assertEquals(KEY_PREFIX + i, new String(record.key())); + assertEquals(VALUE_PREFIX + i, new String(record.value())); + // this is true only because K and V are byte arrays + assertEquals((KEY_PREFIX + i).length(), record.serializedKeySize()); + assertEquals((VALUE_PREFIX + i).length(), record.serializedValueSize()); + } + } + + public static void consumeAndVerifyRecords( + Consumer consumer, + TopicPartition tp, + int numRecords, + int maxPollRecords, + int startingOffset, + int startingKeyAndValueIndex, + long startingTimestamp, + long timestampIncrement + ) throws InterruptedException { + var records = consumeRecords(consumer, numRecords, maxPollRecords); + for (var i = 0; i < numRecords; i++) { + var record = records.get(i); + var offset = startingOffset + i; + + assertEquals(tp.topic(), record.topic()); + assertEquals(tp.partition(), record.partition()); + + assertEquals(TimestampType.CREATE_TIME, record.timestampType()); + var timestamp = startingTimestamp + i * (timestampIncrement > 0 ? 
timestampIncrement : 1); + assertEquals(timestamp, record.timestamp()); + + assertEquals(offset, record.offset()); + var keyAndValueIndex = startingKeyAndValueIndex + i; + assertEquals(KEY_PREFIX + keyAndValueIndex, new String(record.key())); + assertEquals(VALUE_PREFIX + keyAndValueIndex, new String(record.value())); + // this is true only because K and V are byte arrays + assertEquals((KEY_PREFIX + keyAndValueIndex).length(), record.serializedKeySize()); + assertEquals((VALUE_PREFIX + keyAndValueIndex).length(), record.serializedValueSize()); + } + } + + public static void consumeAndVerifyRecords( + Consumer consumer, + TopicPartition tp, + int numRecords, + int startingOffset, + int startingKeyAndValueIndex, + long startingTimestamp + ) throws InterruptedException { + consumeAndVerifyRecords(consumer, tp, numRecords, startingOffset, startingKeyAndValueIndex, startingTimestamp, -1); + } + + public static void consumeAndVerifyRecords( + Consumer consumer, + TopicPartition tp, + int numRecords, + int startingOffset + ) throws InterruptedException { + consumeAndVerifyRecords(consumer, tp, numRecords, startingOffset, 0, 0, -1); + } + + public static void sendRecords( + ClusterInstance cluster, + TopicPartition tp, + int numRecords, + long startingTimestamp, + long timestampIncrement + ) { + try (Producer producer = cluster.producer()) { + for (var i = 0; i < numRecords; i++) { + sendRecord(producer, tp, startingTimestamp, i, timestampIncrement); + } + producer.flush(); + } + } + + public static void sendRecords( + ClusterInstance cluster, + TopicPartition tp, + int numRecords, + long startingTimestamp + ) { + sendRecords(cluster, tp, numRecords, startingTimestamp, -1); + } + + public static void sendRecords( + ClusterInstance cluster, + TopicPartition tp, + int numRecords + ) { + sendRecords(cluster, tp, numRecords, System.currentTimeMillis()); + } + + public static List> sendRecords( + Producer producer, + TopicPartition tp, + int numRecords, + long startingTimestamp, + long timestampIncrement + ) { + List> records = new ArrayList<>(); + for (var i = 0; i < numRecords; i++) { + var record = sendRecord(producer, tp, startingTimestamp, i, timestampIncrement); + records.add(record); + } + producer.flush(); + return records; + } + + public static void sendRecords( + Producer producer, + TopicPartition tp, + int numRecords, + long startingTimestamp + ) { + for (var i = 0; i < numRecords; i++) { + sendRecord(producer, tp, startingTimestamp, i, -1); + } + producer.flush(); + } + + public static void awaitAssignment( + Consumer consumer, + Set expectedAssignment + ) throws InterruptedException { + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)); + return consumer.assignment().equals(expectedAssignment); + }, "Timed out while awaiting expected assignment " + expectedAssignment + ". " + + "The current assignment is " + consumer.assignment() + ); + } + + private static ProducerRecord sendRecord( + Producer producer, + TopicPartition tp, + long startingTimestamp, + int numRecord, + long timestampIncrement + ) { + var timestamp = startingTimestamp + numRecord * (timestampIncrement > 0 ? 
timestampIncrement : 1); + var record = new ProducerRecord<>( + tp.topic(), + tp.partition(), + timestamp, + (KEY_PREFIX + numRecord).getBytes(), + (VALUE_PREFIX + numRecord).getBytes() + ); + producer.send(record); + return record; + } + + public static void sendAndAwaitAsyncCommit( + Consumer consumer, + Optional> offsetsOpt + ) throws InterruptedException { + + var commitCallback = new RetryCommitCallback<>(consumer, offsetsOpt); + sendAsyncCommit(consumer, commitCallback, offsetsOpt); + + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)); + return commitCallback.isComplete; + }, "Failed to observe commit callback before timeout"); + + assertEquals(Optional.empty(), commitCallback.error); + } + + public static void awaitRebalance( + Consumer consumer, + TestConsumerReassignmentListener rebalanceListener + ) throws InterruptedException { + var numReassignments = rebalanceListener.callsToAssigned; + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)); + return rebalanceListener.callsToAssigned > numReassignments; + }, "Timed out before expected rebalance completed"); + } + + public static void ensureNoRebalance( + Consumer consumer, + TestConsumerReassignmentListener rebalanceListener + ) throws InterruptedException { + // The best way to verify that the current membership is still active is to commit offsets. + // This would fail if the group had rebalanced. + var initialRevokeCalls = rebalanceListener.callsToRevoked; + sendAndAwaitAsyncCommit(consumer, Optional.empty()); + assertEquals(initialRevokeCalls, rebalanceListener.callsToRevoked); + } + + + public static void waitForPollThrowException( + Consumer consumer, + Class exceptedException + ) throws InterruptedException { + TestUtils.waitForCondition(() -> { + try { + consumer.poll(Duration.ZERO); + return false; + } catch (Exception e) { + return exceptedException.isInstance(e); + } + }, "Continuous poll not fail"); + } + + /** + * This class is intended to replace the test cases in BaseConsumerTest.scala. + * When converting tests that extend from BaseConsumerTest.scala to Java, + * we should use the test cases provided in this class. 
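+ * Each test case takes the ClusterInstance under test together with the per-test consumer configuration overrides to apply.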
+ */ + public static final class BaseConsumerTestcase { + + public static final int BROKER_COUNT = 3; + public static final String TOPIC = "topic"; + public static final TopicPartition TP = new TopicPartition(TOPIC, 0); + + private BaseConsumerTestcase() { + } + + public static void testSimpleConsumption( + ClusterInstance cluster, + Map config + ) throws InterruptedException { + var numRecords = 10000; + var startingTimestamp = System.currentTimeMillis(); + sendRecords(cluster, TP, numRecords, startingTimestamp); + try (Consumer consumer = cluster.consumer(config)) { + assertEquals(0, consumer.assignment().size()); + consumer.assign(List.of(TP)); + assertEquals(1, consumer.assignment().size()); + consumer.seek(TP, 0); + consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp); + // check async commit callbacks + sendAndAwaitAsyncCommit(consumer, Optional.empty()); + } + } + + public static void testClusterResourceListener( + ClusterInstance cluster, + Map consumerConfig + ) throws InterruptedException { + var numRecords = 100; + Map producerConfig = Map.of( + KEY_SERIALIZER_CLASS_CONFIG, TestClusterResourceListenerSerializer.class, + VALUE_SERIALIZER_CLASS_CONFIG, TestClusterResourceListenerSerializer.class + ); + Map consumerConfigOverrides = new HashMap<>(consumerConfig); + consumerConfigOverrides.put(KEY_DESERIALIZER_CLASS_CONFIG, TestClusterResourceListenerDeserializer.class); + consumerConfigOverrides.put(VALUE_DESERIALIZER_CLASS_CONFIG, TestClusterResourceListenerDeserializer.class); + try (Producer producer = cluster.producer(producerConfig); + Consumer consumer = cluster.consumer(consumerConfigOverrides) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, numRecords, startingTimestamp, -1); + + consumer.subscribe(List.of(TP.topic())); + consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp); + assertNotEquals(0, UPDATE_PRODUCER_COUNT.get()); + assertNotEquals(0, UPDATE_CONSUMER_COUNT.get()); + + TestClusterResourceListenerSerializer.resetCount(); + TestClusterResourceListenerDeserializer.resetCount(); + } + } + + public static void testCoordinatorFailover( + ClusterInstance cluster, + Map consumerConfig + ) throws InterruptedException { + var listener = new TestConsumerReassignmentListener(); + try (Consumer consumer = cluster.consumer(consumerConfig)) { + consumer.subscribe(List.of(TOPIC), listener); + // the initial subscription should cause a callback execution + awaitRebalance(consumer, listener); + assertEquals(1, listener.callsToAssigned); + + // get metadata for the topic + List parts = null; + while (parts == null) { + parts = consumer.partitionsFor(Topic.GROUP_METADATA_TOPIC_NAME); + } + assertEquals(1, parts.size()); + assertNotNull(parts.get(0).leader()); + + // shutdown the coordinator + int coordinator = parts.get(0).leader().id(); + cluster.shutdownBroker(coordinator); + + // the failover should not cause a rebalance + ensureNoRebalance(consumer, listener); + } + } + } + + public static void sendAsyncCommit( + Consumer consumer, + OffsetCommitCallback callback, + Optional> offsetsOpt + ) { + offsetsOpt.ifPresentOrElse( + offsets -> consumer.commitAsync(offsets, callback), + () -> consumer.commitAsync(callback) + ); + } + + public static class TestClusterResourceListenerSerializer implements Serializer, ClusterResourceListener { + + public static final AtomicInteger UPDATE_PRODUCER_COUNT = new AtomicInteger(); + + @Override + public void onUpdate(ClusterResource clusterResource) { + 
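+ // Count the cluster metadata updates seen by the producer-side serializer so tests can assert the listener fired.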
UPDATE_PRODUCER_COUNT.incrementAndGet(); + } + + @Override + public byte[] serialize(String topic, byte[] data) { + return data; + } + + public static void resetCount() { + UPDATE_PRODUCER_COUNT.set(0); + } + } + + public static class TestClusterResourceListenerDeserializer implements Deserializer, ClusterResourceListener { + + public static final AtomicInteger UPDATE_CONSUMER_COUNT = new AtomicInteger(); + + @Override + public void onUpdate(ClusterResource clusterResource) { + UPDATE_CONSUMER_COUNT.incrementAndGet(); + } + + @Override + public byte[] deserialize(String topic, byte[] data) { + return data; + } + + public static void resetCount() { + UPDATE_CONSUMER_COUNT.set(0); + } + } + + private static class RetryCommitCallback implements OffsetCommitCallback { + boolean isComplete = false; + Optional error = Optional.empty(); + Consumer consumer; + Optional> offsetsOpt; + + public RetryCommitCallback( + Consumer consumer, + Optional> offsetsOpt + ) { + this.consumer = consumer; + this.offsetsOpt = offsetsOpt; + } + + @Override + public void onComplete(Map offsets, Exception exception) { + if (exception instanceof RetriableCommitFailedException) { + sendAsyncCommit(consumer, this, offsetsOpt); + } else { + isComplete = true; + error = Optional.ofNullable(exception); + } + } + } + + public static class TestConsumerReassignmentListener implements ConsumerRebalanceListener { + public int callsToAssigned = 0; + public int callsToRevoked = 0; + + @Override + public void onPartitionsAssigned(Collection partitions) { + callsToAssigned += 1; + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + callsToRevoked += 1; + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/CreateTopicsRequestWithPolicyTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/CreateTopicsRequestWithPolicyTest.java new file mode 100644 index 0000000000000..bfbc4794da573 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/CreateTopicsRequestWithPolicyTest.java @@ -0,0 +1,239 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.clients; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.CreateTopicsOptions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.errors.InvalidPartitionsException; +import org.apache.kafka.common.errors.InvalidReplicationFactorException; +import org.apache.kafka.common.errors.PolicyViolationException; +import org.apache.kafka.common.errors.TopicExistsException; +import org.apache.kafka.common.internals.Topic; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.server.policy.CreateTopicPolicy; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ClusterTestDefaults( + brokers = 3, + serverProperties = { + @ClusterConfigProperty(key = ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, value = "org.apache.kafka.clients.CreateTopicsRequestWithPolicyTest$Policy"), + } +) +public class CreateTopicsRequestWithPolicyTest { + + public static class Policy implements CreateTopicPolicy { + private Map configs; + private boolean closed = false; + + @Override + public void configure(Map configs) { + this.configs = configs; + } + + @Override + public void validate(RequestMetadata requestMetadata) throws PolicyViolationException { + if (Topic.isInternal(requestMetadata.topic())) { + // Do not verify internal topics + return; + } + + if (closed) { + throw new IllegalStateException("Policy should not be closed"); + } + if (configs == null || configs.isEmpty()) { + throw new IllegalStateException("Configure should have been called with non empty configs"); + } + + if (requestMetadata.numPartitions() != null || requestMetadata.replicationFactor() != null) { + assertNotNull(requestMetadata.numPartitions(), "numPartitions should not be null, but it is " + requestMetadata.numPartitions()); + assertNotNull(requestMetadata.replicationFactor(), "replicationFactor should not be null, but it is " + requestMetadata.replicationFactor()); + assertNull(requestMetadata.replicasAssignments(), "replicaAssignments should be null, but it is " + requestMetadata.replicasAssignments()); + + if (requestMetadata.numPartitions() < 5) { + throw new PolicyViolationException("Topics should have at least 5 partitions, received " + + requestMetadata.numPartitions()); + } + + if (requestMetadata.numPartitions() > 10) { + String retentionMs = requestMetadata.configs().get(TopicConfig.RETENTION_MS_CONFIG); + if (retentionMs == null || Integer.parseInt(retentionMs) > 5000) { + throw new PolicyViolationException("RetentionMs should be less than 5000ms if partitions > 10"); + } + } else { + assertTrue(requestMetadata.configs().isEmpty(), "Topic configs should be empty, but it is " + requestMetadata.configs()); + } + } else { + assertNull(requestMetadata.numPartitions(), "numPartitions should be null, but it is " + requestMetadata.numPartitions()); + 
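+ // With explicit replica assignments, numPartitions and replicationFactor are expected to be unset.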
assertNull(requestMetadata.replicationFactor(), "replicationFactor should be null, but it is " + requestMetadata.replicationFactor()); + assertNotNull(requestMetadata.replicasAssignments(), "replicasAssignments should not be null, but it is " + requestMetadata.replicasAssignments()); + + requestMetadata.replicasAssignments().forEach((partitionId, replicas) -> { + if (replicas.size() < 2) { + throw new PolicyViolationException("Topic partitions should have at least 2 replicas, received " + + replicas.size() + " for partition " + partitionId); + } + }); + } + } + + @Override + public void close() { + closed = true; + } + } + + private void validateValidCreateTopicsRequests(NewTopic topic, Admin admin, boolean validateOnly) throws Exception { + admin.createTopics( + List.of(topic), + new CreateTopicsOptions().validateOnly(validateOnly) + ).all().get(); + } + + @ClusterTest + public void testValidCreateTopicsRequests(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + cluster.createTopic("topic1", 5, (short) 1); + + validateValidCreateTopicsRequests( + new NewTopic("topic2", 5, (short) 3), + admin, + true + ); + + validateValidCreateTopicsRequests( + new NewTopic("topic3", 11, (short) 2) + .configs(Map.of(TopicConfig.RETENTION_MS_CONFIG, "4999")), + admin, + true + ); + + validateValidCreateTopicsRequests( + new NewTopic("topic4", Map.of( + 0, List.of(1, 0), + 1, List.of(0, 1) + )), + admin, + false + ); + } + } + + private void validateErrorCreateTopicsRequests(NewTopic topic, Admin admin, boolean validateOnly, Class expectedExceptionClass, String expectedErrorMessage) { + ExecutionException exception = assertThrows(ExecutionException.class, () -> + admin.createTopics(List.of(topic), new CreateTopicsOptions().validateOnly(validateOnly)).all().get()); + assertEquals( + expectedExceptionClass, + exception.getCause().getClass(), + "Expected " + expectedExceptionClass.getSimpleName() + ", but got " + exception.getCause().getClass().getSimpleName() + ); + assertTrue(exception.getMessage().contains(expectedErrorMessage)); + } + + @ClusterTest + public void testErrorCreateTopicsRequests(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + String existingTopic = "existing-topic"; + cluster.createTopic(existingTopic, 5, (short) 1); + + // Policy violations + validateErrorCreateTopicsRequests( + new NewTopic("policy-topic1", 4, (short) 1), + admin, + false, + PolicyViolationException.class, + "Topics should have at least 5 partitions, received 4" + ); + + validateErrorCreateTopicsRequests( + new NewTopic("policy-topic2", 4, (short) 3), + admin, + true, + PolicyViolationException.class, + "Topics should have at least 5 partitions, received 4" + ); + + validateErrorCreateTopicsRequests( + new NewTopic("policy-topic3", 11, (short) 2) + .configs(Map.of(TopicConfig.RETENTION_MS_CONFIG, "5001")), + admin, + true, + PolicyViolationException.class, + "RetentionMs should be less than 5000ms if partitions > 10" + ); + + validateErrorCreateTopicsRequests( + new NewTopic("policy-topic4", Map.of( + 0, List.of(1), + 1, List.of(0) + )).configs(Map.of(TopicConfig.RETENTION_MS_CONFIG, "5001")), + admin, + true, + PolicyViolationException.class, + "Topic partitions should have at least 2 replicas, received 1 for partition 0" + ); + + // Check that basic errors still work + validateErrorCreateTopicsRequests( + new NewTopic(existingTopic, 5, (short) 1), + admin, + false, + TopicExistsException.class, + "Topic 'existing-topic' already exists." 
+ ); + + validateErrorCreateTopicsRequests( + new NewTopic("error-replication", 10, (short) 4), + admin, + true, + InvalidReplicationFactorException.class, + "Unable to replicate the partition 4 time(s): The target replication factor of 4 cannot be reached because only 3 broker(s) are registered." + ); + + validateErrorCreateTopicsRequests( + new NewTopic("error-replication2", 10, (short) -2), + admin, + true, + InvalidReplicationFactorException.class, + "Replication factor must be larger than 0, or -1 to use the default value." + ); + + validateErrorCreateTopicsRequests( + new NewTopic("error-partitions", -2, (short) 1), + admin, + true, + InvalidPartitionsException.class, + "Number of partitions was set to an invalid non-positive value." + ); + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetadataVersionIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetadataVersionIntegrationTest.java new file mode 100644 index 0000000000000..3d560892fe87d --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetadataVersionIntegrationTest.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients; + +import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.UpdateFeaturesOptions; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTests; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.server.common.MetadataVersion; +import org.apache.kafka.test.TestUtils; + +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class MetadataVersionIntegrationTest { + @ClusterTests(value = { + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_3_IV3), + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_4_IV0), + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_5_IV0), + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_6_IV0), + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_6_IV1) + }) + public void testBasicMetadataVersionUpgrade(ClusterInstance clusterInstance) throws Exception { + try (var admin = clusterInstance.admin()) { + var describeResult = admin.describeFeatures(); + var ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME); + assertEquals(clusterInstance.config().metadataVersion().featureLevel(), ff.minVersionLevel()); + assertEquals(clusterInstance.config().metadataVersion().featureLevel(), ff.maxVersionLevel()); + + // Update to new version + short updateVersion = MetadataVersion.IBP_3_7_IV1.featureLevel(); + var updateResult = admin.updateFeatures( + Map.of("metadata.version", new FeatureUpdate(updateVersion, FeatureUpdate.UpgradeType.UPGRADE)), + new UpdateFeaturesOptions()); + updateResult.all().get(); + + // Verify that new version is visible on broker + TestUtils.waitForCondition(() -> { + try { + var describeResult2 = admin.describeFeatures(); + var ff2 = describeResult2.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME); + return ff2.minVersionLevel() == updateVersion && ff2.maxVersionLevel() == updateVersion; + } catch (Exception e) { + return false; + } + }, "Never saw metadata.version increase on broker"); + } + } + + @ClusterTest(types = Type.KRAFT, metadataVersion = MetadataVersion.IBP_3_9_IV0) + public void testUpgradeSameVersion(ClusterInstance clusterInstance) throws Exception { + try (var admin = clusterInstance.admin()) { + short updateVersion = MetadataVersion.IBP_3_9_IV0.featureLevel(); + var updateResult = admin.updateFeatures( + Map.of("metadata.version", new FeatureUpdate(updateVersion, FeatureUpdate.UpgradeType.UPGRADE)), + new UpdateFeaturesOptions()); + updateResult.all().get(); + } + } + + @ClusterTest(types = Type.KRAFT) + public void testDefaultIsLatestVersion(ClusterInstance clusterInstance) throws Exception { + try (var admin = clusterInstance.admin()) { + var describeResult = admin.describeFeatures(); + var ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME); + assertEquals(MetadataVersion.latestTesting().featureLevel(), ff.minVersionLevel(), + "If this test fails, check the default MetadataVersion in the @ClusterTest annotation"); + assertEquals(MetadataVersion.latestTesting().featureLevel(), ff.maxVersionLevel()); + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetricsDuringTopicCreationDeletionTest.java 
b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetricsDuringTopicCreationDeletionTest.java new file mode 100644 index 0000000000000..f79ddabd8caf2 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/MetricsDuringTopicCreationDeletionTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients; + +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.server.config.ReplicationConfigs; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.server.metrics.KafkaYammerMetrics; + +import com.yammer.metrics.core.Gauge; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class MetricsDuringTopicCreationDeletionTest { + + private static final String TOPIC_NAME_PREFIX = "topic"; + private static final int TOPIC_NUM = 2; + private static final int CREATE_DELETE_ITERATIONS = 3; + private static final short REPLICATION_FACTOR = 1; + private static final int PARTITION_NUM = 3; + + private final ClusterInstance clusterInstance; + private final List topics; + private volatile boolean running = true; + + public MetricsDuringTopicCreationDeletionTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + this.topics = new ArrayList<>(); + for (int n = 0; n < TOPIC_NUM; n++) { + topics.add(TOPIC_NAME_PREFIX + n); + } + } + + /* + * Checking all metrics we care in a single test is faster though it would be more elegant to have 3 @Test methods + */ + @ClusterTest( + types = {Type.KRAFT}, + brokers = 1, + serverProperties = { + @ClusterConfigProperty(key = ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, value = "true"), + @ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"), + @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000"), + @ClusterConfigProperty(key = ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, value = "false"), + // speed up the test for UnderReplicatedPartitions, which relies on the ISR expiry thread to execute concurrently with topic creation + // But the replica.lag.time.max.ms value still need to consider the slow testing environment + @ClusterConfigProperty(key = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_CONFIG, value = "4000") + } + ) + public void testMetricsDuringTopicCreateDelete() throws Exception { + + 
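+ // Capture the baseline gauge values before the topic create/delete churn starts.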
final int initialOfflinePartitionsCount = getGauge("OfflinePartitionsCount").value(); + final int initialPreferredReplicaImbalanceCount = getGauge("PreferredReplicaImbalanceCount").value(); + final int initialUnderReplicatedPartitionsCount = getGauge("UnderReplicatedPartitions").value(); + + CompletableFuture future = CompletableFuture.runAsync(() -> { + while (running) { + int offlinePartitionsCount = getGauge("OfflinePartitionsCount").value(); + int preferredReplicaImbalanceCount = getGauge("PreferredReplicaImbalanceCount").value(); + int underReplicatedPartitionsCount = getGauge("UnderReplicatedPartitions").value(); + + if (offlinePartitionsCount != initialOfflinePartitionsCount || + preferredReplicaImbalanceCount != initialPreferredReplicaImbalanceCount || + underReplicatedPartitionsCount != initialUnderReplicatedPartitionsCount) { + running = false; + } + + try { + // Avoid busy loop + TimeUnit.MILLISECONDS.sleep(100); + } catch (InterruptedException ignored) { + + } + } + }); + + Closeable runThread = () -> { + running = false; + future.join(); + }; + + try (runThread) { + createAndDeleteTopics(); + } + + final int finalOfflinePartitionsCount = getGauge("OfflinePartitionsCount").value(); + final int finalPreferredReplicaImbalanceCount = getGauge("PreferredReplicaImbalanceCount").value(); + final int finalUnderReplicatedPartitionsCount = getGauge("UnderReplicatedPartitions").value(); + + assertEquals(initialOfflinePartitionsCount, finalOfflinePartitionsCount, + "Expect offlinePartitionsCount to be " + initialOfflinePartitionsCount + ", but got: " + finalOfflinePartitionsCount); + assertEquals(initialPreferredReplicaImbalanceCount, finalPreferredReplicaImbalanceCount, + "Expect PreferredReplicaImbalanceCount to be " + initialPreferredReplicaImbalanceCount + ", but got: " + finalPreferredReplicaImbalanceCount); + assertEquals(initialUnderReplicatedPartitionsCount, finalUnderReplicatedPartitionsCount, + "Expect UnderReplicatedPartitionCount to be " + initialUnderReplicatedPartitionsCount + ", but got: " + finalUnderReplicatedPartitionsCount); + } + + private void createAndDeleteTopics() { + for (int i = 1; i <= CREATE_DELETE_ITERATIONS && running; i++) { + for (String topic : topics) { + if (!running) return; + try { + clusterInstance.createTopic(topic, PARTITION_NUM, REPLICATION_FACTOR); + } catch (Exception ignored) { } + } + + for (String topic : topics) { + if (!running) return; + try { + clusterInstance.deleteTopic(topic); + } catch (Exception ignored) { } + } + } + } + + @SuppressWarnings("unchecked") + private Gauge getGauge(String metricName) { + return KafkaYammerMetrics.defaultRegistry().allMetrics().entrySet().stream() + .filter(entry -> entry.getKey().getName().endsWith(metricName)) + .findFirst() + .map(entry -> (Gauge) entry.getValue()) + .orElseThrow(() -> new AssertionError("Unable to find metric " + metricName)); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsExpirationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsExpirationTest.java index adb55f965afae..01eca9afb971a 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsExpirationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsExpirationTest.java @@ -34,7 +34,6 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; import 
org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterFeature; import org.apache.kafka.common.test.api.ClusterTest; @@ -47,6 +46,7 @@ import org.apache.kafka.server.config.ReplicationConfigs; import org.apache.kafka.server.config.ServerConfigs; import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.test.TestUtils; import java.time.Duration; import java.util.ArrayList; @@ -165,8 +165,8 @@ private void testFatalErrorAfterInvalidProducerIdMapping(ClusterInstance cluster private void testTransactionAfterProducerIdExpires(ClusterInstance clusterInstance, boolean isTV2Enabled) throws InterruptedException { clusterInstance.createTopic(TOPIC1, 4, (short) 3); - long oldProducerId = 0; - long oldProducerEpoch = 0; + long oldProducerId; + long oldProducerEpoch; try (Producer producer = clusterInstance.producer(Map.of( ProducerConfig.TRANSACTIONAL_ID_CONFIG, TRANSACTION_ID diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsWithMaxInFlightOneTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsWithMaxInFlightOneTest.java index fb27f9be4f03f..d95901249c009 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsWithMaxInFlightOneTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/TransactionsWithMaxInFlightOneTest.java @@ -27,7 +27,6 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTestDefaults; @@ -38,6 +37,7 @@ import org.apache.kafka.server.config.ReplicationConfigs; import org.apache.kafka.server.config.ServerConfigs; import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.test.TestUtils; import java.time.Duration; import java.util.ArrayList; diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/AdminClientWithPoliciesIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/AdminClientWithPoliciesIntegrationTest.java new file mode 100644 index 0000000000000..8bad3b1c9006c --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/AdminClientWithPoliciesIntegrationTest.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.clients.admin; + +import org.apache.kafka.clients.admin.AlterConfigOp.OpType; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.SslConfigs; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.errors.InvalidRequestException; +import org.apache.kafka.common.errors.PolicyViolationException; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.network.SocketServerConfigs; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.server.policy.AlterConfigPolicy; +import org.apache.kafka.storage.internals.log.LogConfig; + +import org.junit.jupiter.api.BeforeEach; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.kafka.test.TestUtils.assertFutureThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests AdminClient calls when the broker is configured with policies - AlterConfigPolicy. + */ + +@ClusterTestDefaults( + brokers = 3, + serverProperties = { + @ClusterConfigProperty(key = ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, value = "org.apache.kafka.clients.admin.AdminClientWithPoliciesIntegrationTest$Policy"), + } +) +public class AdminClientWithPoliciesIntegrationTest { + private final ClusterInstance clusterInstance; + private static List validations = new ArrayList<>(); + + AdminClientWithPoliciesIntegrationTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + @BeforeEach + public void setup() throws InterruptedException { + clusterInstance.waitForReadyBrokers(); + } + + @ClusterTest + public void testInvalidAlterConfigsDueToPolicy() throws Exception { + try (final Admin adminClient = clusterInstance.admin()) { + // Create topics + String topic1 = "invalid-alter-configs-due-to-policy-topic-1"; + String topic2 = "invalid-alter-configs-due-to-policy-topic-2"; + String topic3 = "invalid-alter-configs-due-to-policy-topic-3"; + clusterInstance.createTopic(topic1, 1, (short) 1); + clusterInstance.createTopic(topic2, 1, (short) 1); + clusterInstance.createTopic(topic3, 1, (short) 1); + + ConfigResource topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1); + ConfigResource topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2); + ConfigResource topicResource3 = new ConfigResource(ConfigResource.Type.TOPIC, topic3); + + // Set a mutable broker config + ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "0"); // "0" represents the broker ID + Map> configOps = Map.of( + brokerResource, List.of(new AlterConfigOp(new ConfigEntry(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "50000"), OpType.SET)) + ); + adminClient.incrementalAlterConfigs(configOps).all().get(); + assertEquals(Set.of(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG), validationsForResource(brokerResource).get(0).configs().keySet()); + 
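+ // Clear the recorded policy validations before exercising the invalid alter-config cases below.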
validations.clear(); + + Map> alterConfigs = new HashMap<>(); + alterConfigs.put(topicResource1, List.of( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2"), OpType.SET) + )); + alterConfigs.put(topicResource2, List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.8"), OpType.SET))); + alterConfigs.put(topicResource3, List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "-1"), OpType.SET))); + alterConfigs.put(brokerResource, List.of(new AlterConfigOp(new ConfigEntry(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "12313"), OpType.SET))); + + // Alter configs: second is valid, the others are invalid + AlterConfigsResult alterResult = adminClient.incrementalAlterConfigs(alterConfigs); + assertEquals(Set.of(topicResource1, topicResource2, topicResource3, brokerResource), alterResult.values().keySet()); + assertFutureThrows(PolicyViolationException.class, alterResult.values().get(topicResource1)); + alterResult.values().get(topicResource2).get(); + assertFutureThrows(InvalidConfigurationException.class, alterResult.values().get(topicResource3)); + assertFutureThrows(InvalidRequestException.class, alterResult.values().get(brokerResource)); + assertTrue(validationsForResource(brokerResource).isEmpty(), + "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated."); + validations.clear(); + + // Verify that the second resource was updated and the others were not + clusterInstance.ensureConsistentMetadata(); + DescribeConfigsResult describeResult = adminClient.describeConfigs(List.of(topicResource1, topicResource2, topicResource3, brokerResource)); + var configs = describeResult.all().get(); + assertEquals(4, configs.size()); + + assertEquals(String.valueOf(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO), configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value()); + assertEquals(String.valueOf(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT), configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value()); + + assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value()); + + assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value()); + + // Alter configs with validateOnly = true: only second is valid + alterConfigs.put(topicResource2, List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.7"), OpType.SET))); + alterResult = adminClient.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)); + + assertFutureThrows(PolicyViolationException.class, alterResult.values().get(topicResource1)); + alterResult.values().get(topicResource2).get(); + assertFutureThrows(InvalidConfigurationException.class, alterResult.values().get(topicResource3)); + assertFutureThrows(InvalidRequestException.class, alterResult.values().get(brokerResource)); + assertTrue(validationsForResource(brokerResource).isEmpty(), + "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated."); + validations.clear(); + + // Verify that no resources are updated since validate_only = true + clusterInstance.ensureConsistentMetadata(); + describeResult = adminClient.describeConfigs(List.of(topicResource1, topicResource2, topicResource3, brokerResource)); + configs = 
describeResult.all().get(); + assertEquals(4, configs.size()); + + assertEquals(String.valueOf(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO), configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value()); + assertEquals(String.valueOf(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT), configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value()); + + assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value()); + + assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value()); + + // Do an incremental alter config on the broker, ensure we don't see the broker config we set earlier in the policy + alterResult = adminClient.incrementalAlterConfigs(Map.of( + brokerResource, List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, "9999"), OpType.SET)) + )); + alterResult.all().get(); + assertEquals(Set.of(SocketServerConfigs.MAX_CONNECTIONS_CONFIG), validationsForResource(brokerResource).get(0).configs().keySet()); + } + } + + private static List validationsForResource(ConfigResource resource) { + return validations.stream().filter(req -> req.resource().equals(resource)).toList(); + } + + /** + * Used in @ClusterTestDefaults serverProperties, so it may appear unused in the IDE. + */ + public static class Policy implements AlterConfigPolicy { + private Map configs; + private boolean closed = false; + + + @Override + public void configure(Map configs) { + validations.clear(); + this.configs = configs; + } + + @Override + public void validate(AlterConfigPolicy.RequestMetadata requestMetadata) { + validations.add(requestMetadata); + assertFalse(closed, "Policy should not be closed"); + assertFalse(configs.isEmpty(), "configure should have been called with non empty configs"); + assertFalse(requestMetadata.configs().isEmpty(), "request configs should not be empty"); + assertFalse(requestMetadata.resource().name().isEmpty(), "resource name should not be empty"); + if (requestMetadata.configs().containsKey(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG)) { + throw new PolicyViolationException("Min in sync replicas cannot be updated"); + } + } + + @Override + public void close() throws Exception { + this.closed = true; + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ClientTelemetryTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ClientTelemetryTest.java index 008b6096c3d79..70881ef8d8a37 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ClientTelemetryTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ClientTelemetryTest.java @@ -46,6 +46,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -54,7 +55,6 @@ import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; -import java.util.stream.Stream; import static java.util.Arrays.asList; import static org.apache.kafka.clients.admin.AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG; @@ -79,7 +79,7 @@ public void testClientInstanceId(ClusterInstance clusterInstance) throws Interru try (Admin admin = Admin.create(configs)) { String testTopicName = "test_topic"; admin.createTopics(Collections.singletonList(new NewTopic(testTopicName, 1, (short) 1))); - 
clusterInstance.waitForTopic(testTopicName, 1); + clusterInstance.waitTopicCreation(testTopicName, 1); Map producerConfigs = new HashMap<>(); producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); @@ -128,7 +128,7 @@ public void testIntervalMsParser(ClusterInstance clusterInstance) { List alterOpts = asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--alter", "--entity-type", "client-metrics", "--entity-name", "test", "--add-config", "interval.ms=bbb"); try (Admin client = clusterInstance.admin()) { - ConfigCommand.ConfigCommandOptions addOpts = new ConfigCommand.ConfigCommandOptions(toArray(alterOpts)); + ConfigCommand.ConfigCommandOptions addOpts = new ConfigCommand.ConfigCommandOptions(toArray(Set.of(alterOpts))); Throwable e = assertThrows(ExecutionException.class, () -> ConfigCommand.alterConfig(client, addOpts)); assertTrue(e.getMessage().contains(InvalidConfigurationException.class.getSimpleName())); @@ -152,8 +152,8 @@ public void testMetrics(ClusterInstance clusterInstance) { } } - private static String[] toArray(List... lists) { - return Stream.of(lists).flatMap(List::stream).toArray(String[]::new); + private static String[] toArray(Collection> lists) { + return lists.stream().flatMap(List::stream).toArray(String[]::new); } /** @@ -161,6 +161,7 @@ private static String[] toArray(List... lists) { * {@link org.apache.kafka.common.protocol.ApiKeys#GET_TELEMETRY_SUBSCRIPTIONS} command will not be supported * by the server **/ + @SuppressWarnings("unused") public static class GetIdClientTelemetry implements ClientTelemetry, MetricsReporter { diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DeleteTopicTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DeleteTopicTest.java index fceebc0c1b62c..a803b426f50f6 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DeleteTopicTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DeleteTopicTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTestDefaults; @@ -37,6 +36,7 @@ import org.apache.kafka.storage.internals.log.AppendOrigin; import org.apache.kafka.storage.internals.log.UnifiedLog; import org.apache.kafka.storage.internals.log.VerificationGuard; +import org.apache.kafka.test.TestUtils; import java.util.ArrayList; import java.util.Collection; @@ -70,7 +70,7 @@ public void testDeleteTopicWithAllAliveReplicas(ClusterInstance cluster) throws try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } @@ -93,7 +93,7 @@ public void testResumeDeleteTopicWithRecoveredFollower(ClusterInstance cluster) "Online replicas have not deleted log."); follower.startup(); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } @@ -119,7 +119,7 @@ public void testPartitionReassignmentDuringDeleteTopic(ClusterInstance cluster) } follower.startup(); - 
cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } @@ -147,7 +147,7 @@ public void testIncreasePartitionCountDuringDeleteTopic(ClusterInstance cluster) } follower.startup(); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } @@ -166,12 +166,12 @@ public void testDeleteTopicDuringAddPartition(ClusterInstance cluster) throws Ex "Follower " + followerBrokerId + " was not shutdown"); Map newPartitionSet = Map.of(DEFAULT_TOPIC, NewPartitions.increaseTo(3)); admin.createPartitions(newPartitionSet); - cluster.waitForTopic(DEFAULT_TOPIC, 3); + cluster.waitTopicCreation(DEFAULT_TOPIC, 3); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); follower.startup(); // test if topic deletion is resumed - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); waitForReplicaDeleted(cluster.brokers(), newTopicPartition, "Replica logs not for new partition [" + DEFAULT_TOPIC + ",1] not deleted after delete topic is complete."); } } @@ -185,7 +185,7 @@ public void testAddPartitionDuringDeleteTopic(ClusterInstance cluster) throws Ex admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); Map newPartitionSet = Map.of(DEFAULT_TOPIC, NewPartitions.increaseTo(3)); admin.createPartitions(newPartitionSet); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); waitForReplicaDeleted(cluster.brokers(), newTopicPartition, "Replica logs not deleted after delete topic is complete"); } } @@ -196,12 +196,13 @@ public void testRecreateTopicAfterDeletion(ClusterInstance cluster) throws Excep admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); // re-create topic on same replicas admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); waitForReplicaCreated(cluster.brokers(), topicPartition, "Replicas for topic " + DEFAULT_TOPIC + " not created."); } } + @ClusterTest public void testDeleteNonExistingTopic(ClusterInstance cluster) throws Exception { try (Admin admin = cluster.admin()) { @@ -217,10 +218,10 @@ public void testDeleteNonExistingTopic(ClusterInstance cluster) throws Exception } }, "Topic test2 should not exist."); - cluster.waitForTopic(topic, 0); + cluster.waitTopicDeletion(topic); waitForReplicaCreated(cluster.brokers(), topicPartition, "Replicas for topic test not created."); - TestUtils.waitUntilLeaderIsElectedOrChangedWithAdmin(admin, DEFAULT_TOPIC, 0, 1000); + cluster.waitUntilLeaderIsElectedOrChangedWithAdmin(admin, DEFAULT_TOPIC, 0, 1000); } } @@ -245,7 +246,7 @@ public void testDeleteTopicWithCleaner(ClusterInstance cluster) throws Exception server.logManager().cleaner().awaitCleaned(topicPartition, 0, 60000); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } @@ -264,7 +265,7 @@ public void testDeleteTopicAlreadyMarkedAsDeleted(ClusterInstance cluster) throw } }, "Topic " + DEFAULT_TOPIC + " should be marked for deletion or already deleted."); - cluster.waitForTopic(DEFAULT_TOPIC, 0); + cluster.waitTopicDeletion(DEFAULT_TOPIC); } } diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeAuthorizedOperationsTest.java 
b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeAuthorizedOperationsTest.java index d4ad50a8f74b6..1464fc06eb10f 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeAuthorizedOperationsTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeAuthorizedOperationsTest.java @@ -69,8 +69,10 @@ static List generator() { return List.of( ClusterConfig.defaultBuilder() .setTypes(Set.of(Type.KRAFT)) - .setServerProperties(Map.of(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1")) - .setServerProperties(Map.of(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1")) + .setServerProperties(Map.of( + GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1", + GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1" + )) .setBrokerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT) .setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT) .build() @@ -113,10 +115,10 @@ private void setupSecurity(ClusterInstance clusterInstance) throws ExecutionExce public void testConsumerGroupAuthorizedOperations(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { setupSecurity(clusterInstance); try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD)); - Admin user1 = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD)); + Admin user1 = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD)) ) { admin.createTopics(List.of(new NewTopic("topic1", 1, (short) 1))); - clusterInstance.waitForTopic("topic1", 1); + clusterInstance.waitTopicCreation("topic1", 1); // create consumers to avoid group not found error TopicPartition tp = new TopicPartition("topic1", 0); @@ -191,8 +193,8 @@ public void testTopicAuthorizedOperations(ClusterInstance clusterInstance) throw new NewTopic(topic1, 1, (short) 1), new NewTopic(topic2, 1, (short) 1) )); - clusterInstance.waitForTopic(topic1, 1); - clusterInstance.waitForTopic(topic2, 1); + clusterInstance.waitTopicCreation(topic1, 1); + clusterInstance.waitTopicCreation(topic2, 1); } try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD))) { diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeProducersWithBrokerIdTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeProducersWithBrokerIdTest.java new file mode 100644 index 0000000000000..979989af1cafa --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/DescribeProducersWithBrokerIdTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.BeforeEach; + +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; + + +@ClusterTestDefaults( + brokers = 3 +) +class DescribeProducersWithBrokerIdTest { + private static final String TOPIC_NAME = "test-topic"; + private static final int NUM_PARTITIONS = 1; + private static final short REPLICATION_FACTOR = 3; + + private static final TopicPartition TOPIC_PARTITION = new TopicPartition(TOPIC_NAME, 0); + + private final ClusterInstance clusterInstance; + + public DescribeProducersWithBrokerIdTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + private static void sendTestRecords(Producer producer) { + producer.send(new ProducerRecord<>(TOPIC_NAME, TOPIC_PARTITION.partition(), "key-0".getBytes(), "value-0".getBytes())); + producer.flush(); + } + + @BeforeEach + void setUp() throws InterruptedException { + clusterInstance.createTopic(TOPIC_NAME, NUM_PARTITIONS, REPLICATION_FACTOR); + } + + private List getReplicaBrokerIds(Admin admin) throws Exception { + var topicDescription = admin.describeTopics(List.of(TOPIC_PARTITION.topic())).allTopicNames().get().get(TOPIC_PARTITION.topic()); + return topicDescription.partitions().get(TOPIC_PARTITION.partition()).replicas().stream() + .map(Node::id) + .toList(); + } + + private int getNonReplicaBrokerId(Admin admin) throws Exception { + var replicaBrokerIds = getReplicaBrokerIds(admin); + return clusterInstance.brokerIds().stream() + .filter(id -> !replicaBrokerIds.contains(id)) + .findFirst() + .orElseThrow(() -> new IllegalStateException("No non-replica broker found")); + } + + private int getFollowerBrokerId(Admin admin) throws Exception { + var replicaBrokerIds = getReplicaBrokerIds(admin); + var leaderBrokerId = clusterInstance.getLeaderBrokerId(TOPIC_PARTITION); + return replicaBrokerIds.stream() + .filter(id -> id != leaderBrokerId) + .findFirst() + .orElseThrow(() -> new IllegalStateException("No follower found for partition " + TOPIC_PARTITION)); + } + + @ClusterTest + void testDescribeProducersDefaultRoutesToLeader() throws Exception { + try (Producer producer = clusterInstance.producer(); + var admin = clusterInstance.admin()) { + sendTestRecords(producer); + + var stateWithExplicitLeader = admin.describeProducers( + List.of(TOPIC_PARTITION), + new DescribeProducersOptions().brokerId(clusterInstance.getLeaderBrokerId(TOPIC_PARTITION)) + ).partitionResult(TOPIC_PARTITION).get(); + + var 
stateWithDefaultRouting = admin.describeProducers( + List.of(TOPIC_PARTITION) + ).partitionResult(TOPIC_PARTITION).get(); + + assertNotNull(stateWithDefaultRouting); + assertFalse(stateWithDefaultRouting.activeProducers().isEmpty()); + assertEquals(stateWithExplicitLeader.activeProducers(), stateWithDefaultRouting.activeProducers()); + } + } + + @ClusterTest + void testDescribeProducersFromFollower() throws Exception { + try (Producer producer = clusterInstance.producer(); + var admin = clusterInstance.admin()) { + sendTestRecords(producer); + + var followerState = admin.describeProducers( + List.of(TOPIC_PARTITION), + new DescribeProducersOptions().brokerId(getFollowerBrokerId(admin)) + ).partitionResult(TOPIC_PARTITION).get(); + + var leaderState = admin.describeProducers( + List.of(TOPIC_PARTITION) + ).partitionResult(TOPIC_PARTITION).get(); + + assertNotNull(followerState); + assertFalse(followerState.activeProducers().isEmpty()); + assertEquals(leaderState.activeProducers(), followerState.activeProducers()); + } + } + + @ClusterTest(brokers = 4) + void testDescribeProducersWithInvalidBrokerId() throws Exception { + try (Producer producer = clusterInstance.producer(); + var admin = clusterInstance.admin()) { + sendTestRecords(producer); + + TestUtils.assertFutureThrows(NotLeaderOrFollowerException.class, + admin.describeProducers( + List.of(TOPIC_PARTITION), + new DescribeProducersOptions().brokerId(getNonReplicaBrokerId(admin)) + ).partitionResult(TOPIC_PARTITION)); + } + } +} \ No newline at end of file diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ListOffsetsIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ListOffsetsIntegrationTest.java index 6b40203c01260..678a3b23b46a7 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ListOffsetsIntegrationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/ListOffsetsIntegrationTest.java @@ -28,12 +28,12 @@ import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTestDefaults; import org.apache.kafka.common.test.api.Type; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -44,13 +44,9 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import scala.jdk.javaapi.CollectionConverters; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -242,9 +238,8 @@ private void verifyListOffsets(String topic, int expectedMaxTimestampOffset) thr // case 2: test the offsets from recovery path. 
// server will rebuild offset index according to log files if the index files are nonexistent - Set indexFiles = clusterInstance.brokers().values().stream().flatMap(broker -> - CollectionConverters.asJava(broker.config().logDirs()).stream() - ).collect(Collectors.toUnmodifiableSet()); + List indexFiles = clusterInstance.brokers().values().stream().flatMap(broker -> + broker.config().logDirs().stream()).toList(); clusterInstance.brokers().values().forEach(KafkaBroker::shutdown); indexFiles.forEach(root -> { File[] files = new File(String.format("%s/%s-0", root, topic)).listFiles(); diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/RackAwareAutoTopicCreationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/RackAwareAutoTopicCreationTest.java index e3b9efb9b6f94..d394dcb627410 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/RackAwareAutoTopicCreationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/RackAwareAutoTopicCreationTest.java @@ -112,9 +112,7 @@ private static Map> getTopicAssignment(Admin admin) throw private static Map getBrokerToRackMap(ClusterInstance cluster) throws Exception { Map actualBrokerToRackMap = new HashMap<>(); try (Admin admin = cluster.admin()) { - admin.describeCluster().nodes().get().forEach(node -> { - actualBrokerToRackMap.put(node.id(), node.rack()); - }); + admin.describeCluster().nodes().get().forEach(node -> actualBrokerToRackMap.put(node.id(), node.rack())); } return actualBrokerToRackMap; } diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/StaticBrokerConfigTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/StaticBrokerConfigTest.java index 58240c0711e62..8ba228dd25234 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/StaticBrokerConfigTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/admin/StaticBrokerConfigTest.java @@ -17,20 +17,31 @@ package org.apache.kafka.clients.admin; +import kafka.server.KafkaConfig; + +import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.test.ClusterInstance; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.coordinator.group.GroupConfig; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.server.config.KRaftConfigs; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.metrics.ClientMetricsConfigs; +import org.apache.kafka.storage.internals.log.LogConfig; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; public class StaticBrokerConfigTest { private static final String TOPIC = "topic"; @@ -108,4 +119,80 @@ public void testTopicConfigsGetImpactedIfStaticConfigsAddToBroker(ClusterInstanc "Config value should not be custom value since controller doesn't have 
related static config"); } } + + @ClusterTest(types = {Type.KRAFT}) + public void testInternalConfigsDoNotReturnForDescribeConfigs(ClusterInstance cluster) throws Exception { + try ( + Admin admin = cluster.admin(); + Admin controllerAdmin = cluster.admin(Map.of(), true) + ) { + ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "0"); + ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC); + ConfigResource groupResource = new ConfigResource(ConfigResource.Type.GROUP, "testGroup"); + ConfigResource clientMetricsResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "testClient"); + + admin.createTopics(List.of(new NewTopic(TOPIC, 1, (short) 1))).config(TOPIC).get(); + // make sure the topic metadata exist + cluster.waitTopicCreation(TOPIC, 1); + Map configResourceMap = admin.describeConfigs( + List.of(brokerResource, topicResource, groupResource, clientMetricsResource)).all().get(); + + // test for case ConfigResource.Type == BROKER + // Notice: since the testing framework actively sets three internal configurations when starting the + // broker (see org.apache.kafka.common.test.KafkaClusterTestKit.Builder.createNodeConfig()), + // so the API `describeConfigs` will also return these three configurations. However, other internal + // configurations will not be returned + Set ignoreConfigNames = Set.of( + ServerConfigs.UNSTABLE_FEATURE_VERSIONS_ENABLE_CONFIG, + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, + KRaftConfigs.SERVER_MAX_STARTUP_TIME_MS_CONFIG); + Config brokerConfig = configResourceMap.get(brokerResource); + assertNotContainsInternalConfig(brokerConfig, KafkaConfig.configDef().configKeys(), ignoreConfigNames); + + // test for case ConfigResource.Type == TOPIC + Config topicConfig = configResourceMap.get(topicResource); + assertNotContainsAnyInternalConfig(topicConfig, LogConfig.configKeys()); + + // test for case ConfigResource.Type == GROUP + Config groupConfig = configResourceMap.get(groupResource); + assertNotContainsAnyInternalConfig(groupConfig, GroupConfig.configDef().configKeys()); + + // test for case ConfigResource.Type == CLIENT_METRICS + Config clientMetricsConfig = configResourceMap.get(clientMetricsResource); + assertNotContainsAnyInternalConfig(clientMetricsConfig, ClientMetricsConfigs.configDef().configKeys()); + + // test for controller node, and ConfigResource.Type == BROKER + ConfigResource controllerResource = new ConfigResource(ConfigResource.Type.BROKER, "3000"); + Map controllerConfigMap = controllerAdmin.describeConfigs(List.of(controllerResource)).all().get(); + Config controllerConfig = controllerConfigMap.get(controllerResource); + assertNotContainsInternalConfig(controllerConfig, KafkaConfig.configDef().configKeys(), ignoreConfigNames); + } + } + + @ClusterTest(types = {Type.KRAFT}) + public void testInternalConfigsDoNotReturnForCreateTopics(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + // test for createTopics API + Config config = admin.createTopics(List.of(new NewTopic(TOPIC, 1, (short) 1))).config(TOPIC).get(); + assertNotContainsAnyInternalConfig(config, LogConfig.configKeys()); + } + } + + private void assertNotContainsAnyInternalConfig(Config config, Map configKeyMap) { + assertNotContainsInternalConfig(config, configKeyMap, Set.of()); + } + + private void assertNotContainsInternalConfig(Config config, Map configKeyMap, + Set ignoreConfigNames) { + assertFalse(config.entries().isEmpty()); + for (ConfigEntry topicConfigEntry : 
config.entries()) { + String configName = topicConfigEntry.name(); + ConfigDef.ConfigKey configKey = configKeyMap.get(configName); + + assertNotNull(configKey, "The ConfigKey of the config named '" + configName + "' should not be null"); + if (!ignoreConfigNames.contains(configName)) { + assertFalse(configKey.internalConfig, "The config named '" + configName + "' is an internal config and should not be returned"); + } + } + } } diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerAssignmentPoller.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerAssignmentPoller.java new file mode 100644 index 0000000000000..a970f7f58d29e --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerAssignmentPoller.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.server.util.ShutdownableThread; + +import java.time.Duration; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +public class ConsumerAssignmentPoller extends ShutdownableThread { + private final Consumer consumer; + private final Set partitionsToAssign; + private volatile Optional thrownException = Optional.empty(); + private volatile int receivedMessages = 0; + private final Set partitionAssignment = Collections.synchronizedSet(new HashSet<>()); + private volatile boolean subscriptionChanged = false; + private List topicsSubscription; + private final ConsumerRebalanceListener rebalanceListener; + + public ConsumerAssignmentPoller(Consumer consumer, List topicsToSubscribe) { + this(consumer, topicsToSubscribe, Set.of(), null); + } + + public ConsumerAssignmentPoller(Consumer consumer, Set partitionsToAssign) { + this(consumer, List.of(), partitionsToAssign, null); + } + + public ConsumerAssignmentPoller(Consumer consumer, + List topicsToSubscribe, + Set partitionsToAssign, + ConsumerRebalanceListener userRebalanceListener) { + super("daemon-consumer-assignment", false); + this.consumer = consumer; + this.partitionsToAssign = partitionsToAssign; + this.topicsSubscription = topicsToSubscribe; + + this.rebalanceListener = new ConsumerRebalanceListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + partitionAssignment.addAll(partitions); + if (userRebalanceListener != null) + userRebalanceListener.onPartitionsAssigned(partitions); + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + 
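// Keep the tracked assignment in sync so tests reading consumerAssignment() observe the revocation, then delegate to the user's listener. +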
partitionAssignment.removeAll(partitions); + if (userRebalanceListener != null) + userRebalanceListener.onPartitionsRevoked(partitions); + } + }; + + if (partitionsToAssign.isEmpty()) { + consumer.subscribe(topicsToSubscribe, rebalanceListener); + } else { + consumer.assign(List.copyOf(partitionsToAssign)); + } + } + + public Set consumerAssignment() { + return Set.copyOf(partitionAssignment); + } + + public void subscribe(List newTopicsToSubscribe) { + if (subscriptionChanged) { + throw new IllegalStateException("Do not call subscribe until the previous subscribe request is processed."); + } + if (!partitionsToAssign.isEmpty()) { + throw new IllegalStateException("Cannot call subscribe when configured to use manual partition assignment"); + } + this.topicsSubscription = newTopicsToSubscribe; + subscriptionChanged = true; + } + + public boolean isSubscribeRequestProcessed() { + return !subscriptionChanged; + } + + @Override + public boolean initiateShutdown() { + boolean res = super.initiateShutdown(); + consumer.wakeup(); + return res; + } + + @Override + public void doWork() { + if (subscriptionChanged) { + consumer.subscribe(topicsSubscription, rebalanceListener); + subscriptionChanged = false; + } + try { + ConsumerRecords records = consumer.poll(Duration.ofMillis(50)); + receivedMessages += records.count(); + } catch (WakeupException e) { + // ignore for shutdown + } catch (Throwable e) { + thrownException = Optional.of(e); + throw e; + } + } + + public Optional getThrownException() { + return thrownException; + } + + public int receivedMessages() { + return receivedMessages; + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerBounceTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerBounceTest.java new file mode 100644 index 0000000000000..2583d09997741 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerBounceTest.java @@ -0,0 +1,814 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer; + +import kafka.server.KafkaBroker; + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.errors.GroupMaxSizeReachedException; +import org.apache.kafka.common.message.FindCoordinatorRequestData; +import org.apache.kafka.common.requests.FindCoordinatorRequest; +import org.apache.kafka.common.requests.FindCoordinatorResponse; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.server.IntegrationTestUtils; +import org.apache.kafka.server.config.KRaftConfigs; +import org.apache.kafka.server.config.ReplicationConfigs; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.server.util.ShutdownableThread; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.slf4j.Logger; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.IntStream; + +import static org.apache.kafka.test.TestUtils.SEEDED_RANDOM; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration tests for the consumer that cover basic usage as well as server failures + */ +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = ConsumerBounceTest.BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "3"), // don't want to lose offset + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "10"), // set small enough session timeout + @ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "0"), + + // Tests will run for CONSUMER and CLASSIC group protocol, so set the group max size property + // required for each. 
+ @ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MAX_SIZE_CONFIG, value = ConsumerBounceTest.MAX_GROUP_SIZE), + @ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, value = ConsumerBounceTest.MAX_GROUP_SIZE), + + @ClusterConfigProperty(key = ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, value = "false"), + @ClusterConfigProperty(key = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, value = "false"), + @ClusterConfigProperty(key = TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, value = "true"), + @ClusterConfigProperty(key = TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, value = "1000"), + @ClusterConfigProperty(key = ReplicationConfigs.UNCLEAN_LEADER_ELECTION_INTERVAL_MS_CONFIG, value = "50"), + + @ClusterConfigProperty(key = KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG, value = "50"), + @ClusterConfigProperty(key = KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG, value = "300") + } +) +public class ConsumerBounceTest { + + private final Logger logger = new LogContext("ConsumerBounceTest").logger(this.getClass()); + + public static final int BROKER_COUNT = 3; + public static final String MAX_GROUP_SIZE = "5"; + + private final Optional gracefulCloseTimeMs = Optional.of(1000L); + private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(2); + private final String topic = "topic"; + private final int partition = 0; + private final int numPartitions = 3; + private final short numReplica = 3; + private final TopicPartition topicPartition = new TopicPartition(topic, partition); + + private final ClusterInstance clusterInstance; + + private final List> consumers = new ArrayList<>(); + private final List consumerPollers = new ArrayList<>(); + + ConsumerBounceTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + @BeforeEach + void setUp() throws InterruptedException { + clusterInstance.createTopic(topic, numPartitions, numReplica); + } + + @AfterEach + void tearDown() throws InterruptedException { + consumerPollers.forEach(poller -> { + try { + poller.shutdown(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + executor.shutdownNow(); + // Wait for any active tasks to terminate to ensure consumer is not closed while being used from another thread + assertTrue(executor.awaitTermination(5000, TimeUnit.MILLISECONDS), "Executor did not terminate"); + consumers.forEach(Consumer::close); + } + + @ClusterTest + public void testClassicConsumerConsumptionWithBrokerFailures() throws Exception { + consumeWithBrokerFailures(10, GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerConsumptionWithBrokerFailures() throws Exception { + consumeWithBrokerFailures(10, GroupProtocol.CONSUMER); + } + + /** + * 1. Produce a bunch of messages + * 2. 
Then consume the messages while killing and restarting brokers at random + */ + private void consumeWithBrokerFailures(int numIters, GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 1000; + ClientsTestUtils.sendRecords(clusterInstance, topicPartition, numRecords); + + AtomicInteger consumed = new AtomicInteger(0); + try (Consumer consumer = clusterInstance.consumer(Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + + consumer.subscribe(List.of(topic)); + + BounceBrokerScheduler scheduler = new BounceBrokerScheduler(numIters, clusterInstance); + try { + scheduler.start(); + + while (scheduler.isRunning()) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + + records.forEach(record -> { + assertEquals(consumed.get(), record.offset()); + consumed.incrementAndGet(); + }); + + if (!records.isEmpty()) { + consumer.commitSync(); + + long currentPosition = consumer.position(topicPartition); + long committedOffset = consumer.committed(Set.of(topicPartition)).get(topicPartition).offset(); + assertEquals(currentPosition, committedOffset); + + if (currentPosition == numRecords) { + consumer.seekToBeginning(List.of()); + consumed.set(0); + } + } + } + } finally { + scheduler.shutdown(); + } + } + } + + @ClusterTest + public void testClassicConsumerSeekAndCommitWithBrokerFailures() throws InterruptedException { + seekAndCommitWithBrokerFailures(5, GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerSeekAndCommitWithBrokerFailures() throws InterruptedException { + seekAndCommitWithBrokerFailures(5, GroupProtocol.CONSUMER); + } + + private void seekAndCommitWithBrokerFailures(int numIters, GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 1000; + ClientsTestUtils.sendRecords(clusterInstance, topicPartition, numRecords); + + try (Consumer consumer = clusterInstance.consumer(Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + consumer.assign(List.of(topicPartition)); + consumer.seek(topicPartition, 0); + + TestUtils.waitForCondition(() -> clusterInstance.brokers().values().stream().allMatch(broker -> + broker.replicaManager().localLog(topicPartition).get().highWatermark() == numRecords + ), 30000, "Failed to update high watermark for followers after timeout."); + + BounceBrokerScheduler scheduler = new BounceBrokerScheduler(numIters, clusterInstance); + try { + scheduler.start(); + + while (scheduler.isRunning()) { + int coin = SEEDED_RANDOM.nextInt(0, 3); + + if (coin == 0) { + logger.info("Seeking to end of log."); + consumer.seekToEnd(List.of()); + assertEquals(numRecords, consumer.position(topicPartition)); + } else if (coin == 1) { + int pos = SEEDED_RANDOM.nextInt(numRecords); + logger.info("Seeking to {}", pos); + consumer.seek(topicPartition, pos); + assertEquals(pos, consumer.position(topicPartition)); + } else { + logger.info("Committing offset."); + consumer.commitSync(); + assertEquals(consumer.position(topicPartition), consumer.committed(Set.of(topicPartition)).get(topicPartition).offset()); + } + } + } finally { + scheduler.shutdown(); + } + } + } + + @ClusterTest + public void testClassicSubscribeWhenTopicUnavailable() throws InterruptedException { + testSubscribeWhenTopicUnavailable(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncSubscribeWhenTopicUnavailable() throws InterruptedException { + testSubscribeWhenTopicUnavailable(GroupProtocol.CONSUMER); + } + + private void testSubscribeWhenTopicUnavailable(GroupProtocol groupProtocol) 
throws InterruptedException { + String newTopic = "new-topic"; + TopicPartition newTopicPartition = new TopicPartition(newTopic, 0); + int numRecords = 1000; + + Consumer consumer = clusterInstance.consumer( + Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name, ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 6000, + ConsumerConfig.METADATA_MAX_AGE_CONFIG, 100)); + consumers.add(consumer); + consumer.subscribe(List.of(newTopic)); + consumer.poll(Duration.ZERO); + // Schedule topic creation after 2 seconds + executor.schedule(() -> assertDoesNotThrow(() -> clusterInstance.createTopic(newTopic, numPartitions, numReplica)), + 2, TimeUnit.SECONDS); + + // Start first poller + ConsumerAssignmentPoller poller = new ConsumerAssignmentPoller(consumer, List.of(newTopic)); + consumerPollers.add(poller); + poller.start(); + ClientsTestUtils.sendRecords(clusterInstance, newTopicPartition, numRecords); + receiveExactRecords(poller, numRecords, 60000L); + poller.shutdown(); + + // Simulate broker failure and recovery + clusterInstance.brokers().keySet().forEach(clusterInstance::shutdownBroker); + Thread.sleep(500); + clusterInstance.brokers().keySet().forEach(clusterInstance::startBroker); + + // Start second poller after recovery + ConsumerAssignmentPoller poller2 = new ConsumerAssignmentPoller(consumer, List.of(newTopic)); + consumerPollers.add(poller2); + poller2.start(); + + ClientsTestUtils.sendRecords(clusterInstance, newTopicPartition, numRecords); + receiveExactRecords(poller2, numRecords, 60000L); + } + + + @ClusterTest + public void testClassicClose() throws Exception { + testClose(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncClose() throws Exception { + testClose(GroupProtocol.CONSUMER); + } + + private void testClose(GroupProtocol groupProtocol) throws Exception { + int numRecords = 10; + ClientsTestUtils.sendRecords(clusterInstance, topicPartition, numRecords); + + checkCloseGoodPath(groupProtocol, numRecords, "group1"); + checkCloseWithCoordinatorFailure(groupProtocol, numRecords, "group2", "group3"); + checkCloseWithClusterFailure(groupProtocol, numRecords, "group4", "group5"); + } + + /** + * Consumer is closed while cluster is healthy. Consumer should complete pending offset commits + * and leave group. New consumer instance should be able to join group and start consuming from + * last committed offset. + */ + private void checkCloseGoodPath(GroupProtocol groupProtocol, int numRecords, String groupId) throws InterruptedException { + Consumer consumer = createConsumerAndReceive(groupId, false, numRecords, Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name)); + assertDoesNotThrow(() -> submitCloseAndValidate(consumer, Long.MAX_VALUE, Optional.empty(), gracefulCloseTimeMs).get()); + checkClosedState(groupId, numRecords); + } + + /** + * Consumer closed while coordinator is unavailable. Close of consumers using group + * management should complete after commit attempt even though commits fail due to rebalance. + * Close of consumers using manual assignment should complete with successful commits since a + * broker is available. 
+ */ + private void checkCloseWithCoordinatorFailure(GroupProtocol groupProtocol, int numRecords, String dynamicGroup, String manualGroup) throws Exception { + Consumer dynamicConsumer = createConsumerAndReceive(dynamicGroup, false, numRecords, Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name)); + Consumer manualConsumer = createConsumerAndReceive(manualGroup, true, numRecords, Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name)); + + findCoordinators(List.of(dynamicGroup, manualGroup)).forEach(clusterInstance::shutdownBroker); + + submitCloseAndValidate(dynamicConsumer, Long.MAX_VALUE, Optional.empty(), gracefulCloseTimeMs).get(); + submitCloseAndValidate(manualConsumer, Long.MAX_VALUE, Optional.empty(), gracefulCloseTimeMs).get(); + + restartDeadBrokers(); + checkClosedState(dynamicGroup, 0); + checkClosedState(manualGroup, numRecords); + } + + /** + * Consumer is closed while all brokers are unavailable. Cannot rebalance or commit offsets since + * there is no coordinator, but close should timeout and return. If close is invoked with a very + * large timeout, close should timeout after request timeout. + */ + private void checkCloseWithClusterFailure(GroupProtocol groupProtocol, int numRecords, String group1, String group2) throws Exception { + Consumer consumer1 = createConsumerAndReceive(group1, false, numRecords, Map.of(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name)); + Map consumerConfig = new HashMap<>(); + + long requestTimeout = 6000; + if (groupProtocol.equals(GroupProtocol.CLASSIC)) { + consumerConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5000"); + consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000"); + } + consumerConfig.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, Long.toString(requestTimeout)); + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name); + + Consumer consumer2 = createConsumerAndReceive(group2, true, numRecords, consumerConfig); + + clusterInstance.brokers().keySet().forEach(clusterInstance::shutdownBroker); + + long closeTimeout = 2000; + submitCloseAndValidate(consumer1, closeTimeout, Optional.empty(), Optional.of(closeTimeout)).get(); + submitCloseAndValidate(consumer2, Long.MAX_VALUE, Optional.empty(), Optional.of(requestTimeout)).get(); + } + + private Set findCoordinators(List groups) throws Exception { + FindCoordinatorRequest request = new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData() + .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id()) + .setCoordinatorKeys(groups)).build(); + Set nodes = new HashSet<>(); + TestUtils.waitForCondition(() -> { + FindCoordinatorResponse response = null; + try { + response = IntegrationTestUtils.connectAndReceive(request, clusterInstance.brokerBoundPorts().get(0)); + } catch (IOException e) { + return false; + } + + if (response.hasError()) + return false; + for (String group : groups) + if (response.coordinatorByKey(group).isEmpty()) + return false; + else + nodes.add(response.coordinatorByKey(group).get().nodeId()); + return true; + }, "Failed to find coordinator for group " + groups); + return nodes; + } + + @ClusterTest + public void testClassicConsumerReceivesFatalExceptionWhenGroupPassesMaxSize() throws Exception { + testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerReceivesFatalExceptionWhenGroupPassesMaxSize() throws Exception { + testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(GroupProtocol.CONSUMER); + } 
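+ /**
+  * Fills the group up to the configured group max size, then adds one more member and verifies that the
+  * extra consumer fails with GroupMaxSizeReachedException while the original members keep receiving records.
+  */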
+ + private void testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(GroupProtocol groupProtocol) throws Exception { + String group = "fatal-exception-test"; + String topic = "fatal-exception-test"; + + Map consumerConfig = new HashMap<>(); + int numPartition = Integer.parseInt(MAX_GROUP_SIZE); + + consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000"); + if (groupProtocol.equals(GroupProtocol.CLASSIC)) { + consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000"); + } + consumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + + clusterInstance.createTopic(topic, Integer.parseInt(MAX_GROUP_SIZE), (short) BROKER_COUNT); + Set partitions = new HashSet<>(); + for (int i = 0; i < Integer.parseInt(MAX_GROUP_SIZE); ++i) + partitions.add(new TopicPartition(topic, i)); + + addConsumersToGroupAndWaitForGroupAssignment( + Integer.parseInt(MAX_GROUP_SIZE), + List.of(topic), + partitions, + group, + consumerConfig + ); + + addConsumersToGroup( + 1, + List.of(topic), + group, + consumerConfig + ); + + ConsumerAssignmentPoller rejectedConsumer = consumerPollers.get(consumerPollers.size() - 1); + consumerPollers.remove(consumerPollers.size() - 1); + + TestUtils.waitForCondition( + () -> rejectedConsumer.getThrownException().isPresent(), + "Extra consumer did not throw an exception" + ); + + assertInstanceOf(GroupMaxSizeReachedException.class, rejectedConsumer.getThrownException().get()); + + // assert group continues to live and the records to be distributed across all partitions. + var data = "data".getBytes(StandardCharsets.UTF_8); + try (Producer producer = clusterInstance.producer()) { + IntStream.range(0, numPartition * 100).forEach(index -> + producer.send(new ProducerRecord<>(topic, index % numPartition, data, data))); + } + + TestUtils.waitForCondition( + () -> consumerPollers.stream().allMatch(p -> p.receivedMessages() >= 100), + 10000L, "The consumers in the group could not fetch the expected records" + ); + } + + /** + * Create 'numOfConsumersToAdd' consumers, add them to the consumer group, and create corresponding + * pollers. Wait for partition re-assignment and validate. + * + * Assignment validation requires that total number of partitions is greater than or equal to + * the resulting number of consumers in the group. + * + * @param numOfConsumersToAdd number of consumers to create and add to the consumer group + * @param topicsToSubscribe topics to subscribe + * @param subscriptions set of all topic partitions + * @param group consumer group ID + */ + private void addConsumersToGroupAndWaitForGroupAssignment( + int numOfConsumersToAdd, + List topicsToSubscribe, + Set subscriptions, + String group, + Map consumerConfig + ) throws InterruptedException { + // Validation: number of consumers should not exceed number of partitions + assertTrue(consumers.size() + numOfConsumersToAdd <= subscriptions.size(), + "Total consumers exceed number of partitions"); + + // Add consumers and pollers + addConsumersToGroup(numOfConsumersToAdd, topicsToSubscribe, group, consumerConfig); + + // Validate that all pollers have assigned partitions + validateGroupAssignment(consumerPollers, subscriptions); + } + + /** + * Check whether partition assignment is valid. + * Assumes partition assignment is valid iff: + * 1. Every consumer got assigned at least one partition + * 2. Each partition is assigned to only one consumer + * 3. Every partition is assigned to one of the consumers + * 4. 
The assignment is the same as expected assignment (if provided) + * + * @param assignments List of assignments, one set per consumer + * @param partitions All expected partitions + * @param expectedAssignment Optional expected assignment + * @return true if assignment is valid + */ + private boolean isPartitionAssignmentValid( + List> assignments, + Set partitions, + List> expectedAssignment + ) { + // 1. Check that every consumer has non-empty assignment + boolean allNonEmpty = assignments.stream().noneMatch(Set::isEmpty); + if (!allNonEmpty) return false; + + // 2. Check that total assigned partitions equals number of unique partitions + Set allAssignedPartitions = new HashSet<>(); + for (Set assignment : assignments) { + allAssignedPartitions.addAll(assignment); + } + + if (allAssignedPartitions.size() != partitions.size()) { + // Either some partitions were assigned multiple times or some were not assigned + return false; + } + + // 3. Check that assigned partitions exactly match the expected set + if (!allAssignedPartitions.equals(partitions)) { + return false; + } + + // 4. If expected assignment is given, check for exact match + if (expectedAssignment != null && !expectedAssignment.isEmpty()) { + if (assignments.size() != expectedAssignment.size()) return false; + for (int i = 0; i < assignments.size(); i++) { + if (!assignments.get(i).equals(expectedAssignment.get(i))) return false; + } + } + + return true; + } + + /** + * Wait for consumers to get partition assignment and validate it. + * + * @param consumerPollers Consumer pollers corresponding to the consumer group being tested + * @param subscriptions Set of all topic partitions + * @param msg Optional message to print if validation fails + * @param waitTimeMs Wait timeout in milliseconds + * @param expectedAssignments Expected assignments (optional) + */ + private void validateGroupAssignment( + List consumerPollers, + Set subscriptions, + Optional msg, + long waitTimeMs, + List> expectedAssignments + ) throws InterruptedException { + List> assignments = new ArrayList<>(); + + TestUtils.waitForCondition(() -> { + assignments.clear(); + consumerPollers.forEach(poller -> assignments.add(poller.consumerAssignment())); + return isPartitionAssignmentValid(assignments, subscriptions, expectedAssignments); + }, waitTimeMs, msg.orElse("Did not get valid assignment for partitions " + subscriptions + ". Instead got: " + assignments)); + } + + // Overload for convenience (optional msg and expectedAssignments) + private void validateGroupAssignment( + List consumerPollers, + Set subscriptions + ) throws InterruptedException { + validateGroupAssignment(consumerPollers, subscriptions, Optional.empty(), 10000L, new ArrayList<>()); + } + + /** + * Create 'numOfConsumersToAdd' consumers, add them to the consumer group, and create corresponding pollers. 
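+ * Each poller is started immediately and registered in consumerPollers so it is shut down in tearDown.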
+ * + * @param numOfConsumersToAdd number of consumers to create and add to the consumer group + * @param topicsToSubscribe topics to which new consumers will subscribe + * @param group consumer group ID + */ + private void addConsumersToGroup( + int numOfConsumersToAdd, + List topicsToSubscribe, + String group, + Map consumerConfigs) { + + Map configs = new HashMap<>(consumerConfigs); + configs.put(ConsumerConfig.GROUP_ID_CONFIG, group); + + for (int i = 0; i < numOfConsumersToAdd; i++) { + Consumer consumer = clusterInstance.consumer(configs); + consumers.add(consumer); + + ConsumerAssignmentPoller poller = new ConsumerAssignmentPoller(consumer, topicsToSubscribe); + poller.start(); + consumerPollers.add(poller); + } + } + + @ClusterTest + public void testClassicCloseDuringRebalance() throws Exception { + testCloseDuringRebalance(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncCloseDuringRebalance() throws Exception { + testCloseDuringRebalance(GroupProtocol.CONSUMER); + } + + public void testCloseDuringRebalance(GroupProtocol groupProtocol) throws Exception { + Map consumerConfig = new HashMap<>(); + consumerConfig.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000"); + if (groupProtocol.equals(GroupProtocol.CLASSIC)) { + consumerConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000"); + } + consumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + + checkCloseDuringRebalance(consumerConfig); + } + + private void checkCloseDuringRebalance(Map consumerConfig) throws Exception { + Map configs = new HashMap<>(consumerConfig); + String groupId = "group"; + configs.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); + + Consumer consumer1 = clusterInstance.consumer(configs); + Future f1 = subscribeAndPoll(consumer1, Optional.empty()); + waitForRebalance(2000, f1, null); + + Consumer consumer2 = clusterInstance.consumer(configs); + Future f2 = subscribeAndPoll(consumer2, Optional.empty()); + waitForRebalance(2000, f2, consumer1); + + Future rebalanceFuture = createConsumerToRebalance(groupId); + + Future closeFuture1 = submitCloseAndValidate(consumer1, Long.MAX_VALUE, Optional.empty(), gracefulCloseTimeMs); + + waitForRebalance(2000, rebalanceFuture, consumer2); + + createConsumerToRebalance(groupId); // one more time + clusterInstance.brokers().values().forEach(KafkaBroker::shutdown); + + Future closeFuture2 = submitCloseAndValidate(consumer2, Long.MAX_VALUE, Optional.empty(), Optional.of(0L)); + + closeFuture1.get(2000, TimeUnit.MILLISECONDS); + closeFuture2.get(2000, TimeUnit.MILLISECONDS); + } + + Future subscribeAndPoll(Consumer consumer, Optional revokeSemaphore) { + return executor.submit(() -> { + consumer.subscribe(List.of(topic)); + revokeSemaphore.ifPresent(Semaphore::release); + consumer.poll(Duration.ofMillis(500)); + return null; + }); + } + + void waitForRebalance(long timeoutMs, Future future, Consumer otherConsumers) { + long startMs = System.currentTimeMillis(); + while (System.currentTimeMillis() < startMs + timeoutMs && !future.isDone()) { + if (otherConsumers != null) { + otherConsumers.poll(Duration.ofMillis(100)); + } + } + assertTrue(future.isDone(), "Rebalance did not complete in time"); + } + + Future createConsumerToRebalance(String groupId) throws Exception { + Consumer consumer = clusterInstance.consumer(Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId)); + consumers.add(consumer); + Semaphore rebalanceSemaphore = new Semaphore(0); + Future future = subscribeAndPoll(consumer, Optional.of(rebalanceSemaphore)); + 
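// subscribeAndPoll releases the semaphore once the consumer has subscribed and is about to poll, so acquiring it here means a rebalance has been triggered for the group. +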
assertTrue(rebalanceSemaphore.tryAcquire(2000, TimeUnit.MILLISECONDS), "Rebalance not triggered"); + assertFalse(future.isDone(), "Rebalance completed too early"); + return future; + } + + + + private Consumer createConsumerAndReceive(String groupId, boolean manualAssign, int numRecords, + Map consumerConfig) throws InterruptedException { + Map configs = new HashMap<>(consumerConfig); + configs.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); + Consumer consumer = clusterInstance.consumer(configs); + ConsumerAssignmentPoller poller; + + if (manualAssign) { + poller = new ConsumerAssignmentPoller(consumer, Set.of(topicPartition)); + } else { + poller = new ConsumerAssignmentPoller(consumer, List.of(topic)); + } + poller.start(); + consumers.add(consumer); + consumerPollers.add(poller); + receiveExactRecords(poller, numRecords, 60000L); + poller.shutdown(); + + return consumer; + } + + private void restartDeadBrokers() { + clusterInstance.brokers().forEach((id, broker) -> { + if (broker.isShutdown()) { + broker.startup(); + } + }); + } + + private void checkClosedState(String groupId, int committedRecords) throws InterruptedException { + // Check that close was graceful with offsets committed and leave group sent. + // New instance of consumer should be assigned partitions immediately and should see committed offsets. Semaphore assignSemaphore = new Semaphore(0); + + Semaphore assignSemaphore = new Semaphore(0); + try (Consumer consumer = clusterInstance.consumer(Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId))) { + consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + assignSemaphore.release(); + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + // Do nothing + } + }); + + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)); + return assignSemaphore.tryAcquire(); + }, "Assignment did not complete on time"); + + if (committedRecords > 0) { + Map committed = consumer.committed(Set.of(topicPartition)); + long offset = committed.get(topicPartition).offset(); + assertEquals(committedRecords, offset, "Committed offset does not match expected value."); + } + } + } + + private Future submitCloseAndValidate( + Consumer consumer, + long closeTimeoutMs, + Optional minCloseTimeMs, + Optional maxCloseTimeMs) { + + return executor.submit(() -> { + final long closeGraceTimeMs = 2000; + long startMs = System.currentTimeMillis(); + logger.info("Closing consumer with timeout {} ms.", closeTimeoutMs); + + consumer.close(CloseOptions.timeout(Duration.ofMillis(closeTimeoutMs))); + long timeTakenMs = System.currentTimeMillis() - startMs; + + maxCloseTimeMs.ifPresent(ms -> + assertTrue(timeTakenMs < ms + closeGraceTimeMs, "Close took too long " + timeTakenMs) + ); + + minCloseTimeMs.ifPresent(ms -> + assertTrue(timeTakenMs >= ms, "Close finished too quickly " + timeTakenMs) + ); + + logger.info("consumer.close() completed in {} ms.", timeTakenMs); + }, 0); + } + + private void receiveExactRecords(ConsumerAssignmentPoller consumer, int numRecords, long timeoutMs) throws InterruptedException { + TestUtils.waitForCondition(() -> consumer.receivedMessages() == numRecords, timeoutMs, + String.format("Consumer did not receive expected %d. 
It received %d", numRecords, consumer.receivedMessages())); + } + + // A mock class to represent broker bouncing (simulate broker restart behavior) + private static class BounceBrokerScheduler extends ShutdownableThread { + private final int numIters; + private int iter = 0; + + final ClusterInstance clusterInstance; + + public BounceBrokerScheduler(int numIters, ClusterInstance clusterInstance) { + super("daemon-bounce-broker", false); + this.numIters = numIters; + this.clusterInstance = clusterInstance; + } + + private void killRandomBroker() { + this.clusterInstance.shutdownBroker(TestUtils.randomSelect(clusterInstance.brokerIds())); + } + + private void restartDeadBrokers() { + clusterInstance.brokers().forEach((id, broker) -> { + if (broker.isShutdown()) { + broker.startup(); + } + }); + } + + @Override + public void doWork() { + killRandomBroker(); + assertDoesNotThrow(() -> Thread.sleep(500)); + restartDeadBrokers(); + + iter++; + if (iter == numIters) { + initiateShutdown(); + } else { + assertDoesNotThrow(() -> Thread.sleep(500)); + } + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerIntegrationTest.java index a56de229318dd..220866c240f4a 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerIntegrationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerIntegrationTest.java @@ -16,7 +16,12 @@ */ package org.apache.kafka.clients.consumer; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager; +import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.KafkaException; @@ -26,17 +31,21 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTests; import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.test.TestUtils; import java.time.Duration; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -216,6 +225,116 @@ public void testLeaderEpoch(ClusterInstance clusterInstance) throws Exception { } } + @ClusterTest( + types = {Type.KRAFT}, + brokers = 3, + serverProperties = { + @ClusterConfigProperty(id = 0, key = "broker.rack", value = "rack0"), + @ClusterConfigProperty(id = 1, key = "broker.rack", value = "rack1"), + @ClusterConfigProperty(id = 2, key = "broker.rack", value = "rack2"), + @ClusterConfigProperty(key = 
GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, value = "org.apache.kafka.clients.consumer.RackAwareAssignor") + } + ) + public void testRackAwareAssignment(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + String topic = "test-topic"; + try (Admin admin = clusterInstance.admin(); + Producer producer = clusterInstance.producer(); + Consumer consumer0 = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, "group0", + ConsumerConfig.CLIENT_RACK_CONFIG, "rack0", + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name() + )); + Consumer consumer1 = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, "group0", + ConsumerConfig.CLIENT_RACK_CONFIG, "rack1", + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name() + )); + Consumer consumer2 = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, "group0", + ConsumerConfig.CLIENT_RACK_CONFIG, "rack2", + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name() + )) + ) { + // Create a new topic with 1 partition on broker 0. + admin.createTopics(List.of(new NewTopic(topic, Map.of(0, List.of(0))))); + clusterInstance.waitTopicCreation(topic, 1); + + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + producer.flush(); + + consumer0.subscribe(List.of(topic)); + consumer1.subscribe(List.of(topic)); + consumer2.subscribe(List.of(topic)); + + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + return consumer0.assignment().equals(Set.of(new TopicPartition(topic, 0))) && + consumer1.assignment().isEmpty() && + consumer2.assignment().isEmpty(); + }, "Consumer 0 should be assigned to topic partition 0"); + + // Add a new partition 1 and 2 to broker 1. + admin.createPartitions( + Map.of( + topic, + NewPartitions.increaseTo(3, List.of(List.of(1), List.of(1))) + ) + ); + clusterInstance.waitTopicCreation(topic, 3); + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + return consumer0.assignment().equals(Set.of(new TopicPartition(topic, 0))) && + consumer1.assignment().equals(Set.of(new TopicPartition(topic, 1), new TopicPartition(topic, 2))) && + consumer2.assignment().isEmpty(); + }, "Consumer 1 should be assigned to topic partition 1 and 2"); + + // Add a new partition 3, 4, and 5 to broker 2. + admin.createPartitions( + Map.of( + topic, + NewPartitions.increaseTo(6, List.of(List.of(2), List.of(2), List.of(2))) + ) + ); + clusterInstance.waitTopicCreation(topic, 6); + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + return consumer0.assignment().equals(Set.of(new TopicPartition(topic, 0))) && + consumer1.assignment().equals(Set.of(new TopicPartition(topic, 1), new TopicPartition(topic, 2))) && + consumer2.assignment().equals(Set.of(new TopicPartition(topic, 3), new TopicPartition(topic, 4), new TopicPartition(topic, 5))); + }, "Consumer 2 should be assigned to topic partition 3, 4, and 5"); + + // Change partitions to different brokers. 
+ // partition 0 -> broker 2 + // partition 1 -> broker 2 + // partition 2 -> broker 2 + // partition 3 -> broker 1 + // partition 4 -> broker 1 + // partition 5 -> broker 0 + admin.alterPartitionReassignments(Map.of( + new TopicPartition(topic, 0), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 1), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 2), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 3), Optional.of(new NewPartitionReassignment(List.of(1))), + new TopicPartition(topic, 4), Optional.of(new NewPartitionReassignment(List.of(1))), + new TopicPartition(topic, 5), Optional.of(new NewPartitionReassignment(List.of(0))) + )).all().get(); + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + return consumer0.assignment().equals(Set.of(new TopicPartition(topic, 5))) && + consumer1.assignment().equals(Set.of(new TopicPartition(topic, 3), new TopicPartition(topic, 4))) && + consumer2.assignment().equals(Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(topic, 2))); + }, "Consumer with topic partition mapping should be 0 -> 5 | 1 -> 3, 4 | 2 -> 0, 1, 2"); + } + } + private void sendMsg(ClusterInstance clusterInstance, String topic, int sendMsgNum) { try (var producer = clusterInstance.producer(Map.of( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerWithLegacyMessageFormatIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerWithLegacyMessageFormatIntegrationTest.java new file mode 100644 index 0000000000000..755d9c89b7c52 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ConsumerWithLegacyMessageFormatIntegrationTest.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.record.AbstractRecords; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecordsBuilder; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.RecordVersion; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.storage.internals.log.UnifiedLog; + +import org.junit.jupiter.api.BeforeEach; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +@ClusterTestDefaults( + brokers = 3 +) +public class ConsumerWithLegacyMessageFormatIntegrationTest { + + private final ClusterInstance cluster; + + private final String topic1 = "part-test-topic-1"; + private final String topic2 = "part-test-topic-2"; + private final String topic3 = "part-test-topic-3"; + + private final TopicPartition t1p0 = new TopicPartition(topic1, 0); + private final TopicPartition t1p1 = new TopicPartition(topic1, 1); + private final TopicPartition t2p0 = new TopicPartition(topic2, 0); + private final TopicPartition t2p1 = new TopicPartition(topic2, 1); + private final TopicPartition t3p0 = new TopicPartition(topic3, 0); + private final TopicPartition t3p1 = new TopicPartition(topic3, 1); + + public ConsumerWithLegacyMessageFormatIntegrationTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + private void appendLegacyRecords(int numRecords, TopicPartition tp, int brokerId, byte magicValue) { + List records = new ArrayList<>(); + for (int i = 0; i < numRecords; i++) { + records.add(new SimpleRecord(i, ("key " + i).getBytes(), ("value " + i).getBytes())); + } + + ByteBuffer buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, + CompressionType.NONE, records)); + MemoryRecordsBuilder builder = MemoryRecords.builder( + buffer, + magicValue, + Compression.of(CompressionType.NONE).build(), + TimestampType.CREATE_TIME, + 0L, + RecordBatch.NO_TIMESTAMP, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + 0, + false, + RecordBatch.NO_PARTITION_LEADER_EPOCH + ); + + records.forEach(builder::append); + + cluster.brokers().values().stream() + .filter(b -> b.config().brokerId() == brokerId) + .forEach(b -> { + UnifiedLog unifiedLog = b.replicaManager().logManager().getLog(tp, false).get(); + unifiedLog.appendAsLeaderWithRecordVersion(builder.build(), 0, RecordVersion.lookup(magicValue)); + // Default isolation.level is read_uncommitted. 
It makes Partition#fetchOffsetForTimestamp return UnifiedLog#highWatermark,
+                // so we increase the high watermark here to make it return the correct offset.
+                assertDoesNotThrow(() -> unifiedLog.maybeIncrementHighWatermark(unifiedLog.logEndOffsetMetadata()));
+            });
+    }
+
+    private void createTopicWithAssignment(String topic, Map<Integer, List<Integer>> assignment) throws InterruptedException {
+        try (Admin admin = cluster.admin()) {
+            NewTopic newTopic = new NewTopic(topic, assignment);
+            admin.createTopics(List.of(newTopic));
+            cluster.waitTopicCreation(topic, assignment.size());
+        }
+    }
+
+    @BeforeEach
+    public void setupTopics() throws InterruptedException {
+        cluster.createTopic(topic1, 2, (short) 1);
+        createTopicWithAssignment(topic2, Map.of(0, List.of(0), 1, List.of(1)));
+        createTopicWithAssignment(topic3, Map.of(0, List.of(0), 1, List.of(1)));
+
+        // v2 message format for topic1
+        ClientsTestUtils.sendRecords(cluster, t1p0, 100, 0);
+        ClientsTestUtils.sendRecords(cluster, t1p1, 100, 0);
+        // v0 message format for topic2
+        appendLegacyRecords(100, t2p0, 0, RecordBatch.MAGIC_VALUE_V0);
+        appendLegacyRecords(100, t2p1, 1, RecordBatch.MAGIC_VALUE_V0);
+        // v1 message format for topic3
+        appendLegacyRecords(100, t3p0, 0, RecordBatch.MAGIC_VALUE_V1);
+        appendLegacyRecords(100, t3p1, 1, RecordBatch.MAGIC_VALUE_V1);
+    }
+
+    @ClusterTest
+    public void testOffsetsForTimesWithClassicConsumer() {
+        testOffsetsForTimes(GroupProtocol.CLASSIC);
+    }
+
+    @ClusterTest
+    public void testOffsetsForTimesWithAsyncConsumer() {
+        testOffsetsForTimes(GroupProtocol.CONSUMER);
+    }
+
+    public void testOffsetsForTimes(GroupProtocol groupProtocol) {
+        try (Consumer<byte[], byte[]> consumer = cluster.consumer(Map.of(
+                GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT)))
+        ) {
+            // Test negative target time
+            assertThrows(IllegalArgumentException.class, () ->
+                consumer.offsetsForTimes(Map.of(t1p0, -1L)));
+
+            Map<TopicPartition, Long> timestampsToSearch = Map.of(
+                t1p0, 0L,
+                t1p1, 20L,
+                t2p0, 40L,
+                t2p1, 60L,
+                t3p0, 80L,
+                t3p1, 100L
+            );
+
+            Map<TopicPartition, OffsetAndTimestamp> timestampOffsets = consumer.offsetsForTimes(timestampsToSearch);
+
+            OffsetAndTimestamp timestampTopic1P0 = timestampOffsets.get(t1p0);
+            assertEquals(0, timestampTopic1P0.offset());
+            assertEquals(0, timestampTopic1P0.timestamp());
+            assertEquals(Optional.of(0), timestampTopic1P0.leaderEpoch());
+
+            OffsetAndTimestamp timestampTopic1P1 = timestampOffsets.get(t1p1);
+            assertEquals(20, timestampTopic1P1.offset());
+            assertEquals(20, timestampTopic1P1.timestamp());
+            assertEquals(Optional.of(0), timestampTopic1P1.leaderEpoch());
+
+            OffsetAndTimestamp timestampTopic2P0 = timestampOffsets.get(t2p0);
+            assertNull(timestampTopic2P0, "v0 message format shouldn't have timestamp");
+
+            OffsetAndTimestamp timestampTopic2P1 = timestampOffsets.get(t2p1);
+            assertNull(timestampTopic2P1);
+
+            OffsetAndTimestamp timestampTopic3P0 = timestampOffsets.get(t3p0);
+            assertEquals(80, timestampTopic3P0.offset());
+            assertEquals(80, timestampTopic3P0.timestamp());
+            assertEquals(Optional.empty(), timestampTopic3P0.leaderEpoch());
+
+            assertNull(timestampOffsets.get(t3p1), "v1 message format doesn't have leader epoch");
+        }
+
+    }
+
+    @ClusterTest
+    public void testEarliestOrLatestOffsetsWithClassicConsumer() {
+        testEarliestOrLatestOffsets(GroupProtocol.CLASSIC);
+    }
+
+    @ClusterTest
+    public void testEarliestOrLatestOffsetsWithAsyncConsumer() {
+        testEarliestOrLatestOffsets(GroupProtocol.CONSUMER);
+    }
+
+    public void testEarliestOrLatestOffsets(GroupProtocol groupProtocol) {
+        Set<TopicPartition> partitions = Set.of(t1p0, t1p1, t2p0, t2p1, t3p0,
t3p1); + + try (Consumer consumer = cluster.consumer(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT))) + ) { + Map earliests = consumer.beginningOffsets(partitions); + assertEquals(0L, earliests.get(t1p0)); + assertEquals(0L, earliests.get(t1p1)); + assertEquals(0L, earliests.get(t2p0)); + assertEquals(0L, earliests.get(t2p1)); + assertEquals(0L, earliests.get(t3p0)); + assertEquals(0L, earliests.get(t3p1)); + + Map latests = consumer.endOffsets(partitions); + assertEquals(100L, latests.get(t1p0)); + assertEquals(100L, latests.get(t1p1)); + assertEquals(100L, latests.get(t2p0)); + assertEquals(100L, latests.get(t2p1)); + assertEquals(100L, latests.get(t3p0)); + assertEquals(100L, latests.get(t3p1)); + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerAssignTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerAssignTest.java new file mode 100644 index 0000000000000..b12b6dacb4d8b --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerAssignTest.java @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; + +import org.junit.jupiter.api.BeforeEach; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration tests for the consumer that covers logic related to manual assignment. 
+ */ +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = PlaintextConsumerAssignTest.BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "3"), + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "10"), + } +) +public class PlaintextConsumerAssignTest { + + public static final int BROKER_COUNT = 3; + + private final ClusterInstance clusterInstance; + private final String topic = "topic"; + private final int partition = 0; + TopicPartition tp = new TopicPartition(topic, partition); + + PlaintextConsumerAssignTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + @BeforeEach + public void setup() throws InterruptedException { + clusterInstance.createTopic(topic, 2, (short) BROKER_COUNT); + } + + @ClusterTest + public void testClassicAssignAndCommitAsyncNotCommitted() throws Exception { + testAssignAndCommitAsyncNotCommitted(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndCommitAsyncNotCommitted() throws Exception { + testAssignAndCommitAsyncNotCommitted(GroupProtocol.CONSUMER); + } + + private void testAssignAndCommitAsyncNotCommitted(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 10000; + long startingTimestamp = System.currentTimeMillis(); + CountConsumerCommitCallback cb = new CountConsumerCommitCallback(); + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumer.commitAsync(cb); + ClientsTestUtils.pollUntilTrue(consumer, () -> cb.successCount >= 1 || cb.lastError.isPresent(), + 10000, "Failed to observe commit callback before timeout"); + Map committedOffset = consumer.committed(Set.of(tp)); + assertNotNull(committedOffset); + // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to + // tp. The committed offset should be null. This is intentional. + assertNull(committedOffset.get(tp)); + assertTrue(consumer.assignment().contains(tp)); + } + } + + @ClusterTest + public void testClassicAssignAndCommitSyncNotCommitted() throws Exception { + testAssignAndCommitSyncNotCommitted(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndCommitSyncNotCommitted() { + testAssignAndCommitSyncNotCommitted(GroupProtocol.CONSUMER); + } + + private void testAssignAndCommitSyncNotCommitted(GroupProtocol groupProtocol) { + int numRecords = 10000; + long startingTimestamp = System.currentTimeMillis(); + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumer.commitSync(); + Map committedOffset = consumer.committed(Set.of(tp)); + assertNotNull(committedOffset); + // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to + // tp. The committed offset should be null. This is intentional. 
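+            // For illustration: the commitSync() above is effectively a no-op because the consumer has no
+            // valid position yet. If the test had first established a position (for example with
+            // consumer.seek(tp, 0)), the same commitSync() would record offset 0 and the lookup below
+            // would return a non-null OffsetAndMetadata.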
+ assertNull(committedOffset.get(tp)); + assertTrue(consumer.assignment().contains(tp)); + } + } + + @ClusterTest + public void testClassicAssignAndCommitSyncAllConsumed() throws Exception { + testAssignAndCommitSyncAllConsumed(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndCommitSyncAllConsumed() throws Exception { + testAssignAndCommitSyncAllConsumed(GroupProtocol.CONSUMER); + } + + private void testAssignAndCommitSyncAllConsumed(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 10000; + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + long startingTimestamp = System.currentTimeMillis(); + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumer.seek(tp, 0); + ClientsTestUtils.consumeAndVerifyRecords(consumer, tp, numRecords, 0, 0, startingTimestamp); + + consumer.commitSync(); + Map committedOffset = consumer.committed(Set.of(tp)); + assertNotNull(committedOffset); + assertNotNull(committedOffset.get(tp)); + assertEquals(numRecords, committedOffset.get(tp).offset()); + } + } + + @ClusterTest + public void testClassicAssignAndConsume() throws InterruptedException { + testAssignAndConsume(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndConsume() throws InterruptedException { + testAssignAndConsume(GroupProtocol.CONSUMER); + } + + private void testAssignAndConsume(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 10; + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + long startingTimestamp = System.currentTimeMillis(); + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + ClientsTestUtils.consumeAndVerifyRecords(consumer, tp, numRecords, 0, 0, startingTimestamp); + + assertEquals(numRecords, consumer.position(tp)); + } + } + + @ClusterTest + public void testClassicAssignAndConsumeSkippingPosition() throws InterruptedException { + testAssignAndConsumeSkippingPosition(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndConsumeSkippingPosition() throws InterruptedException { + testAssignAndConsumeSkippingPosition(GroupProtocol.CONSUMER); + } + + private void testAssignAndConsumeSkippingPosition(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 10; + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + long startingTimestamp = System.currentTimeMillis(); + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + int offset = 1; + consumer.seek(tp, offset); + ClientsTestUtils.consumeAndVerifyRecords(consumer, tp, numRecords - offset, offset, offset, startingTimestamp + offset); + + assertEquals(numRecords, consumer.position(tp)); + } + } + + @ClusterTest + public void testClassicAssignAndFetchCommittedOffsets() throws InterruptedException { + testAssignAndFetchCommittedOffsets(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndFetchCommittedOffsets() throws InterruptedException { + testAssignAndFetchCommittedOffsets(GroupProtocol.CONSUMER); + } + + private void testAssignAndFetchCommittedOffsets(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 100; + long startingTimestamp = System.currentTimeMillis(); + + try 
(Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name, GROUP_ID_CONFIG, "group1"))) { + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + // First consumer consumes and commits offsets + consumer.seek(tp, 0); + ClientsTestUtils.consumeAndVerifyRecords(consumer, tp, numRecords, 0, 0, startingTimestamp); + consumer.commitSync(); + assertEquals(numRecords, consumer.committed(Set.of(tp)).get(tp).offset()); + } + + // We should see the committed offsets from another consumer + try (Consumer anotherConsumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name, GROUP_ID_CONFIG, "group1"))) { + anotherConsumer.assign(List.of(tp)); + assertEquals(numRecords, anotherConsumer.committed(Set.of(tp)).get(tp).offset()); + } + } + + @ClusterTest + public void testClassicAssignAndConsumeFromCommittedOffsets() throws InterruptedException { + testAssignAndConsumeFromCommittedOffsets(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndConsumeFromCommittedOffsets() throws InterruptedException { + testAssignAndConsumeFromCommittedOffsets(GroupProtocol.CONSUMER); + } + + private void testAssignAndConsumeFromCommittedOffsets(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 100; + int offset = 10; + long startingTimestamp = System.currentTimeMillis(); + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name, GROUP_ID_CONFIG, "group1"); + + try (Consumer consumer = clusterInstance.consumer(config)) { + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumer.commitSync(Map.of(tp, new OffsetAndMetadata(offset))); + assertEquals(offset, consumer.committed(Set.of(tp)).get(tp).offset()); + } + + // We should see the committed offsets from another consumer + try (Consumer anotherConsumer = clusterInstance.consumer(config)) { + assertEquals(offset, anotherConsumer.committed(Set.of(tp)).get(tp).offset()); + anotherConsumer.assign(List.of(tp)); + ClientsTestUtils.consumeAndVerifyRecords(anotherConsumer, tp, numRecords - offset, offset, offset, startingTimestamp + offset); + } + } + + @ClusterTest + public void testClassicAssignAndRetrievingCommittedOffsetsMultipleTimes() throws InterruptedException { + testAssignAndRetrievingCommittedOffsetsMultipleTimes(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncAssignAndRetrievingCommittedOffsetsMultipleTimes() throws InterruptedException { + testAssignAndRetrievingCommittedOffsetsMultipleTimes(GroupProtocol.CONSUMER); + } + + private void testAssignAndRetrievingCommittedOffsetsMultipleTimes(GroupProtocol groupProtocol) throws InterruptedException { + int numRecords = 100; + long startingTimestamp = System.currentTimeMillis(); + + try (Consumer consumer = clusterInstance.consumer(Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name))) { + ClientsTestUtils.sendRecords(clusterInstance, tp, numRecords, startingTimestamp); + consumer.assign(List.of(tp)); + + // Consume and commit offsets + consumer.seek(tp, 0); + ClientsTestUtils.consumeAndVerifyRecords(consumer, tp, numRecords, 0, 0, startingTimestamp); + consumer.commitSync(); + + // Check committed offsets twice with same consumer + assertEquals(numRecords, consumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(numRecords, consumer.committed(Set.of(tp)).get(tp).offset()); + } + } + + private static class CountConsumerCommitCallback implements 
OffsetCommitCallback { + int successCount = 0; + int failCount = 0; + Optional lastError = Optional.empty(); + + public void onComplete(Map offsets, Exception exception) { + if (exception == null) { + successCount += 1; + } else { + failCount += 1; + lastError = Optional.of(exception); + } + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCallbackTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCallbackTest.java index 800ca3b2c78fc..0e09d62033b3b 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCallbackTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCallbackTest.java @@ -16,25 +16,24 @@ */ package org.apache.kafka.clients.consumer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.ClientsTestUtils; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTestDefaults; import org.apache.kafka.common.test.api.Type; -import java.time.Duration; -import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; import static org.apache.kafka.clients.consumer.ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; import static org.apache.kafka.clients.consumer.GroupProtocol.CLASSIC; @@ -169,6 +168,80 @@ public void testClassicConsumerGetPositionOfNewlyAssignedPartitionOnPartitionsAs testGetPositionOfNewlyAssignedPartitionOnPartitionsAssignedCallback(CLASSIC); } + @ClusterTest + public void testOnPartitionsAssignedCalledWithNewPartitionsOnlyForClassicCooperative() throws InterruptedException { + try (var consumer = createClassicConsumerCooperativeProtocol()) { + testOnPartitionsAssignedCalledWithExpectedPartitions(consumer, true); + } + } + + @ClusterTest + public void testOnPartitionsAssignedCalledWithNewPartitionsOnlyForAsyncConsumer() throws InterruptedException { + try (var consumer = createConsumer(CONSUMER)) { + testOnPartitionsAssignedCalledWithExpectedPartitions(consumer, true); + } + } + + @ClusterTest + public void testOnPartitionsAssignedCalledWithNewPartitionsOnlyForClassicEager() throws InterruptedException { + try (var consumer = createConsumer(CLASSIC)) { + testOnPartitionsAssignedCalledWithExpectedPartitions(consumer, false); + } + } + + private void testOnPartitionsAssignedCalledWithExpectedPartitions( + Consumer consumer, + boolean expectNewPartitionsOnlyInCallback) throws InterruptedException { + subscribeAndExpectOnPartitionsAssigned(consumer, List.of(topic), List.of(tp)); + assertEquals(Set.of(tp), consumer.assignment()); + + // Add a new partition assignment while keeping the previous one + String newTopic = "newTopic"; + TopicPartition 
addedPartition = new TopicPartition(newTopic, 0); + List expectedPartitionsInCallback; + if (expectNewPartitionsOnlyInCallback) { + expectedPartitionsInCallback = List.of(addedPartition); + } else { + expectedPartitionsInCallback = List.of(tp, addedPartition); + } + + // Change subscription to keep the previous one and add a new topic. Assignment should be updated + // to contain partitions from both topics, but the onPartitionsAssigned parameters may containing + // the full new assignment or just the newly added partitions depending on the case. + subscribeAndExpectOnPartitionsAssigned( + consumer, + List.of(topic, newTopic), + expectedPartitionsInCallback); + assertEquals(Set.of(tp, addedPartition), consumer.assignment()); + } + + private void subscribeAndExpectOnPartitionsAssigned(Consumer consumer, List topics, Collection expectedPartitionsInCallback) throws InterruptedException { + var partitionsAssigned = new AtomicBoolean(false); + AtomicReference> partitionsFromCallback = new AtomicReference<>(); + consumer.subscribe(topics, new ConsumerRebalanceListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + if (partitions.containsAll(expectedPartitionsInCallback)) { + partitionsFromCallback.set(partitions); + partitionsAssigned.set(true); + } + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + // noop + } + }); + ClientsTestUtils.pollUntilTrue( + consumer, + partitionsAssigned::get, + "Timed out before expected rebalance completed" + ); + // These are different types, so comparing values instead + assertTrue(expectedPartitionsInCallback.containsAll(partitionsFromCallback.get()) && partitionsFromCallback.get().containsAll(expectedPartitionsInCallback), + "Expected partitions " + expectedPartitionsInCallback + " as parameter for onPartitionsAssigned, but got " + partitionsFromCallback.get()); + } + @ClusterTest public void testAsyncConsumerGetPositionOfNewlyAssignedPartitionOnPartitionsAssignedCallback() throws InterruptedException { testGetPositionOfNewlyAssignedPartitionOnPartitionsAssignedCallback(CONSUMER); @@ -198,7 +271,7 @@ private void testSeekPositionAndPauseNewlyAssignedPartitionOnPartitionsAssignedC var totalRecords = 120; var startingTimestamp = 0L; - sendRecords(totalRecords, startingTimestamp); + sendRecords(cluster, tp, totalRecords, startingTimestamp); triggerOnPartitionsAssigned(tp, consumer, (executeConsumer, partitions) -> { executeConsumer.seek(tp, startingOffset); @@ -209,6 +282,7 @@ private void testSeekPositionAndPauseNewlyAssignedPartitionOnPartitionsAssignedC consumer.resume(List.of(tp)); consumeAndVerifyRecords( consumer, + tp, (int) (totalRecords - startingOffset), (int) startingOffset, (int) startingOffset, @@ -238,11 +312,9 @@ public void onPartitionsRevoked(Collection partitions) { // noop } }); - TestUtils.waitForCondition( - () -> { - consumer.poll(Duration.ofMillis(100)); - return partitionsAssigned.get(); - }, + ClientsTestUtils.pollUntilTrue( + consumer, + partitionsAssigned::get, "Timed out before expected rebalance completed" ); } @@ -273,11 +345,9 @@ public void onPartitionsRevoked(Collection partitions) { } } }); - TestUtils.waitForCondition( - () -> { - consumer.poll(Duration.ofMillis(100)); - return partitionsAssigned.get(); - }, + ClientsTestUtils.pollUntilTrue( + consumer, + partitionsAssigned::get, "Timed out before expected rebalance completed" ); } @@ -291,62 +361,11 @@ private Consumer createConsumer(GroupProtocol protocol) { )); } - private void sendRecords(int numRecords, long 
startingTimestamp) { - try (Producer producer = cluster.producer()) { - for (var i = 0; i < numRecords; i++) { - var timestamp = startingTimestamp + i; - var record = new ProducerRecord<>( - tp.topic(), - tp.partition(), - timestamp, - ("key " + i).getBytes(), - ("value " + i).getBytes() - ); - producer.send(record); - } - producer.flush(); - } - } - - protected void consumeAndVerifyRecords( - Consumer consumer, - int numRecords, - int startingOffset, - int startingKeyAndValueIndex, - long startingTimestamp - ) throws InterruptedException { - var records = consumeRecords(consumer, numRecords); - for (var i = 0; i < numRecords; i++) { - var record = records.get(i); - var offset = startingOffset + i; - - assertEquals(tp.topic(), record.topic()); - assertEquals(tp.partition(), record.partition()); - - assertEquals(TimestampType.CREATE_TIME, record.timestampType()); - var timestamp = startingTimestamp + i; - assertEquals(timestamp, record.timestamp()); - - assertEquals(offset, record.offset()); - var keyAndValueIndex = startingKeyAndValueIndex + i; - assertEquals("key " + keyAndValueIndex, new String(record.key())); - assertEquals("value " + keyAndValueIndex, new String(record.value())); - // this is true only because K and V are byte arrays - assertEquals(("key " + keyAndValueIndex).length(), record.serializedKeySize()); - assertEquals(("value " + keyAndValueIndex).length(), record.serializedValueSize()); - } - } - - protected List> consumeRecords( - Consumer consumer, - int numRecords - ) throws InterruptedException { - List> records = new ArrayList<>(); - TestUtils.waitForCondition(() -> { - consumer.poll(Duration.ofMillis(100)).forEach(records::add); - return records.size() >= numRecords; - }, 60000, "Timed out before consuming expected " + numRecords + " records."); - - return records; + private Consumer createClassicConsumerCooperativeProtocol() { + return cluster.consumer(Map.of( + GROUP_PROTOCOL_CONFIG, CLASSIC.name.toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, "false", + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "org.apache.kafka.clients.consumer.CooperativeStickyAssignor" + )); } } diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCommitTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCommitTest.java new file mode 100644 index 0000000000000..c00d1ddab90a0 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerCommitTest.java @@ -0,0 +1,636 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.internals.Topic; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.test.MockConsumerInterceptor; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.BeforeEach; + +import java.time.Duration; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.apache.kafka.clients.ClientsTestUtils.awaitAssignment; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; +import static org.apache.kafka.clients.consumer.ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG; +import static org.apache.kafka.clients.consumer.PlaintextConsumerCommitTest.BROKER_COUNT; +import static org.apache.kafka.clients.consumer.PlaintextConsumerCommitTest.OFFSETS_TOPIC_PARTITIONS; +import static org.apache.kafka.clients.consumer.PlaintextConsumerCommitTest.OFFSETS_TOPIC_REPLICATION; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = OFFSETS_TOPIC_PARTITIONS), + @ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = OFFSETS_TOPIC_REPLICATION), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + } +) +public class PlaintextConsumerCommitTest { + + public static final int BROKER_COUNT = 3; + public static final String OFFSETS_TOPIC_PARTITIONS = "1"; + public static final String OFFSETS_TOPIC_REPLICATION = "3"; + private final ClusterInstance cluster; + private final String topic = "topic"; + private final TopicPartition tp = new TopicPartition(topic, 0); + private final TopicPartition tp1 = new TopicPartition(topic, 1); + + public PlaintextConsumerCommitTest(ClusterInstance clusterInstance) { + this.cluster = clusterInstance; + } + + @BeforeEach + public void setup() throws InterruptedException { + cluster.createTopic(topic, 2, (short) BROKER_COUNT); + } + + @ClusterTest + public void 
testClassicConsumerAutoCommitOnClose() throws InterruptedException { + testAutoCommitOnClose(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerAutoCommitOnClose() throws InterruptedException { + testAutoCommitOnClose(GroupProtocol.CONSUMER); + } + + private void testAutoCommitOnClose(GroupProtocol groupProtocol) throws InterruptedException { + try (var consumer = createConsumer(groupProtocol, true)) { + sendRecords(cluster, tp, 1000); + + consumer.subscribe(List.of(topic)); + awaitAssignment(consumer, Set.of(tp, tp1)); + // should auto-commit sought positions before closing + consumer.seek(tp, 300); + consumer.seek(tp1, 500); + } + + // now we should see the committed positions from another consumer + try (var anotherConsumer = createConsumer(groupProtocol, true)) { + assertEquals(300, anotherConsumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(500, anotherConsumer.committed(Set.of(tp1)).get(tp1).offset()); + } + } + + @ClusterTest + public void testClassicConsumerAutoCommitOnCloseAfterWakeup() throws InterruptedException { + testAutoCommitOnCloseAfterWakeup(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerAutoCommitOnCloseAfterWakeup() throws InterruptedException { + testAutoCommitOnCloseAfterWakeup(GroupProtocol.CONSUMER); + } + + private void testAutoCommitOnCloseAfterWakeup(GroupProtocol groupProtocol) throws InterruptedException { + try (var consumer = createConsumer(groupProtocol, true)) { + sendRecords(cluster, tp, 1000); + + consumer.subscribe(List.of(topic)); + awaitAssignment(consumer, Set.of(tp, tp1)); + + // should auto-commit sought positions before closing + consumer.seek(tp, 300); + consumer.seek(tp1, 500); + + // wakeup the consumer before closing to simulate trying to break a poll + // loop from another thread + consumer.wakeup(); + } + + // now we should see the committed positions from another consumer + try (var anotherConsumer = createConsumer(groupProtocol, true)) { + assertEquals(300, anotherConsumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(500, anotherConsumer.committed(Set.of(tp1)).get(tp1).offset()); + } + } + + @ClusterTest + public void testClassicConsumerCommitMetadata() throws InterruptedException { + testCommitMetadata(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerCommitMetadata() throws InterruptedException { + testCommitMetadata(GroupProtocol.CONSUMER); + } + + private void testCommitMetadata(GroupProtocol groupProtocol) throws InterruptedException { + try (var consumer = createConsumer(groupProtocol, true)) { + consumer.assign(List.of(tp)); + // sync commit + var syncMetadata = new OffsetAndMetadata(5, Optional.of(15), "foo"); + consumer.commitSync(Map.of(tp, syncMetadata)); + assertEquals(syncMetadata, consumer.committed(Set.of(tp)).get(tp)); + + // async commit + var asyncMetadata = new OffsetAndMetadata(10, "bar"); + sendAndAwaitAsyncCommit(consumer, Map.of(tp, asyncMetadata)); + assertEquals(asyncMetadata, consumer.committed(Set.of(tp)).get(tp)); + + // handle null metadata + var nullMetadata = new OffsetAndMetadata(5, null); + consumer.commitSync(Map.of(tp, nullMetadata)); + assertEquals(nullMetadata, consumer.committed(Set.of(tp)).get(tp)); + } + } + + @ClusterTest + public void testClassicConsumerAsyncCommit() throws InterruptedException { + testAsyncCommit(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerAsyncCommit() throws InterruptedException { + testAsyncCommit(GroupProtocol.CONSUMER); + } + + private void 
testAsyncCommit(GroupProtocol groupProtocol) throws InterruptedException { + // Ensure the __consumer_offsets topic is created to prevent transient issues, + // such as RetriableCommitFailedException during async offset commits. + cluster.createTopic( + Topic.GROUP_METADATA_TOPIC_NAME, + Integer.parseInt(OFFSETS_TOPIC_PARTITIONS), + Short.parseShort(OFFSETS_TOPIC_REPLICATION) + ); + try (var consumer = createConsumer(groupProtocol, false)) { + consumer.assign(List.of(tp)); + + var callback = new CountConsumerCommitCallback(); + var count = 5; + for (var i = 1; i <= count; i++) + consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(i)), callback); + + ClientsTestUtils.pollUntilTrue( + consumer, + () -> callback.successCount >= count || callback.lastError.isPresent(), + "Failed to observe commit callback before timeout" + ); + + assertEquals(Optional.empty(), callback.lastError); + assertEquals(count, callback.successCount); + assertEquals(new OffsetAndMetadata(count), consumer.committed(Set.of(tp)).get(tp)); + } + } + + @ClusterTest + public void testClassicConsumerAutoCommitIntercept() throws InterruptedException { + testAutoCommitIntercept(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerAutoCommitIntercept() throws InterruptedException { + testAutoCommitIntercept(GroupProtocol.CONSUMER); + } + + private void testAutoCommitIntercept(GroupProtocol groupProtocol) throws InterruptedException { + var topic2 = "topic2"; + cluster.createTopic(topic2, 2, (short) 3); + var numRecords = 100; + try (var producer = cluster.producer(); + // create consumer with interceptor + Consumer consumer = cluster.consumer(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, "true", + INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor" + )) + ) { + // produce records + for (var i = 0; i < numRecords; i++) { + producer.send(new ProducerRecord<>(tp.topic(), tp.partition(), ("key " + i).getBytes(), ("value " + i).getBytes())); + } + + var rebalanceListener = new ConsumerRebalanceListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + // keep partitions paused in this test so that we can verify the commits based on specific seeks + consumer.pause(partitions); + } + @Override + public void onPartitionsRevoked(Collection partitions) { + // No-op + } + }; + + changeConsumerSubscriptionAndValidateAssignment( + consumer, + List.of(topic), + Set.of(tp, tp1), + rebalanceListener + ); + consumer.seek(tp, 10); + consumer.seek(tp1, 20); + + // change subscription to trigger rebalance + var commitCountBeforeRebalance = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue(); + var expectedAssignment = Set.of(tp, tp1, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)); + changeConsumerSubscriptionAndValidateAssignment( + consumer, + List.of(topic, topic2), + expectedAssignment, + rebalanceListener + ); + + // after rebalancing, we should have reset to the committed positions + var committed1 = consumer.committed(Set.of(tp)); + assertEquals(10, committed1.get(tp).offset()); + var committed2 = consumer.committed(Set.of(tp1)); + assertEquals(20, committed2.get(tp1).offset()); + + // In both CLASSIC and CONSUMER protocols, interceptors are executed in poll and close. + // However, in the CONSUMER protocol, the assignment may be changed outside a poll, so + // we need to poll once to ensure the interceptor is called. 
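+            // (For the CLASSIC protocol the rebalance itself happens inside poll, so the interceptor's
+            // onCommit should already have been invoked during the polling that completed the rebalance
+            // above; only the CONSUMER protocol needs the extra poll below.)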
+ if (groupProtocol == GroupProtocol.CONSUMER) { + consumer.poll(Duration.ZERO); + } + + assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeRebalance); + + // verify commits are intercepted on close + var commitCountBeforeClose = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue(); + consumer.close(); + assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeClose); + producer.close(); + // cleanup + MockConsumerInterceptor.resetCounters(); + } + } + + @ClusterTest + public void testClassicConsumerCommitSpecifiedOffsets() throws InterruptedException { + testCommitSpecifiedOffsets(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerCommitSpecifiedOffsets() throws InterruptedException { + testCommitSpecifiedOffsets(GroupProtocol.CONSUMER); + } + + private void testCommitSpecifiedOffsets(GroupProtocol groupProtocol) throws InterruptedException { + try (Producer producer = cluster.producer(); + var consumer = createConsumer(groupProtocol, false) + ) { + sendRecords(producer, tp, 5, System.currentTimeMillis()); + sendRecords(producer, tp1, 7, System.currentTimeMillis()); + + consumer.assign(List.of(tp, tp1)); + + var pos1 = consumer.position(tp); + var pos2 = consumer.position(tp1); + + consumer.commitSync(Map.of(tp, new OffsetAndMetadata(3L))); + + assertEquals(3, consumer.committed(Set.of(tp)).get(tp).offset()); + assertNull(consumer.committed(Collections.singleton(tp1)).get(tp1)); + + // Positions should not change + assertEquals(pos1, consumer.position(tp)); + assertEquals(pos2, consumer.position(tp1)); + + consumer.commitSync(Map.of(tp1, new OffsetAndMetadata(5L))); + + assertEquals(3, consumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(5, consumer.committed(Set.of(tp1)).get(tp1).offset()); + + // Using async should pick up the committed changes after commit completes + sendAndAwaitAsyncCommit(consumer, Map.of(tp1, new OffsetAndMetadata(7L))); + assertEquals(7, consumer.committed(Collections.singleton(tp1)).get(tp1).offset()); + } + } + + @ClusterTest + public void testClassicConsumerAutoCommitOnRebalance() throws InterruptedException { + testAutoCommitOnRebalance(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerAutoCommitOnRebalance() throws InterruptedException { + testAutoCommitOnRebalance(GroupProtocol.CONSUMER); + } + + private void testAutoCommitOnRebalance(GroupProtocol groupProtocol) throws InterruptedException { + var topic2 = "topic2"; + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + try (var consumer = createConsumer(groupProtocol, true)) { + sendRecords(cluster, tp, 1000); + + var rebalanceListener = new ConsumerRebalanceListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + // keep partitions paused in this test so that we can verify the commits based on specific seeks + consumer.pause(partitions); + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + + } + }; + + consumer.subscribe(List.of(topic), rebalanceListener); + awaitAssignment(consumer, Set.of(tp, tp1)); + + consumer.seek(tp, 300); + consumer.seek(tp1, 500); + // change subscription to trigger rebalance + consumer.subscribe(List.of(topic, topic2), rebalanceListener); + + var newAssignment = Set.of(tp, tp1, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)); + awaitAssignment(consumer, newAssignment); + + // after rebalancing, we should have reset to the committed positions + assertEquals(300, 
consumer.committed(Set.of(tp)).get(tp).offset());
+            assertEquals(500, consumer.committed(Set.of(tp1)).get(tp1).offset());
+        }
+    }
+
+    @ClusterTest
+    public void testClassicConsumerSubscribeAndCommitSync() throws InterruptedException {
+        testSubscribeAndCommitSync(GroupProtocol.CLASSIC);
+    }
+
+    @ClusterTest
+    public void testAsyncConsumerSubscribeAndCommitSync() throws InterruptedException {
+        testSubscribeAndCommitSync(GroupProtocol.CONSUMER);
+    }
+
+    private void testSubscribeAndCommitSync(GroupProtocol groupProtocol) throws InterruptedException {
+        // This test ensures that the member ID received from the group coordinator along with the
+        // assignment is propagated into a subsequent offset commit
+        try (var consumer = createConsumer(groupProtocol, false)) {
+            assertEquals(0, consumer.assignment().size());
+            consumer.subscribe(List.of(topic));
+            awaitAssignment(consumer, Set.of(tp, tp1));
+
+            consumer.seek(tp, 0);
+            consumer.commitSync();
+        }
+    }
+
+    @ClusterTest
+    public void testClassicConsumerPositionAndCommit() throws InterruptedException {
+        testPositionAndCommit(GroupProtocol.CLASSIC);
+    }
+
+    @ClusterTest
+    public void testAsyncConsumerPositionAndCommit() throws InterruptedException {
+        testPositionAndCommit(GroupProtocol.CONSUMER);
+    }
+
+    private void testPositionAndCommit(GroupProtocol groupProtocol) throws InterruptedException {
+        try (Producer<byte[], byte[]> producer = cluster.producer();
+             var consumer = createConsumer(groupProtocol, false);
+             var otherConsumer = createConsumer(groupProtocol, false)
+        ) {
+            var startingTimestamp = System.currentTimeMillis();
+            sendRecords(producer, tp, 5, startingTimestamp);
+
+            var topicPartition = new TopicPartition(topic, 15);
+            assertNull(consumer.committed(Collections.singleton(topicPartition)).get(topicPartition));
+
+            // position() on a partition that we aren't subscribed to throws an exception
+            assertThrows(IllegalStateException.class, () -> consumer.position(topicPartition));
+
+            consumer.assign(List.of(tp));
+
+            assertEquals(0L, consumer.position(tp), "position() on a partition that we are subscribed to should reset the offset");
+            consumer.commitSync();
+            assertEquals(0L, consumer.committed(Set.of(tp)).get(tp).offset());
+            consumeAndVerifyRecords(consumer, tp, 5, 0, 0, startingTimestamp);
+            assertEquals(5L, consumer.position(tp), "After consuming 5 records, position should be 5");
+            consumer.commitSync();
+            assertEquals(5L, consumer.committed(Set.of(tp)).get(tp).offset(), "Committed offset should be returned");
+
+            startingTimestamp = System.currentTimeMillis();
+            sendRecords(producer, tp, 1, startingTimestamp);
+
+            // another consumer in the same group should get the same position
+            otherConsumer.assign(List.of(tp));
+            consumeAndVerifyRecords(otherConsumer, tp, 1, 5, 0, startingTimestamp);
+        }
+    }
+
+    /**
+     * This tests closing the consumer while a commit request has already been sent. During the close,
+     * the consumer can no longer find the coordinator.
+     */
+    @ClusterTest
+    public void testCommitAsyncFailsWhenCoordinatorUnavailableDuringClose() throws InterruptedException {
+        try (Producer<byte[], byte[]> producer = cluster.producer();
+             var consumer = createConsumer(GroupProtocol.CONSUMER, false)
+        ) {
+            sendRecords(producer, tp, 3, System.currentTimeMillis());
+            consumer.assign(List.of(tp));
+
+            var callback = new CountConsumerCommitCallback();
+
+            // Shut down all brokers before committing so that the commit fails to find the coordinator.
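+            // With every broker shut down there is no group coordinator left, so the async commit below
+            // cannot complete normally; the expectation (asserted further down) is that close() fails the
+            // commit promptly with a CommitFailedException rather than blocking until the timeout.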
+            cluster.brokerIds().forEach(cluster::shutdownBroker);
+
+            TestUtils.waitForCondition(() -> cluster.aliveBrokers().isEmpty(), "All brokers should be shut down");
+
+            consumer.poll(Duration.ofMillis(500));
+            consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(1L)), callback);
+
+            long startTime = System.currentTimeMillis();
+            consumer.close(CloseOptions.timeout(Duration.ofMillis(500)));
+            long closeDuration = System.currentTimeMillis() - startTime;
+
+            assertTrue(closeDuration < 1000, "The closing process for the consumer was too long: " + closeDuration + " ms");
+            assertTrue(callback.lastError.isPresent());
+            assertEquals(CommitFailedException.class, callback.lastError.get().getClass());
+            assertEquals("Failed to commit offsets: Coordinator unknown and consumer is closing", callback.lastError.get().getMessage());
+            assertEquals(1, callback.exceptionCount);
+        }
+    }
+
+    // TODO: This only works in the new consumer, but should be fixed for the old consumer as well
+    @ClusterTest
+    public void testCommitAsyncCompletedBeforeConsumerCloses() throws InterruptedException {
+        // This is testing the contract that asynchronous offset commits are completed before the consumer
+        // is closed, even when no sync commit is performed as part of the close (because auto-commit is
+        // disabled, or simply because there are no consumed offsets).
+
+        // Create offsets topic to ensure coordinator is available during close
+        cluster.createTopic(Topic.GROUP_METADATA_TOPIC_NAME, Integer.parseInt(OFFSETS_TOPIC_PARTITIONS), Short.parseShort(OFFSETS_TOPIC_REPLICATION));
+
+        try (Producer<byte[], byte[]> producer = cluster.producer(Map.of(ProducerConfig.ACKS_CONFIG, "all"));
+             var consumer = createConsumer(GroupProtocol.CONSUMER, false)
+        ) {
+            sendRecords(producer, tp, 3, System.currentTimeMillis());
+            sendRecords(producer, tp1, 3, System.currentTimeMillis());
+            consumer.assign(List.of(tp, tp1));
+
+            // Try without looking up the coordinator first
+            var cb = new CountConsumerCommitCallback();
+            consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(1L)), cb);
+            consumer.commitAsync(Map.of(tp1, new OffsetAndMetadata(1L)), cb);
+
+            consumer.close();
+            assertEquals(2, cb.successCount);
+        }
+    }
+
+    // TODO: This only works in the new consumer, but should be fixed for the old consumer as well
+    @ClusterTest
+    public void testCommitAsyncCompletedBeforeCommitSyncReturns() {
+        // This is testing the contract that asynchronous offset commits sent previously with the
+        // `commitAsync` are guaranteed to have their callbacks invoked prior to completion of
+        // `commitSync` (given that it does not time out).
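+        // As a rough illustration of the guarantee being verified below (illustrative only, using the
+        // names from this test):
+        //   consumer.commitAsync(offsets, cb);   // async commit is sent and the callback registered
+        //   consumer.commitSync(Map.of());       // by the time this returns, cb.onComplete(...) has run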
+ try (Producer producer = cluster.producer(); + var consumer = createConsumer(GroupProtocol.CONSUMER, false) + ) { + sendRecords(producer, tp, 3, System.currentTimeMillis()); + sendRecords(producer, tp1, 3, System.currentTimeMillis()); + + consumer.assign(List.of(tp, tp1)); + + // Try without looking up the coordinator first + var cb = new CountConsumerCommitCallback(); + consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(1L)), cb); + consumer.commitSync(Map.of()); + + assertEquals(1, consumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(1, cb.successCount); + + // Try with coordinator known + consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(2L)), cb); + consumer.commitSync(Map.of(tp1, new OffsetAndMetadata(2L))); + + assertEquals(2, consumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(2, consumer.committed(Set.of(tp1)).get(tp1).offset()); + assertEquals(2, cb.successCount); + + // Try with empty sync commit + consumer.commitAsync(Map.of(tp, new OffsetAndMetadata(3L)), cb); + consumer.commitSync(Map.of()); + + assertEquals(3, consumer.committed(Set.of(tp)).get(tp).offset()); + assertEquals(2, consumer.committed(Set.of(tp1)).get(tp1).offset()); + assertEquals(3, cb.successCount); + } + } + + private Consumer createConsumer(GroupProtocol protocol, boolean enableAutoCommit) { + return cluster.consumer(Map.of( + GROUP_ID_CONFIG, "test-group", + GROUP_PROTOCOL_CONFIG, protocol.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit + )); + } + + private void sendAndAwaitAsyncCommit( + Consumer consumer, + Map offsetsOpt + ) throws InterruptedException { + var commitCallback = new RetryCommitCallback(consumer, offsetsOpt); + + commitCallback.sendAsyncCommit(); + ClientsTestUtils.pollUntilTrue( + consumer, + () -> commitCallback.isComplete, + "Failed to observe commit callback before timeout" + ); + + assertEquals(Optional.empty(), commitCallback.error); + } + + private static class RetryCommitCallback implements OffsetCommitCallback { + private boolean isComplete = false; + private Optional error = Optional.empty(); + + private final Consumer consumer; + private final Map offsetsOpt; + + public RetryCommitCallback( + Consumer consumer, + Map offsetsOpt + ) { + this.consumer = consumer; + this.offsetsOpt = offsetsOpt; + } + + @Override + public void onComplete(Map offsets, Exception exception) { + if (exception instanceof RetriableCommitFailedException) { + sendAsyncCommit(); + } else { + isComplete = true; + error = Optional.ofNullable(exception); + } + } + + void sendAsyncCommit() { + consumer.commitAsync(offsetsOpt, this); + } + } + + private static class CountConsumerCommitCallback implements OffsetCommitCallback { + private int successCount = 0; + private int exceptionCount = 0; + private Optional lastError = Optional.empty(); + + @Override + public void onComplete(Map offsets, Exception exception) { + if (exception == null) { + successCount += 1; + } else { + exceptionCount += 1; + lastError = Optional.of(exception); + } + } + } + + private void changeConsumerSubscriptionAndValidateAssignment( + Consumer consumer, + List topicsToSubscribe, + Set expectedAssignment, + ConsumerRebalanceListener rebalanceListener + ) throws InterruptedException { + consumer.subscribe(topicsToSubscribe, rebalanceListener); + awaitAssignment(consumer, expectedAssignment); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerFetchTest.java 
b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerFetchTest.java new file mode 100644 index 0000000000000..e65dd0b7ed98f --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerFetchTest.java @@ -0,0 +1,489 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; + +import org.junit.jupiter.api.BeforeEach; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.apache.kafka.clients.ClientsTestUtils.awaitAssignment; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.consumeRecords; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; +import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.FETCH_MAX_BYTES_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = PlaintextConsumerFetchTest.BROKER_COUNT, + 
serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "3"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + } +) +public class PlaintextConsumerFetchTest { + + public static final int BROKER_COUNT = 3; + private final ClusterInstance cluster; + private final String topic = "topic"; + private final TopicPartition tp = new TopicPartition(topic, 0); + private final TopicPartition tp2 = new TopicPartition(topic, 1); + + public PlaintextConsumerFetchTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @BeforeEach + public void setup() throws InterruptedException { + cluster.createTopic(topic, 2, (short) BROKER_COUNT); + } + + @ClusterTest + public void testClassicConsumerFetchInvalidOffset() { + testFetchInvalidOffset(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchInvalidOffset() { + testFetchInvalidOffset(GroupProtocol.CONSUMER); + } + + private void testFetchInvalidOffset(GroupProtocol groupProtocol) { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + AUTO_OFFSET_RESET_CONFIG, "none" + ); + try (var consumer = cluster.consumer(config)) { + // produce one record + var totalRecords = 2; + sendRecords(cluster, tp, totalRecords); + consumer.assign(List.of(tp)); + + // poll should fail because there is no offset reset strategy set. + // we fail only when resetting positions after coordinator is known, so using a long timeout. + assertThrows(NoOffsetForPartitionException.class, () -> consumer.poll(Duration.ofMillis(15000))); + + // seek to out of range position + var outOfRangePos = totalRecords + 1; + consumer.seek(tp, outOfRangePos); + var e = assertThrows(OffsetOutOfRangeException.class, () -> consumer.poll(Duration.ofMillis(20000))); + var outOfRangePartitions = e.offsetOutOfRangePartitions(); + assertNotNull(outOfRangePartitions); + assertEquals(1, outOfRangePartitions.size()); + assertEquals(outOfRangePos, outOfRangePartitions.get(tp).longValue()); + } + } + + @ClusterTest + public void testClassicConsumerFetchOutOfRangeOffsetResetConfigEarliest() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigEarliest(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchOutOfRangeOffsetResetConfigEarliest() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigEarliest(GroupProtocol.CONSUMER); + } + + private void testFetchOutOfRangeOffsetResetConfigEarliest(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + // ensure no in-flight fetch request so that the offset can be reset immediately + FETCH_MAX_WAIT_MS_CONFIG, 0 + ); + try (Consumer consumer = cluster.consumer(config)) { + var totalRecords = 10; + var startingTimestamp = 0; + sendRecords(cluster, tp, totalRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumeAndVerifyRecords(consumer, tp, totalRecords, 0); + // seek to out of range position + var outOfRangePos = totalRecords + 1; + consumer.seek(tp, outOfRangePos); + // assert that poll resets to the beginning position + consumeAndVerifyRecords(consumer, tp, 1, 0); + } + } + + @ClusterTest + public void testClassicConsumerFetchOutOfRangeOffsetResetConfigLatest() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigLatest(GroupProtocol.CLASSIC); + } + + 
@ClusterTest + public void testAsyncConsumerFetchOutOfRangeOffsetResetConfigLatest() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigLatest(GroupProtocol.CONSUMER); + } + + private void testFetchOutOfRangeOffsetResetConfigLatest(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + AUTO_OFFSET_RESET_CONFIG, "latest", + // ensure no in-flight fetch request so that the offset can be reset immediately + FETCH_MAX_WAIT_MS_CONFIG, 0 + ); + try (Consumer consumer = cluster.consumer(config); + Producer producer = cluster.producer() + ) { + var totalRecords = 10; + var startingTimestamp = 0; + sendRecords(producer, tp, totalRecords, startingTimestamp); + consumer.assign(List.of(tp)); + consumer.seek(tp, 0); + + // consume some, but not all the records + consumeAndVerifyRecords(consumer, tp, totalRecords / 2, 0); + // seek to out of range position + var outOfRangePos = totalRecords + 17; // arbitrary, much higher offset + consumer.seek(tp, outOfRangePos); + // assert that poll resets to the ending position + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty()); + sendRecords(producer, tp, totalRecords, totalRecords); + var nextRecord = consumer.poll(Duration.ofMillis(50)).iterator().next(); + // ensure the seek went to the last known record at the time of the previous poll + assertEquals(totalRecords, nextRecord.offset()); + } + } + + @ClusterTest + public void testClassicConsumerFetchOutOfRangeOffsetResetConfigByDuration() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigByDuration(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchOutOfRangeOffsetResetConfigByDuration() throws InterruptedException { + testFetchOutOfRangeOffsetResetConfigByDuration(GroupProtocol.CONSUMER); + } + + private void testFetchOutOfRangeOffsetResetConfigByDuration(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + AUTO_OFFSET_RESET_CONFIG, "by_duration:PT1H", + // ensure no in-flight fetch request so that the offset can be reset immediately + FETCH_MAX_WAIT_MS_CONFIG, 0 + ); + try (Consumer consumer1 = cluster.consumer(config); + Consumer consumer2 = cluster.consumer(config) + ) { + var totalRecords = 10; + var startingTimestamp = System.currentTimeMillis(); + sendRecords(cluster, tp, totalRecords, startingTimestamp); + consumer1.assign(List.of(tp)); + consumeAndVerifyRecords( + consumer1, + tp, + totalRecords, + 0, + 0, + startingTimestamp + ); + + // seek to out of range position + var outOfRangePos = totalRecords + 1; + consumer1.seek(tp, outOfRangePos); + // assert that poll resets to the beginning position + consumeAndVerifyRecords( + consumer1, + tp, + 1, + 0, + 0, + startingTimestamp + ); + + // Test the scenario where starting offset is earlier than the requested duration + var totalRecords2 = 25; + startingTimestamp = Instant.now().minus(Duration.ofHours(24)).toEpochMilli(); + + // generate records with 1 hour interval for 1 day + var hourMillis = Duration.ofHours(1).toMillis(); + sendRecords(cluster, tp2, totalRecords2, startingTimestamp, hourMillis); + consumer2.assign(List.of(tp2)); + // consumer should read one record from last one hour + consumeAndVerifyRecords( + consumer2, + tp2, + 1, + 24, + 24, + startingTimestamp + 24 * hourMillis, + hourMillis + ); + + // seek to out of range position + outOfRangePos = totalRecords2 + 1; + 
consumer2.seek(tp2, outOfRangePos); + // assert that poll resets to the duration offset. consumer should read one record from last one hour + consumeAndVerifyRecords( + consumer2, + tp2, + 1, + 24, + 24, + startingTimestamp + 24 * hourMillis, + hourMillis + ); + } + } + + @ClusterTest + public void testClassicConsumerFetchRecordLargerThanFetchMaxBytes() throws InterruptedException { + testFetchRecordLargerThanFetchMaxBytes(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchRecordLargerThanFetchMaxBytes() throws InterruptedException { + testFetchRecordLargerThanFetchMaxBytes(GroupProtocol.CONSUMER); + } + + private void testFetchRecordLargerThanFetchMaxBytes(GroupProtocol groupProtocol) throws InterruptedException { + int maxFetchBytes = 10 * 1024; + checkLargeRecord(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + FETCH_MAX_BYTES_CONFIG, maxFetchBytes + ), maxFetchBytes + 1); + } + + @ClusterTest + public void testClassicConsumerFetchRecordLargerThanMaxPartitionFetchBytes() throws InterruptedException { + testFetchRecordLargerThanMaxPartitionFetchBytes(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchRecordLargerThanMaxPartitionFetchBytes() throws InterruptedException { + testFetchRecordLargerThanMaxPartitionFetchBytes(GroupProtocol.CONSUMER); + } + + private void testFetchRecordLargerThanMaxPartitionFetchBytes(GroupProtocol groupProtocol) throws InterruptedException { + int maxFetchBytes = 10 * 1024; + checkLargeRecord(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + MAX_PARTITION_FETCH_BYTES_CONFIG, maxFetchBytes + ), maxFetchBytes + 1); + } + + private void checkLargeRecord(Map config, int producerRecordSize) throws InterruptedException { + try (Consumer consumer = cluster.consumer(config); + Producer producer = cluster.producer() + ) { + // produce a record that is larger than the configured fetch size + var record = new ProducerRecord<>( + tp.topic(), + tp.partition(), + "key".getBytes(), + new byte[producerRecordSize] + ); + producer.send(record); + + // consuming a record that is too large should succeed since KIP-74 + consumer.assign(List.of(tp)); + var records = consumeRecords(consumer, 1); + assertEquals(1, records.size()); + var consumerRecord = records.iterator().next(); + assertEquals(0L, consumerRecord.offset()); + assertEquals(tp.topic(), consumerRecord.topic()); + assertEquals(tp.partition(), consumerRecord.partition()); + assertArrayEquals(record.key(), consumerRecord.key()); + assertArrayEquals(record.value(), consumerRecord.value()); + } + } + + @ClusterTest + public void testClassicConsumerFetchHonoursFetchSizeIfLargeRecordNotFirst() throws ExecutionException, InterruptedException { + testFetchHonoursFetchSizeIfLargeRecordNotFirst(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchHonoursFetchSizeIfLargeRecordNotFirst() throws ExecutionException, InterruptedException { + testFetchHonoursFetchSizeIfLargeRecordNotFirst(GroupProtocol.CONSUMER); + } + + private void testFetchHonoursFetchSizeIfLargeRecordNotFirst(GroupProtocol groupProtocol) throws ExecutionException, InterruptedException { + int maxFetchBytes = 10 * 1024; + checkFetchHonoursSizeIfLargeRecordNotFirst(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + FETCH_MAX_BYTES_CONFIG, maxFetchBytes + ), maxFetchBytes); + } + + @ClusterTest + public void testClassicConsumerFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst() 
throws ExecutionException, InterruptedException { + testFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst() throws ExecutionException, InterruptedException { + testFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst(GroupProtocol.CONSUMER); + } + + private void testFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst(GroupProtocol groupProtocol) throws ExecutionException, InterruptedException { + int maxFetchBytes = 10 * 1024; + checkFetchHonoursSizeIfLargeRecordNotFirst(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + MAX_PARTITION_FETCH_BYTES_CONFIG, maxFetchBytes + ), maxFetchBytes); + } + + private void checkFetchHonoursSizeIfLargeRecordNotFirst( + Map config, + int largeProducerRecordSize + ) throws ExecutionException, InterruptedException { + try (Consumer consumer = cluster.consumer(config); + Producer producer = cluster.producer() + ) { + var smallRecord = new ProducerRecord<>( + tp.topic(), + tp.partition(), + "small".getBytes(), + "value".getBytes() + ); + var largeRecord = new ProducerRecord<>( + tp.topic(), + tp.partition(), + "large".getBytes(), + new byte[largeProducerRecordSize] + ); + + producer.send(smallRecord).get(); + producer.send(largeRecord).get(); + + // we should only get the small record in the first `poll` + consumer.assign(List.of(tp)); + + var records = consumeRecords(consumer, 1); + assertEquals(1, records.size()); + var consumerRecord = records.iterator().next(); + assertEquals(0L, consumerRecord.offset()); + assertEquals(tp.topic(), consumerRecord.topic()); + assertEquals(tp.partition(), consumerRecord.partition()); + assertArrayEquals(smallRecord.key(), consumerRecord.key()); + assertArrayEquals(smallRecord.value(), consumerRecord.value()); + } + } + + @ClusterTest + public void testClassicConsumerLowMaxFetchSizeForRequestAndPartition() throws InterruptedException { + testLowMaxFetchSizeForRequestAndPartition(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerLowMaxFetchSizeForRequestAndPartition() throws InterruptedException { + testLowMaxFetchSizeForRequestAndPartition(GroupProtocol.CONSUMER); + } + + private void testLowMaxFetchSizeForRequestAndPartition(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + // one of the effects of this is that there will be some log reads where `0 > remaining limit bytes < message size` + // and we don't return the message because it's not the first message in the first non-empty partition of the fetch + // this behaves a little different from when remaining limit bytes is 0, and it's important to test it + FETCH_MAX_BYTES_CONFIG, 500, + MAX_PARTITION_FETCH_BYTES_CONFIG, 100, + // Avoid a rebalance while the records are being sent (the default is 6 seconds) + MAX_POLL_INTERVAL_MS_CONFIG, 20000 + ); + + try (Consumer consumer = cluster.consumer(config); + Producer producer = cluster.producer() + ) { + var partitionCount = 30; + var topics = List.of("topic1", "topic2", "topic3"); + + for (var topicName : topics) { + cluster.createTopic(topicName, partitionCount, (short) BROKER_COUNT); + } + + Set partitions = new HashSet<>(); + for (var topic : topics) { + for (var i = 0; i < partitionCount; i++) { + partitions.add(new TopicPartition(topic, i)); + } + } + + assertEquals(0, consumer.assignment().size()); + 
consumer.subscribe(topics);
+            awaitAssignment(consumer, partitions);
+
+            List<ProducerRecord<byte[], byte[]>> producerRecords = new ArrayList<>();
+            for (var partition : partitions) {
+                producerRecords.addAll(sendRecords(producer, partition, partitionCount, System.currentTimeMillis(), -1));
+            }
+
+            List<ConsumerRecord<byte[], byte[]>> consumerRecords = consumeRecords(consumer, producerRecords.size());
+
+            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> consumedByPartition = new HashMap<>();
+            for (var record : consumerRecords) {
+                var tp = new TopicPartition(record.topic(), record.partition());
+                consumedByPartition.computeIfAbsent(tp, k -> new ArrayList<>()).add(record);
+            }
+
+            Map<TopicPartition, List<ProducerRecord<byte[], byte[]>>> producedByPartition = new HashMap<>();
+            for (var record : producerRecords) {
+                var tp = new TopicPartition(record.topic(), record.partition());
+                producedByPartition.computeIfAbsent(tp, k -> new ArrayList<>()).add(record);
+            }
+
+            for (var partition : partitions) {
+                var produced = producedByPartition.getOrDefault(partition, List.of());
+                var consumed = consumedByPartition.getOrDefault(partition, List.of());
+
+                assertEquals(produced.size(), consumed.size(), "Records count mismatch for " + partition);
+
+                for (var i = 0; i < produced.size(); i++) {
+                    var producerRecord = produced.get(i);
+                    var consumerRecord = consumed.get(i);
+
+                    assertEquals(producerRecord.topic(), consumerRecord.topic());
+                    assertEquals(producerRecord.partition(), consumerRecord.partition());
+                    assertArrayEquals(producerRecord.key(), consumerRecord.key());
+                    assertArrayEquals(producerRecord.value(), consumerRecord.value());
+                    assertEquals(producerRecord.timestamp(), consumerRecord.timestamp());
+                }
+            }
+        }
+    }
+}
diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerPollTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerPollTest.java
new file mode 100644
index 0000000000000..71853d97814db
--- /dev/null
+++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerPollTest.java
@@ -0,0 +1,680 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.kafka.clients.consumer; + + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.clients.ClientsTestUtils.TestConsumerReassignmentListener; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.BeforeEach; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.IntStream; + +import static org.apache.kafka.clients.ClientsTestUtils.awaitRebalance; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.ensureNoRebalance; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; +import static org.apache.kafka.clients.ClientsTestUtils.waitForPollThrowException; +import static org.apache.kafka.clients.CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.CLIENT_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.MAX_POLL_RECORDS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.CONSUMER_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = PlaintextConsumerPollTest.BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "3"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = 
CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, value = "500"), + @ClusterConfigProperty(key = CONSUMER_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, value = "500"), + @ClusterConfigProperty(key = GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "10"), + } +) +public class PlaintextConsumerPollTest { + + public static final int BROKER_COUNT = 3; + public static final double EPSILON = 0.1; + public static final long GROUP_MAX_SESSION_TIMEOUT_MS = 60000L; + private final ClusterInstance cluster; + private final String topic = "topic"; + private final TopicPartition tp = new TopicPartition(topic, 0); + private final TopicPartition tp2 = new TopicPartition(topic, 1); + + public PlaintextConsumerPollTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @BeforeEach + public void setup() throws InterruptedException { + cluster.createTopic(topic, 2, (short) BROKER_COUNT); + } + + @ClusterTest + public void testClassicConsumerMaxPollRecords() throws InterruptedException { + testMaxPollRecords(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerMaxPollRecords() throws InterruptedException { + testMaxPollRecords(GroupProtocol.CONSUMER); + } + + private void testMaxPollRecords(GroupProtocol groupProtocol) throws InterruptedException { + var maxPollRecords = 100; + var numRecords = 5000; + Map config = Map.of( + MAX_POLL_RECORDS_CONFIG, maxPollRecords, + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT) + ); + var startingTimestamp = System.currentTimeMillis(); + sendRecords(cluster, tp, numRecords, startingTimestamp); + try (Consumer consumer = cluster.consumer(config)) { + consumer.assign(List.of(tp)); + consumeAndVerifyRecords( + consumer, + tp, + numRecords, + maxPollRecords, + 0, + 0, + startingTimestamp, + -1 + ); + } + } + + @ClusterTest + public void testClassicConsumerMaxPollIntervalMs() throws InterruptedException { + testMaxPollIntervalMs(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 1000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + HEARTBEAT_INTERVAL_MS_CONFIG, 500, + SESSION_TIMEOUT_MS_CONFIG, 2000 + )); + } + + @ClusterTest + public void testAsyncConsumerMaxPollIntervalMs() throws InterruptedException { + testMaxPollIntervalMs(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 1000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testMaxPollIntervalMs(Map config) throws InterruptedException { + try (Consumer consumer = cluster.consumer(config)) { + var listener = new TestConsumerReassignmentListener(); + consumer.subscribe(List.of(topic), listener); + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener); + assertEquals(1, listener.callsToAssigned); + assertEquals(0, listener.callsToRevoked); + + // after we extend longer than max.poll a rebalance should be triggered + // NOTE we need to have a relatively much larger value than max.poll to let heartbeat expired for sure + TimeUnit.MILLISECONDS.sleep(3000); + + awaitRebalance(consumer, listener); + assertEquals(2, listener.callsToAssigned); + assertEquals(1, listener.callsToRevoked); + } + } + + @ClusterTest + public void testClassicConsumerMaxPollIntervalMsDelayInRevocation() throws InterruptedException { + testMaxPollIntervalMsDelayInRevocation(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 5000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + HEARTBEAT_INTERVAL_MS_CONFIG, 500, + SESSION_TIMEOUT_MS_CONFIG, 1000, + ENABLE_AUTO_COMMIT_CONFIG, false + )); + } + + 
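The delay-in-revocation tests that follow commit the consumer's current position from inside onPartitionsRevoked, relying on the member still being part of the group while the callback runs. For readers less familiar with the rebalance listener API, a compact sketch of that commit-on-revoke pattern is shown below; it is illustrative only, not part of this patch, and the class and method names are invented.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CommitOnRevokeSketch {
    // Commits the consumer's current position for every partition being revoked,
    // so whichever member takes the partition over resumes exactly where this one stopped.
    static void subscribeWithCommitOnRevoke(Consumer<byte[], byte[]> consumer, String topic) {
        consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (TopicPartition tp : partitions) {
                    offsets.put(tp, new OffsetAndMetadata(consumer.position(tp)));
                }
                // The member is still in the group while this callback runs, so a synchronous
                // commit succeeds as long as the callback finishes within the rebalance timeout;
                // exceeding the timeout gets the member fenced, as the later tests demonstrate.
                consumer.commitSync(offsets);
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // nothing to do on assignment in this sketch
            }
        });
    }
}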
@ClusterTest + public void testAsyncConsumerMaxPollIntervalMsDelayInRevocation() throws InterruptedException { + testMaxPollIntervalMsDelayInRevocation(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 5000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, false + )); + } + + private void testMaxPollIntervalMsDelayInRevocation(Map config) throws InterruptedException { + var commitCompleted = new AtomicBoolean(false); + var committedPosition = new AtomicLong(-1); + + try (Consumer consumer = cluster.consumer(config)) { + var listener = new TestConsumerReassignmentListener() { + @Override + public void onPartitionsLost(Collection partitions) { + // no op + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + if (!partitions.isEmpty() && partitions.contains(tp)) { + // on the second rebalance (after we have joined the group initially), sleep longer + // than session timeout and then try a commit. We should still be in the group, + // so the commit should succeed + Utils.sleep(1500); + committedPosition.set(consumer.position(tp)); + var offsets = Map.of(tp, new OffsetAndMetadata(committedPosition.get())); + consumer.commitSync(offsets); + commitCompleted.set(true); + } + super.onPartitionsRevoked(partitions); + } + }; + consumer.subscribe(List.of(topic), listener); + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener); + + // force a rebalance to trigger an invocation of the revocation callback while in the group + consumer.subscribe(List.of("otherTopic"), listener); + awaitRebalance(consumer, listener); + + assertEquals(0, committedPosition.get()); + assertTrue(commitCompleted.get()); + } + } + + @ClusterTest + public void testClassicConsumerMaxPollIntervalMsDelayInAssignment() throws InterruptedException { + testMaxPollIntervalMsDelayInAssignment(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 5000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + HEARTBEAT_INTERVAL_MS_CONFIG, 500, + SESSION_TIMEOUT_MS_CONFIG, 1000, + ENABLE_AUTO_COMMIT_CONFIG, false + )); + } + + @ClusterTest + public void testAsyncConsumerMaxPollIntervalMsDelayInAssignment() throws InterruptedException { + testMaxPollIntervalMsDelayInAssignment(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 5000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, false + )); + } + + private void testMaxPollIntervalMsDelayInAssignment(Map config) throws InterruptedException { + try (Consumer consumer = cluster.consumer(config)) { + var listener = new TestConsumerReassignmentListener() { + @Override + public void onPartitionsAssigned(Collection partitions) { + // sleep longer than the session timeout, we should still be in the group after invocation + Utils.sleep(1500); + super.onPartitionsAssigned(partitions); + } + }; + consumer.subscribe(List.of(topic), listener); + // rebalance to get the initial assignment + awaitRebalance(consumer, listener); + // We should still be in the group after this invocation + ensureNoRebalance(consumer, listener); + } + } + + @ClusterTest + public void testClassicConsumerMaxPollIntervalMsShorterThanPollTimeout() throws InterruptedException { + testMaxPollIntervalMsShorterThanPollTimeout(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 1000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + HEARTBEAT_INTERVAL_MS_CONFIG, 500 + )); + } + + @ClusterTest + public void 
testAsyncConsumerMaxPollIntervalMsShorterThanPollTimeout() throws InterruptedException { + testMaxPollIntervalMsShorterThanPollTimeout(Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 1000, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testMaxPollIntervalMsShorterThanPollTimeout(Map config) throws InterruptedException { + try (Consumer consumer = cluster.consumer(config)) { + var listener = new TestConsumerReassignmentListener(); + consumer.subscribe(List.of(topic), listener); + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener); + var callsToAssignedAfterFirstRebalance = listener.callsToAssigned; + + consumer.poll(Duration.ofMillis(2000)); + // If the poll above times out, it would trigger a rebalance. + // Leave some time for the rebalance to happen and check for the rebalance event. + consumer.poll(Duration.ofMillis(500)); + consumer.poll(Duration.ofMillis(500)); + + assertEquals(callsToAssignedAfterFirstRebalance, listener.callsToAssigned); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLeadWithMaxPollRecords() throws InterruptedException { + testPerPartitionLeadWithMaxPollRecords(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLeadWithMaxPollRecords() throws InterruptedException { + testPerPartitionLeadWithMaxPollRecords(GroupProtocol.CONSUMER); + } + + private void testPerPartitionLeadWithMaxPollRecords(GroupProtocol groupProtocol) throws InterruptedException { + int numMessages = 1000; + int maxPollRecords = 10; + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords", + CLIENT_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords", + MAX_POLL_RECORDS_CONFIG, maxPollRecords + ); + + sendRecords(cluster, tp, numMessages); + try (Consumer consumer = cluster.consumer(config)) { + consumer.assign(List.of(tp)); + awaitNonEmptyRecords(consumer, tp, 100); + + var tags = Map.of( + "client-id", "testPerPartitionLeadWithMaxPollRecords", + "topic", tp.topic(), + "partition", String.valueOf(tp.partition()) + ); + var lead = consumer.metrics() + .get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)); + assertEquals(maxPollRecords, (Double) lead.metricValue(), "The lead should be " + maxPollRecords); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLagWithMaxPollRecords() throws InterruptedException { + testPerPartitionLagWithMaxPollRecords(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLagWithMaxPollRecords() throws InterruptedException { + testPerPartitionLagWithMaxPollRecords(GroupProtocol.CONSUMER); + } + + private void testPerPartitionLagWithMaxPollRecords(GroupProtocol groupProtocol) throws InterruptedException { + int numMessages = 1000; + int maxPollRecords = 10; + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords", + CLIENT_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords", + MAX_POLL_RECORDS_CONFIG, maxPollRecords + ); + sendRecords(cluster, tp, numMessages); + try (Consumer consumer = cluster.consumer(config)) { + consumer.assign(List.of(tp)); + var records = awaitNonEmptyRecords(consumer, tp, 100); + + var tags = Map.of( + "client-id", "testPerPartitionLagWithMaxPollRecords", + "topic", tp.topic(), + "partition", String.valueOf(tp.partition()) + ); + var lag = 
consumer.metrics() + .get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)); + + // Count the number of records received + var recordCount = records.count(); + assertEquals( + numMessages - recordCount, + (Double) lag.metricValue(), + EPSILON, + "The lag should be " + (numMessages - recordCount) + ); + } + } + + @ClusterTest + public void runCloseClassicConsumerMultiConsumerSessionTimeoutTest() throws InterruptedException { + runMultiConsumerSessionTimeoutTest(GroupProtocol.CLASSIC, true); + } + + @ClusterTest + public void runClassicConsumerMultiConsumerSessionTimeoutTest() throws InterruptedException { + runMultiConsumerSessionTimeoutTest(GroupProtocol.CLASSIC, false); + } + + @ClusterTest + public void runCloseAsyncConsumerMultiConsumerSessionTimeoutTest() throws InterruptedException { + runMultiConsumerSessionTimeoutTest(GroupProtocol.CONSUMER, true); + } + + @ClusterTest + public void runAsyncConsumerMultiConsumerSessionTimeoutTest() throws InterruptedException { + runMultiConsumerSessionTimeoutTest(GroupProtocol.CONSUMER, false); + } + + private void runMultiConsumerSessionTimeoutTest(GroupProtocol groupProtocol, boolean closeConsumer) throws InterruptedException { + String topic1 = "topic1"; + int partitions = 6; + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, "test-group", + MAX_POLL_INTERVAL_MS_CONFIG, 100 + ); + // use consumers defined in this class plus one additional consumer + // Use topic defined in this class + one additional topic + try (Producer producer = cluster.producer(); + // create one more consumer and add it to the group; we will time out this consumer + Consumer consumer1 = cluster.consumer(config); + Consumer consumer2 = cluster.consumer(config); + Consumer timeoutConsumer = cluster.consumer(config) + ) { + sendRecords(producer, tp, 100, System.currentTimeMillis(), -1); + sendRecords(producer, tp2, 100, System.currentTimeMillis(), -1); + + Set subscriptions = new HashSet<>(); + subscriptions.add(tp); + subscriptions.add(tp2); + + cluster.createTopic(topic1, partitions, (short) BROKER_COUNT); + IntStream.range(0, partitions).forEach(partition -> { + TopicPartition topicPartition = new TopicPartition(topic1, partition); + sendRecords(producer, topicPartition, 100, System.currentTimeMillis(), -1); + subscriptions.add(topicPartition); + }); + + // first subscribe consumers that are defined in this class + List consumerPollers = new ArrayList<>(); + try { + consumerPollers.add(subscribeConsumerAndStartPolling(consumer1, List.of(topic, topic1))); + consumerPollers.add(subscribeConsumerAndStartPolling(consumer2, List.of(topic, topic1))); + + ConsumerAssignmentPoller timeoutPoller = subscribeConsumerAndStartPolling(timeoutConsumer, List.of(topic, topic1)); + consumerPollers.add(timeoutPoller); + + // validate the initial assignment + validateGroupAssignment(consumerPollers, subscriptions, null); + + // stop polling and close one of the consumers, should trigger partition re-assignment among alive consumers + timeoutPoller.shutdown(); + consumerPollers.remove(timeoutPoller); + if (closeConsumer) + timeoutConsumer.close(); + + validateGroupAssignment(consumerPollers, subscriptions, + "Did not get valid assignment for partitions " + subscriptions + " after one consumer left"); + } finally { + // done with pollers and consumers + for (ConsumerAssignmentPoller poller : consumerPollers) + poller.shutdown(); + } + } + } + + @ClusterTest + public void 
testClassicConsumerPollEventuallyReturnsRecordsWithZeroTimeout() throws InterruptedException { + testPollEventuallyReturnsRecordsWithZeroTimeout(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerPollEventuallyReturnsRecordsWithZeroTimeout() throws InterruptedException { + testPollEventuallyReturnsRecordsWithZeroTimeout(GroupProtocol.CONSUMER); + } + + private void testPollEventuallyReturnsRecordsWithZeroTimeout(GroupProtocol groupProtocol) throws InterruptedException { + int numMessages = 100; + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + sendRecords(cluster, tp, numMessages); + + try (Consumer consumer = cluster.consumer(config)) { + consumer.subscribe(List.of(topic)); + var records = awaitNonEmptyRecords(consumer, tp, 0L); + assertEquals(numMessages, records.count()); + } + + } + + @ClusterTest + public void testClassicConsumerNoOffsetForPartitionExceptionOnPollZero() throws InterruptedException { + testNoOffsetForPartitionExceptionOnPollZero(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerNoOffsetForPartitionExceptionOnPollZero() throws InterruptedException { + testNoOffsetForPartitionExceptionOnPollZero(GroupProtocol.CONSUMER); + } + + private void testNoOffsetForPartitionExceptionOnPollZero(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + AUTO_OFFSET_RESET_CONFIG, "none" + ); + try (Consumer consumer = cluster.consumer(config)) { + consumer.assign(List.of(tp)); + + // continuous poll should eventually fail because there is no offset reset strategy set + // (fail only when resetting positions after coordinator is known) + waitForPollThrowException(consumer, NoOffsetForPartitionException.class); + } + } + + @ClusterTest + public void testClassicConsumerRecoveryOnPollAfterDelayedRebalance() throws InterruptedException { + testConsumerRecoveryOnPollAfterDelayedRebalance(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerRecoveryOnPollAfterDelayedRebalance() throws InterruptedException { + testConsumerRecoveryOnPollAfterDelayedRebalance(GroupProtocol.CONSUMER); + } + + public void testConsumerRecoveryOnPollAfterDelayedRebalance(GroupProtocol groupProtocol) throws InterruptedException { + var rebalanceTimeout = 1000; + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + MAX_POLL_INTERVAL_MS_CONFIG, 1000, + ENABLE_AUTO_COMMIT_CONFIG, false + ); + try (Producer producer = cluster.producer(); + // Subscribe consumer that will reconcile in time on the first rebalance, but will + // take longer than the allowed timeout in the second rebalance (onPartitionsRevoked) to get fenced by the broker. 
+ // The consumer should recover after being fenced (automatically rejoin the group on the next call to poll) + Consumer consumer = cluster.consumer(config) + ) { + var numMessages = 10; + var otherTopic = "otherTopic"; + var tpOther = new TopicPartition(otherTopic, 0); + cluster.createTopic(otherTopic, 1, (short) BROKER_COUNT); + sendRecords(producer, tpOther, numMessages, System.currentTimeMillis(), -1); + sendRecords(producer, tp, numMessages, System.currentTimeMillis(), -1); + + var rebalanceTimeoutExceeded = new AtomicBoolean(false); + + var listener = new TestConsumerReassignmentListener() { + @Override + public void onPartitionsRevoked(Collection partitions) { + if (!partitions.isEmpty() && partitions.contains(tp)) { + // on the second rebalance (after we have joined the group initially), sleep longer + // than rebalance timeout to get fenced. + Utils.sleep(rebalanceTimeout + 500); + rebalanceTimeoutExceeded.set(true); + } + super.onPartitionsRevoked(partitions); + } + }; + // Subscribe to get first assignment (no delays) and verify consumption + consumer.subscribe(List.of(topic), listener); + var records = awaitNonEmptyRecords(consumer, tp, 0L); + assertEquals(numMessages, records.count()); + + // Subscribe to different topic. This will trigger the delayed revocation exceeding rebalance timeout and get fenced + consumer.subscribe(List.of(otherTopic), listener); + ClientsTestUtils.pollUntilTrue( + consumer, + rebalanceTimeoutExceeded::get, + "Timeout waiting for delayed callback to complete" + ); + + // Verify consumer recovers after being fenced, being able to continue consuming. + // (The member should automatically rejoin on the next poll, with the new topic as subscription) + records = awaitNonEmptyRecords(consumer, tpOther, 0L); + assertEquals(numMessages, records.count()); + } + } + + /** + * Subscribes consumer 'consumer' to a given list of topics 'topicsToSubscribe', creates + * consumer poller and starts polling. + * Assumes that the consumer is not subscribed to any topics yet + * + * @param topicsToSubscribe topics that this consumer will subscribe to + * @return consumer poller for the given consumer + */ + private ConsumerAssignmentPoller subscribeConsumerAndStartPolling( + Consumer consumer, + List topicsToSubscribe + ) { + assertEquals(0, consumer.assignment().size()); + ConsumerAssignmentPoller consumerPoller; + consumerPoller = new ConsumerAssignmentPoller(consumer, topicsToSubscribe); + consumerPoller.start(); + return consumerPoller; + } + + /** + * Check whether partition assignment is valid + * Assumes partition assignment is valid iff + * 1. Every consumer got assigned at least one partition + * 2. Each partition is assigned to only one consumer + * 3. 
Every partition is assigned to one of the consumers
+     *
+     * @param assignments set of consumer assignments; one per each consumer
+     * @param partitions set of partitions that consumers subscribed to
+     * @return true if partition assignment is valid
+     */
+    private boolean isPartitionAssignmentValid(
+        List<Set<TopicPartition>> assignments,
+        Set<TopicPartition> partitions
+    ) {
+        // check that all consumers got at least one partition
+        var allNonEmptyAssignments = assignments
+            .stream()
+            .noneMatch(Set::isEmpty);
+
+        if (!allNonEmptyAssignments) {
+            // at least one consumer got empty assignment
+            return false;
+        }
+
+        // make sure that sum of all partitions to all consumers equals total number of partitions
+        var totalPartitionsInAssignments = 0;
+        for (var assignment : assignments) {
+            totalPartitionsInAssignments += assignment.size();
+        }
+
+        if (totalPartitionsInAssignments != partitions.size()) {
+            // either same partitions got assigned to more than one consumer or some
+            // partitions were not assigned
+            return false;
+        }
+
+        // The above checks could miss the case where one or more partitions were assigned to more
+        // than one consumer and the same number of partitions were missing from assignments.
+        // Make sure that all unique assignments are the same as 'partitions'
+        var uniqueAssignedPartitions = new HashSet<>();
+        for (var assignment : assignments) {
+            uniqueAssignedPartitions.addAll(assignment);
+        }
+
+        return uniqueAssignedPartitions.equals(partitions);
+    }
+
+    /**
+     * Wait for consumers to get partition assignment and validate it.
+     *
+     * @param consumerPollers consumer pollers corresponding to the consumer group we are testing
+     * @param subscriptions set of all topic partitions
+     * @param msg message to print when waiting for/validating assignment fails
+     */
+    private void validateGroupAssignment(
+        List<ConsumerAssignmentPoller> consumerPollers,
+        Set<TopicPartition> subscriptions,
+        String msg
+    ) throws InterruptedException {
+        List<Set<TopicPartition>> assignments = new ArrayList<>();
+        TestUtils.waitForCondition(() -> {
+            assignments.clear();
+            for (ConsumerAssignmentPoller poller : consumerPollers) {
+                assignments.add(poller.consumerAssignment());
+            }
+            return isPartitionAssignmentValid(assignments, subscriptions);
+        }, GROUP_MAX_SESSION_TIMEOUT_MS * 3,
+            msg != null ? msg : "Did not get valid assignment for partitions " + subscriptions + ". Instead, got " + assignments
+        );
+    }
+
+    private ConsumerRecords<byte[], byte[]> awaitNonEmptyRecords(
+        Consumer<byte[], byte[]> consumer,
+        TopicPartition partition,
+        long pollTimeoutMs
+    ) throws InterruptedException {
+        List<ConsumerRecords<byte[], byte[]>> result = new ArrayList<>();
+        TestUtils.waitForCondition(() -> {
+            var records = consumer.poll(Duration.ofMillis(pollTimeoutMs));
+            result.add(records);
+            return !records.records(partition).isEmpty();
+        }, "Consumer did not consume any messages for partition " + partition + " before timeout.");
+        return result.get(result.size() - 1);
+    }
+}
diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerSubscriptionTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerSubscriptionTest.java
new file mode 100644
index 0000000000000..ff840ebf2ea7a
--- /dev/null
+++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerSubscriptionTest.java
@@ -0,0 +1,628 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.ClientsTestUtils.TestConsumerReassignmentListener; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.InvalidRegularExpression; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.BeforeEach; + +import java.time.Duration; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +import static org.apache.kafka.clients.ClientsTestUtils.awaitAssignment; +import static org.apache.kafka.clients.ClientsTestUtils.awaitRebalance; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; +import static org.apache.kafka.clients.ClientsTestUtils.waitForPollThrowException; +import static org.apache.kafka.clients.CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.METADATA_MAX_AGE_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = PlaintextConsumerSubscriptionTest.BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, value = "60000"), + @ClusterConfigProperty(key = GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "10"), + } +) +public class 
PlaintextConsumerSubscriptionTest { + + public static final int BROKER_COUNT = 3; + private final ClusterInstance cluster; + private final String topic = "topic"; + private final TopicPartition tp = new TopicPartition(topic, 0); + + public PlaintextConsumerSubscriptionTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @BeforeEach + public void setup() throws InterruptedException { + cluster.createTopic(topic, 2, (short) BROKER_COUNT); + } + + @ClusterTest + public void testClassicConsumerPatternSubscription() throws InterruptedException { + testPatternSubscription(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerPatternSubscription() throws InterruptedException { + testPatternSubscription(GroupProtocol.CONSUMER); + } + + /** + * Verifies that pattern subscription performs as expected. + * The pattern matches the topics 'topic' and 'tblablac', but not 'tblablak' or 'tblab1'. + * It is expected that the consumer is subscribed to all partitions of 'topic' and 'tblablac' after the subscription + * when metadata is refreshed. + * When a new topic 'tsomec' is added afterward, it is expected that upon the next metadata refresh the consumer + * becomes subscribed to this new topic and all partitions of that topic are assigned to it. + */ + public void testPatternSubscription(GroupProtocol groupProtocol) throws InterruptedException { + var numRecords = 10000; + Map config = Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 6000, + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, false, + METADATA_MAX_AGE_CONFIG, 100 + ); + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(config) + ) { + sendRecords(producer, tp, numRecords, System.currentTimeMillis()); + + var topic1 = "tblablac"; // matches subscribed pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(topic1, 0), 1000, System.currentTimeMillis()); + sendRecords(producer, new TopicPartition(topic1, 1), 1000, System.currentTimeMillis()); + + var topic2 = "tblablak"; // does not match subscribed pattern + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(topic2, 0), 1000, System.currentTimeMillis()); + sendRecords(producer, new TopicPartition(topic2, 1), 1000, System.currentTimeMillis()); + + var topic3 = "tblab1"; // does not match subscribed pattern + cluster.createTopic(topic3, 2, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(topic3, 0), 1000, System.currentTimeMillis()); + sendRecords(producer, new TopicPartition(topic3, 1), 1000, System.currentTimeMillis()); + + assertEquals(0, consumer.assignment().size()); + var pattern = Pattern.compile("t.*c"); + consumer.subscribe(pattern, new TestConsumerReassignmentListener()); + + Set assignment = new HashSet<>(); + assignment.add(new TopicPartition(topic, 0)); + assignment.add(new TopicPartition(topic, 1)); + assignment.add(new TopicPartition(topic1, 0)); + assignment.add(new TopicPartition(topic1, 1)); + + awaitAssignment(consumer, assignment); + + var topic4 = "tsomec"; // matches subscribed pattern + cluster.createTopic(topic4, 2, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(topic4, 0), 1000, System.currentTimeMillis()); + sendRecords(producer, new TopicPartition(topic4, 1), 1000, System.currentTimeMillis()); + + assignment.add(new TopicPartition(topic4, 0)); + assignment.add(new TopicPartition(topic4, 1)); + + awaitAssignment(consumer, 
assignment); + + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + } + } + + @ClusterTest + public void testClassicConsumerSubsequentPatternSubscription() throws InterruptedException { + testSubsequentPatternSubscription(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerSubsequentPatternSubscription() throws InterruptedException { + testSubsequentPatternSubscription(GroupProtocol.CONSUMER); + } + + /** + * Verifies that a second call to pattern subscription succeeds and performs as expected. + * The initial subscription is to a pattern that matches two topics 'topic' and 'foo'. + * The second subscription is to a pattern that matches 'foo' and a new topic 'bar'. + * It is expected that the consumer is subscribed to all partitions of 'topic' and 'foo' after + * the first subscription, and to all partitions of 'foo' and 'bar' after the second. + * The metadata refresh interval is intentionally increased to a large enough value to guarantee + * that it is the subscription call that triggers a metadata refresh, and not the timeout. + */ + public void testSubsequentPatternSubscription(GroupProtocol groupProtocol) throws InterruptedException { + var numRecords = 10000; + Map config = Map.of( + MAX_POLL_INTERVAL_MS_CONFIG, 6000, + GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT), + ENABLE_AUTO_COMMIT_CONFIG, false, + METADATA_MAX_AGE_CONFIG, 30000 + ); + try (Consumer consumer = cluster.consumer(config); + Producer producer = cluster.producer() + ) { + sendRecords(producer, tp, numRecords, System.currentTimeMillis()); + + // the first topic ('topic') matches first subscription pattern only + var fooTopic = "foo"; // matches both subscription patterns + cluster.createTopic(fooTopic, 1, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(fooTopic, 0), 1000, System.currentTimeMillis()); + + assertEquals(0, consumer.assignment().size()); + + var pattern = Pattern.compile(".*o.*"); // only 'topic' and 'foo' match this + consumer.subscribe(pattern, new TestConsumerReassignmentListener()); + + Set assignment = new HashSet<>(); + assignment.add(new TopicPartition(topic, 0)); + assignment.add(new TopicPartition(topic, 1)); + assignment.add(new TopicPartition(fooTopic, 0)); + + awaitAssignment(consumer, assignment); + + var barTopic = "bar"; // matches the next subscription pattern + cluster.createTopic(barTopic, 1, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(barTopic, 0), 1000, System.currentTimeMillis()); + + var pattern2 = Pattern.compile("..."); // only 'foo' and 'bar' match this + consumer.subscribe(pattern2, new TestConsumerReassignmentListener()); + + // Remove topic partitions from assignment + assignment.remove(new TopicPartition(topic, 0)); + assignment.remove(new TopicPartition(topic, 1)); + + // Add bar topic partition to assignment + assignment.add(new TopicPartition(barTopic, 0)); + + awaitAssignment(consumer, assignment); + + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + } + } + + @ClusterTest + public void testClassicConsumerPatternUnsubscription() throws InterruptedException { + testPatternUnsubscription(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerPatternUnsubscription() throws InterruptedException { + testPatternUnsubscription(GroupProtocol.CONSUMER); + } + + /** + * Verifies that pattern unsubscription performs as expected. + * The pattern matches the topics 'topic' and 'tblablac'. 
+ * It is expected that the consumer is subscribed to all partitions of 'topic' and 'tblablac' after the subscription + * when metadata is refreshed. + * When consumer unsubscribes from all its subscriptions, it is expected that its assignments are cleared right away. + */ + public void testPatternUnsubscription(GroupProtocol groupProtocol) throws InterruptedException { + var numRecords = 10000; + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(config) + ) { + sendRecords(producer, tp, numRecords, System.currentTimeMillis()); + + var topic1 = "tblablac"; // matches the subscription pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + sendRecords(producer, new TopicPartition(topic1, 0), 1000, System.currentTimeMillis()); + sendRecords(producer, new TopicPartition(topic1, 1), 1000, System.currentTimeMillis()); + + assertEquals(0, consumer.assignment().size()); + + consumer.subscribe(Pattern.compile("t.*c"), new TestConsumerReassignmentListener()); + + Set assignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1) + ); + awaitAssignment(consumer, assignment); + + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + } + } + + @ClusterTest + public void testAsyncConsumerRe2JPatternSubscription() throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var topic1 = "tblablac"; // matches subscribed pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + + var topic2 = "tblablak"; // does not match subscribed pattern + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + + var topic3 = "tblab1"; // does not match subscribed pattern + cluster.createTopic(topic3, 2, (short) BROKER_COUNT); + + assertEquals(0, consumer.assignment().size()); + var pattern = new SubscriptionPattern("t.*c"); + consumer.subscribe(pattern); + + Set assignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1) + ); + awaitAssignment(consumer, assignment); + + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + // Subscribe to a different pattern to match topic2 (that did not match before) + pattern = new SubscriptionPattern(topic2 + ".*"); + consumer.subscribe(pattern); + + assignment = Set.of( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1) + ); + awaitAssignment(consumer, assignment); + } + } + + @ClusterTest + public void testAsyncConsumerRe2JPatternSubscriptionFetch() throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var topic1 = "topic1"; // matches subscribed pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + assertEquals(0, consumer.assignment().size()); + + var pattern = new SubscriptionPattern("topic.*"); + consumer.subscribe(pattern); + + Set assignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1) + ); + awaitAssignment(consumer, assignment); + + var totalRecords = 10; + var startingTimestamp = System.currentTimeMillis(); + var tp = new 
TopicPartition(topic1, 0); + sendRecords(cluster, tp, totalRecords, startingTimestamp); + consumeAndVerifyRecords(consumer, tp, totalRecords, 0, 0, startingTimestamp); + } + } + + @ClusterTest + public void testAsyncConsumerRe2JPatternExpandSubscription() throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var topic1 = "topic1"; // matches first pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + + var topic2 = "topic2"; // does not match first pattern + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + + assertEquals(0, consumer.assignment().size()); + var pattern = new SubscriptionPattern("topic1.*"); + consumer.subscribe(pattern); + + Set assignment = Set.of( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1) + ); + awaitAssignment(consumer, assignment); + + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + + // Subscribe to a different pattern that should match + // the same topics the member already had plus new ones + pattern = new SubscriptionPattern("topic1|topic2"); + consumer.subscribe(pattern); + + Set expandedAssignment = new HashSet<>(assignment); + expandedAssignment.add(new TopicPartition(topic2, 0)); + expandedAssignment.add(new TopicPartition(topic2, 1)); + awaitAssignment(consumer, expandedAssignment); + } + } + + @ClusterTest + public void testTopicIdSubscriptionWithRe2JRegexAndOffsetsFetch() throws InterruptedException { + var topic1 = "topic1"; // matches subscribed pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try ( + Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(config) + ) { + assertEquals(0, consumer.assignment().size()); + + // Subscribe to broker-side regex and fetch. This will require metadata for topic IDs. + var pattern = new SubscriptionPattern("topic.*"); + consumer.subscribe(pattern); + var assignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)); + awaitAssignment(consumer, assignment); + var totalRecords = 10; + var startingTimestamp = System.currentTimeMillis(); + var tp = new TopicPartition(topic1, 0); + sendRecords(producer, tp, totalRecords, startingTimestamp); + consumeAndVerifyRecords(consumer, tp, totalRecords, 0, 0, startingTimestamp); + + // Fetch offsets for known and unknown topics. This will require metadata for topic names temporarily (transient topics) + var topic2 = "newTopic2"; + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + var unassignedPartition = new TopicPartition(topic2, 0); + var offsets = consumer.endOffsets(List.of(unassignedPartition, tp)); + var expectedOffsets = Map.of( + unassignedPartition, 0L, + tp, (long) totalRecords); + assertEquals(expectedOffsets, offsets); + + // Fetch records again with the regex subscription. This will require metadata for topic IDs again. 
+ sendRecords(producer, tp, totalRecords, startingTimestamp); + consumeAndVerifyRecords(consumer, tp, totalRecords, totalRecords, 0, startingTimestamp); + } + } + + @ClusterTest + public void testRe2JPatternSubscriptionAndTopicSubscription() throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var topic1 = "topic1"; // matches subscribed pattern + cluster.createTopic(topic1, 2, (short) BROKER_COUNT); + + var topic11 = "topic11"; // matches subscribed pattern + cluster.createTopic(topic11, 2, (short) BROKER_COUNT); + + var topic2 = "topic2"; // does not match subscribed pattern + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + + assertEquals(0, consumer.assignment().size()); + // Subscribe to pattern + var pattern = new SubscriptionPattern("topic1.*"); + consumer.subscribe(pattern); + + Set patternAssignment = Set.of( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1), + new TopicPartition(topic11, 0), + new TopicPartition(topic11, 1) + ); + awaitAssignment(consumer, patternAssignment); + consumer.unsubscribe(); + assertEquals(0, consumer.assignment().size()); + + // Subscribe to explicit topic names + consumer.subscribe(List.of(topic2)); + + Set assignment = Set.of( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1) + ); + awaitAssignment(consumer, assignment); + consumer.unsubscribe(); + + // Subscribe to pattern again + consumer.subscribe(pattern); + awaitAssignment(consumer, patternAssignment); + } + } + + + @ClusterTest + public void testRe2JPatternSubscriptionInvalidRegex() throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + assertEquals(0, consumer.assignment().size()); + + var pattern = new SubscriptionPattern("(t.*c"); + consumer.subscribe(pattern); + + waitForPollThrowException(consumer, InvalidRegularExpression.class); + consumer.unsubscribe(); + } + } + + @ClusterTest + public void testClassicConsumerExpandingTopicSubscriptions() throws InterruptedException { + testExpandingTopicSubscriptions(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerExpandingTopicSubscriptions() throws InterruptedException { + testExpandingTopicSubscriptions(GroupProtocol.CONSUMER); + } + + public void testExpandingTopicSubscriptions(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var otherTopic = "other"; + + Set initialAssignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1) + ); + consumer.subscribe(List.of(topic)); + awaitAssignment(consumer, initialAssignment); + + cluster.createTopic(otherTopic, 2, (short) BROKER_COUNT); + + Set expandedAssignment = new HashSet<>(initialAssignment); + expandedAssignment.add(new TopicPartition(otherTopic, 0)); + expandedAssignment.add(new TopicPartition(otherTopic, 1)); + + consumer.subscribe(List.of(topic, otherTopic)); + awaitAssignment(consumer, expandedAssignment); + } + } + + @ClusterTest + public void testClassicConsumerShrinkingTopicSubscriptions() throws InterruptedException { + testShrinkingTopicSubscriptions(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerShrinkingTopicSubscriptions() throws InterruptedException { 
+ testShrinkingTopicSubscriptions(GroupProtocol.CONSUMER); + } + + public void testShrinkingTopicSubscriptions(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + var otherTopic = "other"; + cluster.createTopic(otherTopic, 2, (short) BROKER_COUNT); + + Set initialAssignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(otherTopic, 0), + new TopicPartition(otherTopic, 1) + ); + consumer.subscribe(List.of(topic, otherTopic)); + awaitAssignment(consumer, initialAssignment); + + Set shrunkenAssignment = Set.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1) + ); + consumer.subscribe(List.of(topic)); + awaitAssignment(consumer, shrunkenAssignment); + } + } + + @ClusterTest + public void testClassicConsumerUnsubscribeTopic() throws InterruptedException { + testUnsubscribeTopic(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + SESSION_TIMEOUT_MS_CONFIG, 100, // timeout quickly to avoid slow test + HEARTBEAT_INTERVAL_MS_CONFIG, 30 + )); + } + + @ClusterTest + public void testAsyncConsumerUnsubscribeTopic() throws InterruptedException { + testUnsubscribeTopic(Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT))); + } + + public void testUnsubscribeTopic(Map config) throws InterruptedException { + try (Consumer consumer = cluster.consumer(config)) { + var listener = new TestConsumerReassignmentListener(); + consumer.subscribe(List.of(topic), listener); + + // the initial subscription should cause a callback execution + awaitRebalance(consumer, listener); + + consumer.subscribe(List.of()); + assertEquals(0, consumer.assignment().size()); + } + } + + @ClusterTest + public void testClassicConsumerSubscribeInvalidTopicCanUnsubscribe() throws InterruptedException { + testSubscribeInvalidTopicCanUnsubscribe(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerSubscribeInvalidTopicCanUnsubscribe() throws InterruptedException { + testSubscribeInvalidTopicCanUnsubscribe(GroupProtocol.CONSUMER); + } + + public void testSubscribeInvalidTopicCanUnsubscribe(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + setupSubscribeInvalidTopic(consumer); + assertDoesNotThrow(consumer::unsubscribe); + } + } + + @ClusterTest + public void testClassicConsumerSubscribeInvalidTopicCanClose() throws InterruptedException { + testSubscribeInvalidTopicCanClose(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerSubscribeInvalidTopicCanClose() throws InterruptedException { + testSubscribeInvalidTopicCanClose(GroupProtocol.CONSUMER); + } + + public void testSubscribeInvalidTopicCanClose(GroupProtocol groupProtocol) throws InterruptedException { + Map config = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + try (Consumer consumer = cluster.consumer(config)) { + setupSubscribeInvalidTopic(consumer); + assertDoesNotThrow(() -> consumer.close()); + } + } + + private void setupSubscribeInvalidTopic(Consumer consumer) throws InterruptedException { + // Invalid topic name due to space + var invalidTopicName = "topic abc"; + consumer.subscribe(List.of(invalidTopicName)); + + InvalidTopicException[] exception = {null}; 
+ TestUtils.waitForCondition(() -> { + try { + consumer.poll(Duration.ofMillis(500)); + } catch (InvalidTopicException e) { + exception[0] = e; + } catch (Throwable e) { + fail("An InvalidTopicException should be thrown. But " + e.getClass() + " is thrown"); + } + return exception[0] != null; + }, 5000, "An InvalidTopicException should be thrown."); + + assertEquals("Invalid topics: [" + invalidTopicName + "]", exception[0].getMessage()); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerTest.java new file mode 100644 index 0000000000000..bd92f0c56851e --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/PlaintextConsumerTest.java @@ -0,0 +1,1726 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import kafka.server.KafkaBroker; + +import org.apache.kafka.clients.ClientsTestUtils.TestConsumerReassignmentListener; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidGroupIdException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.header.Headers; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Flaky; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.server.quota.QuotaType; +import org.apache.kafka.test.MockConsumerInterceptor; 
+import org.apache.kafka.test.MockProducerInterceptor; +import org.apache.kafka.test.TestUtils; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.IntStream; + +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.BROKER_COUNT; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.TOPIC; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.TP; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testClusterResourceListener; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testCoordinatorFailover; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testSimpleConsumption; +import static org.apache.kafka.clients.ClientsTestUtils.awaitAssignment; +import static org.apache.kafka.clients.ClientsTestUtils.awaitRebalance; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecords; +import static org.apache.kafka.clients.ClientsTestUtils.consumeAndVerifyRecordsWithTimeTypeLogAppend; +import static org.apache.kafka.clients.ClientsTestUtils.consumeRecords; +import static org.apache.kafka.clients.ClientsTestUtils.sendAndAwaitAsyncCommit; +import static org.apache.kafka.clients.ClientsTestUtils.sendRecords; +import static org.apache.kafka.clients.CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.METADATA_MAX_AGE_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.CLIENT_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_INSTANCE_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.ISOLATION_LEVEL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.LINGER_MS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG; +import static 
org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, value = "60000"), + @ClusterConfigProperty(key = GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "10"), + } +) +public class PlaintextConsumerTest { + + private final ClusterInstance cluster; + public static final double EPSILON = 0.1; + + public PlaintextConsumerTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @ClusterTest + public void testClassicConsumerSimpleConsumption() throws InterruptedException { + testSimpleConsumption(cluster, Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerSimpleConsumption() throws InterruptedException { + testSimpleConsumption(cluster, Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testClassicConsumerClusterResourceListener() throws InterruptedException { + testClusterResourceListener(cluster, Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerClusterResourceListener() throws InterruptedException { + testClusterResourceListener(cluster, Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testClassicConsumerCoordinatorFailover() throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + SESSION_TIMEOUT_MS_CONFIG, 5001, + HEARTBEAT_INTERVAL_MS_CONFIG, 1000, + // Use higher poll timeout to avoid consumer leaving the group due to timeout + MAX_POLL_INTERVAL_MS_CONFIG, 15000 + ); + testCoordinatorFailover(cluster, config); + } + + @ClusterTest + public void testAsyncConsumeCoordinatorFailover() throws InterruptedException { + Map config = Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + // Use higher poll timeout to avoid consumer leaving the group due to timeout + MAX_POLL_INTERVAL_MS_CONFIG, 15000 + ); + testCoordinatorFailover(cluster, config); + } + + @ClusterTest + public void testClassicConsumerHeaders() throws Exception { + testHeaders(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerHeaders() throws Exception { + testHeaders(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void 
testHeaders(Map consumerConfig) throws Exception { + var numRecords = 1; + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var record = new ProducerRecord<>(TP.topic(), TP.partition(), null, "key".getBytes(), "value".getBytes()); + record.headers().add("headerKey", "headerValue".getBytes()); + record.headers().add("headerKey2", "headerValue2".getBytes()); + record.headers().add("headerKey3", "headerValue3".getBytes()); + producer.send(record); + producer.flush(); + + assertEquals(0, consumer.assignment().size()); + consumer.assign(List.of(TP)); + assertEquals(1, consumer.assignment().size()); + + consumer.seek(TP, 0); + var records = consumeRecords(consumer, numRecords); + assertEquals(numRecords, records.size()); + + var header = records.get(0).headers().lastHeader("headerKey"); + assertEquals("headerValue", header == null ? null : new String(header.value())); + + // Test the order of headers in a record is preserved when producing and consuming + Header[] headers = records.get(0).headers().toArray(); + assertEquals("headerKey", headers[0].key()); + assertEquals("headerKey2", headers[1].key()); + assertEquals("headerKey3", headers[2].key()); + } + } + + @ClusterTest + public void testClassicConsumerHeadersSerializerDeserializer() throws Exception { + testHeadersSerializeDeserialize(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerHeadersSerializerDeserializer() throws Exception { + testHeadersSerializeDeserialize(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testHeadersSerializeDeserialize(Map config) throws InterruptedException { + var numRecords = 1; + Map consumerConfig = new HashMap<>(config); + consumerConfig.put(VALUE_DESERIALIZER_CLASS_CONFIG, DeserializerImpl.class); + Map producerConfig = Map.of( + VALUE_SERIALIZER_CLASS_CONFIG, SerializerImpl.class.getName() + ); + + try (Producer producer = cluster.producer(producerConfig); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + producer.send(new ProducerRecord<>( + TP.topic(), + TP.partition(), + null, + "key".getBytes(), + "value".getBytes()) + ); + + assertEquals(0, consumer.assignment().size()); + consumer.assign(List.of(TP)); + assertEquals(1, consumer.assignment().size()); + + consumer.seek(TP, 0); + assertEquals(numRecords, consumeRecords(consumer, numRecords).size()); + } + } + + @ClusterTest + public void testClassicConsumerAutoOffsetReset() throws Exception { + testAutoOffsetReset(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerAutoOffsetReset() throws Exception { + testAutoOffsetReset(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testAutoOffsetReset(Map consumerConfig) throws Exception { + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, 1, startingTimestamp); + consumer.assign(List.of(TP)); + consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp); + } + } + + @ClusterTest + public void testClassicConsumerGroupConsumption() throws Exception { + testGroupConsumption(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest 
+ public void testAsyncConsumerGroupConsumption() throws Exception { + testGroupConsumption(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testGroupConsumption(Map consumerConfig) throws Exception { + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, 10, startingTimestamp); + consumer.subscribe(List.of(TOPIC)); + consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp); + } + } + + @ClusterTest + public void testClassicConsumerPartitionsFor() throws Exception { + testPartitionsFor(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPartitionsFor() throws Exception { + testPartitionsFor(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPartitionsFor(Map consumerConfig) throws Exception { + var numParts = 2; + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + cluster.createTopic("part-test", numParts, (short) 1); + + try (var consumer = cluster.consumer(consumerConfig)) { + var partitions = consumer.partitionsFor(TOPIC); + assertNotNull(partitions); + assertEquals(2, partitions.size()); + } + } + + @ClusterTest + public void testClassicConsumerPartitionsForAutoCreate() throws Exception { + testPartitionsForAutoCreate(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPartitionsForAutoCreate() throws Exception { + testPartitionsForAutoCreate(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPartitionsForAutoCreate(Map consumerConfig) throws Exception { + try (var consumer = cluster.consumer(consumerConfig)) { + // First call would create the topic + consumer.partitionsFor("non-exist-topic"); + TestUtils.waitForCondition( + () -> !consumer.partitionsFor("non-exist-topic").isEmpty(), + "Timed out while awaiting non empty partitions." 
+ ); + } + } + + @ClusterTest + public void testClassicConsumerPartitionsForInvalidTopic() { + testPartitionsForInvalidTopic(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPartitionsForInvalidTopic() { + testPartitionsForInvalidTopic(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPartitionsForInvalidTopic(Map consumerConfig) { + try (var consumer = cluster.consumer(consumerConfig)) { + assertThrows(InvalidTopicException.class, () -> consumer.partitionsFor(";3# ads,{234")); + } + } + + @ClusterTest + public void testClassicConsumerSeek() throws Exception { + testSeek( + Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerSeek() throws Exception { + testSeek( + Map.of(GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testSeek(Map consumerConfig) throws Exception { + var totalRecords = 50; + var mid = totalRecords / 2; + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = 0; + sendRecords(producer, TP, totalRecords, startingTimestamp); + + consumer.assign(List.of(TP)); + consumer.seekToEnd(List.of(TP)); + assertEquals(totalRecords, consumer.position(TP)); + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty()); + + consumer.seekToBeginning(List.of(TP)); + assertEquals(0, consumer.position(TP)); + consumeAndVerifyRecords(consumer, TP, 1, 0, 0, startingTimestamp); + + consumer.seek(TP, mid); + assertEquals(mid, consumer.position(TP)); + + consumeAndVerifyRecords(consumer, TP, 1, mid, mid, mid); + + // Test seek compressed message + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + sendCompressedMessages(totalRecords, tp2); + consumer.assign(List.of(tp2)); + + consumer.seekToEnd(List.of(tp2)); + assertEquals(totalRecords, consumer.position(tp2)); + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty()); + + consumer.seekToBeginning(List.of(tp2)); + assertEquals(0L, consumer.position(tp2)); + consumeAndVerifyRecords(consumer, tp2, 1, 0); + + consumer.seek(tp2, mid); + assertEquals(mid, consumer.position(tp2)); + consumeAndVerifyRecords(consumer, tp2, 1, mid, mid, mid); + } + } + + @ClusterTest + public void testClassicConsumerPartitionPauseAndResume() throws Exception { + testPartitionPauseAndResume(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPartitionPauseAndResume() throws Exception { + testPartitionPauseAndResume(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPartitionPauseAndResume(Map consumerConfig) throws Exception { + var partitions = List.of(TP); + var numRecords = 5; + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, numRecords, startingTimestamp); + + consumer.assign(partitions); + consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp); + consumer.pause(partitions); + startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, numRecords, 
startingTimestamp); + assertTrue(consumer.poll(Duration.ofMillis(100)).isEmpty()); + consumer.resume(partitions); + consumeAndVerifyRecords(consumer, TP, numRecords, 5, 0, startingTimestamp); + } + } + + @ClusterTest + public void testClassicConsumerInterceptors() throws Exception { + testInterceptors(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerInterceptors() throws Exception { + testInterceptors(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testInterceptors(Map consumerConfig) throws Exception { + var appendStr = "mock"; + MockConsumerInterceptor.resetCounters(); + MockProducerInterceptor.resetCounters(); + + // create producer with interceptor + Map producerConfig = Map.of( + ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName(), + KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(), + VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(), + "mock.interceptor.append", appendStr + ); + // create consumer with interceptor + Map consumerConfigOverride = new HashMap<>(consumerConfig); + consumerConfigOverride.put(INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName()); + consumerConfigOverride.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerConfigOverride.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + + try (Producer producer = cluster.producer(producerConfig); + Consumer consumer = cluster.consumer(consumerConfigOverride) + ) { + // produce records + var numRecords = 10; + List> futures = new ArrayList<>(); + for (var i = 0; i < numRecords; i++) { + Future future = producer.send( + new ProducerRecord<>(TP.topic(), TP.partition(), "key " + i, "value " + i) + ); + futures.add(future); + } + + // Wait for all sends to complete + futures.forEach(future -> assertDoesNotThrow(() -> future.get())); + + assertEquals(numRecords, MockProducerInterceptor.ONSEND_COUNT.intValue()); + assertEquals(numRecords, MockProducerInterceptor.ON_SUCCESS_COUNT.intValue()); + + // send invalid record + assertThrows( + Throwable.class, + () -> producer.send(null), + "Should not allow sending a null record" + ); + assertEquals( + 1, + MockProducerInterceptor.ON_ERROR_COUNT.intValue(), + "Interceptor should be notified about exception" + ); + assertEquals( + 0, + MockProducerInterceptor.ON_ERROR_WITH_METADATA_COUNT.intValue(), + "Interceptor should not receive metadata with an exception when record is null" + ); + + consumer.assign(List.of(TP)); + consumer.seek(TP, 0); + + // consume and verify that values are modified by interceptors + var records = consumeRecords(consumer, numRecords); + for (var i = 0; i < numRecords; i++) { + ConsumerRecord record = records.get(i); + assertEquals("key " + i, record.key()); + assertEquals(("value " + i + appendStr).toUpperCase(Locale.ROOT), record.value()); + } + + // commit sync and verify onCommit is called + var commitCountBefore = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue(); + consumer.commitSync(Map.of(TP, new OffsetAndMetadata(2L, "metadata"))); + OffsetAndMetadata metadata = consumer.committed(Set.of(TP)).get(TP); + assertEquals(2, metadata.offset()); + assertEquals("metadata", metadata.metadata()); + assertEquals(commitCountBefore + 1, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue()); + + // commit async and verify onCommit is called + var offsetsToCommit = Map.of(TP, new 
OffsetAndMetadata(5L, null)); + sendAndAwaitAsyncCommit(consumer, Optional.of(offsetsToCommit)); + metadata = consumer.committed(Set.of(TP)).get(TP); + assertEquals(5, metadata.offset()); + // null metadata will be converted to an empty string + assertEquals("", metadata.metadata()); + assertEquals(commitCountBefore + 2, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue()); + } + // cleanup + MockConsumerInterceptor.resetCounters(); + MockProducerInterceptor.resetCounters(); + } + + @ClusterTest + public void testClassicConsumerInterceptorsWithWrongKeyValue() throws Exception { + testInterceptorsWithWrongKeyValue(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerInterceptorsWithWrongKeyValue() throws Exception { + testInterceptorsWithWrongKeyValue(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testInterceptorsWithWrongKeyValue(Map consumerConfig) throws Exception { + var appendStr = "mock"; + // create producer with interceptor that has different key and value types from the producer + Map producerConfig = Map.of( + ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName(), + "mock.interceptor.append", appendStr + ); + // create consumer with interceptor that has different key and value types from the consumer + Map consumerConfigOverride = new HashMap<>(consumerConfig); + consumerConfigOverride.put(INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName()); + + try (Producer producer = cluster.producer(producerConfig); + Consumer consumer = cluster.consumer(consumerConfigOverride) + ) { + // producing records should succeed + producer.send(new ProducerRecord<>( + TP.topic(), + TP.partition(), + "key".getBytes(), + "value will not be modified".getBytes() + )); + + consumer.assign(List.of(TP)); + consumer.seek(TP, 0); + // consume and verify that values are not modified by interceptors -- their exceptions are caught and logged, but not propagated + var records = consumeRecords(consumer, 1); + var record = records.get(0); + assertEquals("value will not be modified", new String(record.value())); + } + } + + @ClusterTest + public void testClassicConsumerConsumeMessagesWithCreateTime() throws Exception { + testConsumeMessagesWithCreateTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerConsumeMessagesWithCreateTime() throws Exception { + testConsumeMessagesWithCreateTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testConsumeMessagesWithCreateTime(Map consumerConfig) throws Exception { + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + var numRecords = 50; + var tp2 = new TopicPartition(TOPIC, 1); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + // Test non-compressed messages + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, numRecords, startingTimestamp); + consumer.assign(List.of(TP)); + consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp); + + // Test compressed messages + sendCompressedMessages(numRecords, tp2); + consumer.assign(List.of(tp2)); + consumeAndVerifyRecords(consumer, tp2, numRecords, 0, 0, 0); + } + } + + @ClusterTest + public void testClassicConsumerConsumeMessagesWithLogAppendTime() throws 
Exception { + testConsumeMessagesWithLogAppendTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerConsumeMessagesWithLogAppendTime() throws Exception { + testConsumeMessagesWithLogAppendTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testConsumeMessagesWithLogAppendTime(Map consumerConfig) throws Exception { + var topicName = "testConsumeMessagesWithLogAppendTime"; + var startTime = System.currentTimeMillis(); + var numRecords = 50; + cluster.createTopic(topicName, 2, (short) 2, Map.of(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "LogAppendTime")); + + try (Consumer consumer = cluster.consumer(consumerConfig)) { + // Test non-compressed messages + var tp1 = new TopicPartition(topicName, 0); + sendRecords(cluster, tp1, numRecords); + consumer.assign(List.of(tp1)); + consumeAndVerifyRecordsWithTimeTypeLogAppend(consumer, tp1, numRecords, startTime); + + // Test compressed messages + var tp2 = new TopicPartition(topicName, 1); + sendCompressedMessages(numRecords, tp2); + consumer.assign(List.of(tp2)); + consumeAndVerifyRecordsWithTimeTypeLogAppend(consumer, tp2, numRecords, startTime); + } + } + + @ClusterTest + public void testClassicConsumerListTopics() throws Exception { + testListTopics(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerListTopics() throws Exception { + testListTopics(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testListTopics(Map consumerConfig) throws Exception { + var numParts = 2; + var topic1 = "part-test-topic-1"; + var topic2 = "part-test-topic-2"; + var topic3 = "part-test-topic-3"; + cluster.createTopic(topic1, numParts, (short) 1); + cluster.createTopic(topic2, numParts, (short) 1); + cluster.createTopic(topic3, numParts, (short) 1); + + sendRecords(cluster, new TopicPartition(topic1, 0), 1); + + try (var consumer = cluster.consumer(consumerConfig)) { + // consumer some messages, and we can list the internal topic __consumer_offsets + consumer.subscribe(List.of(topic1)); + consumer.poll(Duration.ofMillis(100)); + var topics = consumer.listTopics(); + assertNotNull(topics); + assertEquals(4, topics.size()); + assertEquals(2, topics.get(topic1).size()); + assertEquals(2, topics.get(topic2).size()); + assertEquals(2, topics.get(topic3).size()); + } + } + + @ClusterTest + public void testClassicConsumerPauseStateNotPreservedByRebalance() throws Exception { + testPauseStateNotPreservedByRebalance(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + SESSION_TIMEOUT_MS_CONFIG, 100, + HEARTBEAT_INTERVAL_MS_CONFIG, 30 + )); + } + + @ClusterTest + public void testAsyncConsumerPauseStateNotPreservedByRebalance() throws Exception { + testPauseStateNotPreservedByRebalance(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPauseStateNotPreservedByRebalance(Map consumerConfig) throws Exception { + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, 5, startingTimestamp); + consumer.subscribe(List.of(TOPIC)); + consumeAndVerifyRecords(consumer, TP, 5, 0, 0, startingTimestamp); + consumer.pause(List.of(TP)); + + // 
subscribe to a new topic to trigger a rebalance
+            consumer.subscribe(List.of("topic2"));
+
+            // after rebalance, our position should be reset and our pause state lost,
+            // so we should be able to consume from the beginning
+            consumeAndVerifyRecords(consumer, TP, 0, 5, 0, startingTimestamp);
+        }
+    }
+
+    @ClusterTest
+    public void testClassicConsumerPerPartitionLeadMetricsCleanUpWithSubscribe() throws Exception {
+        String consumerClientId = "testClassicConsumerPerPartitionLeadMetricsCleanUpWithSubscribe";
+        testPerPartitionLeadMetricsCleanUpWithSubscribe(Map.of(
+            GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT),
+            GROUP_ID_CONFIG, consumerClientId,
+            CLIENT_ID_CONFIG, consumerClientId
+        ), consumerClientId);
+    }
+
+    @ClusterTest
+    public void testAsyncConsumerPerPartitionLeadMetricsCleanUpWithSubscribe() throws Exception {
+        String consumerClientId = "testAsyncConsumerPerPartitionLeadMetricsCleanUpWithSubscribe";
+        testPerPartitionLeadMetricsCleanUpWithSubscribe(Map.of(
+            GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT),
+            GROUP_ID_CONFIG, consumerClientId,
+            CLIENT_ID_CONFIG, consumerClientId
+        ), consumerClientId);
+    }
+
+    private void testPerPartitionLeadMetricsCleanUpWithSubscribe(
+        Map<String, Object> consumerConfig,
+        String consumerClientId
+    ) throws Exception {
+        var numMessages = 1000;
+        var topic2 = "topic2";
+        var tp2 = new TopicPartition(TOPIC, 1);
+        cluster.createTopic(topic2, 2, (short) BROKER_COUNT);
+
+        try (Consumer<byte[], byte[]> consumer = cluster.consumer(consumerConfig)) {
+            // send some messages.
+            sendRecords(cluster, TP, numMessages);
+
+            // Test subscribe
+            // Create a consumer and consume some messages.
+            var listener = new TestConsumerReassignmentListener();
+            consumer.subscribe(List.of(TOPIC, topic2), listener);
+            var records = awaitNonEmptyRecords(consumer, TP);
+            assertEquals(1, listener.callsToAssigned, "should be assigned once");
+
+            // Verify the metric exists.
+ Map tags1 = Map.of( + "client-id", consumerClientId, + "topic", TP.topic(), + "partition", String.valueOf(TP.partition()) + ); + + Map tags2 = Map.of( + "client-id", consumerClientId, + "topic", tp2.topic(), + "partition", String.valueOf(tp2.partition()) + ); + + var fetchLead0 = consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1)); + assertNotNull(fetchLead0); + assertEquals((double) records.count(), fetchLead0.metricValue(), "The lead should be " + records.count()); + + // Remove topic from subscription + consumer.subscribe(List.of(topic2), listener); + awaitRebalance(consumer, listener); + + // Verify the metric has gone + assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1))); + assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags2))); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLagMetricsCleanUpWithSubscribe() throws Exception { + String consumerClientId = "testClassicConsumerPerPartitionLagMetricsCleanUpWithSubscribe"; + testPerPartitionLagMetricsCleanUpWithSubscribe(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLagMetricsCleanUpWithSubscribe() throws Exception { + String consumerClientId = "testAsyncConsumerPerPartitionLagMetricsCleanUpWithSubscribe"; + testPerPartitionLagMetricsCleanUpWithSubscribe(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + private void testPerPartitionLagMetricsCleanUpWithSubscribe( + Map consumerConfig, + String consumerClientId + ) throws Exception { + int numMessages = 1000; + var topic2 = "topic2"; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(topic2, 2, (short) BROKER_COUNT); + + try (Consumer consumer = cluster.consumer(consumerConfig)) { + // send some messages. + sendRecords(cluster, TP, numMessages); + + // Test subscribe + // Create a consumer and consumer some messages. + var listener = new TestConsumerReassignmentListener(); + consumer.subscribe(List.of(TOPIC, topic2), listener); + var records = awaitNonEmptyRecords(consumer, TP); + assertEquals(1, listener.callsToAssigned, "should be assigned once"); + + // Verify the metric exist. 
+ Map tags1 = Map.of( + "client-id", consumerClientId, + "topic", TP.topic(), + "partition", String.valueOf(TP.partition()) + ); + + Map tags2 = Map.of( + "client-id", consumerClientId, + "topic", tp2.topic(), + "partition", String.valueOf(tp2.partition()) + ); + + var fetchLag0 = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1)); + assertNotNull(fetchLag0); + var expectedLag = numMessages - records.count(); + assertEquals(expectedLag, (double) fetchLag0.metricValue(), EPSILON, "The lag should be " + expectedLag); + + // Remove topic from subscription + consumer.subscribe(List.of(topic2), listener); + awaitRebalance(consumer, listener); + + // Verify the metric has gone + assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1))); + assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags2))); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLeadMetricsCleanUpWithAssign() throws Exception { + String consumerClientId = "testClassicConsumerPerPartitionLeadMetricsCleanUpWithAssign"; + testPerPartitionLeadMetricsCleanUpWithAssign(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLeadMetricsCleanUpWithAssign() throws Exception { + String consumerClientId = "testAsyncConsumerPerPartitionLeadMetricsCleanUpWithAssign"; + testPerPartitionLeadMetricsCleanUpWithAssign(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + private void testPerPartitionLeadMetricsCleanUpWithAssign( + Map consumerConfig, + String consumerClientId + ) throws Exception { + var numMessages = 1000; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + // Test assign send some messages. + sendRecords(producer, TP, numMessages, System.currentTimeMillis()); + sendRecords(producer, tp2, numMessages, System.currentTimeMillis()); + + consumer.assign(List.of(TP)); + var records = awaitNonEmptyRecords(consumer, TP); + + // Verify the metric exist. 
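The lead and lag assertions in these metric tests all resolve metrics the same way: build a MetricName in the consumer-fetch-manager-metrics group, tagged with client-id, topic and partition, and look it up in Consumer.metrics(). A small editorial sketch of that lookup as a reusable helper follows; the helper name and the OptionalDouble wrapper are mine, not part of the patch.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.OptionalDouble;

final class FetchMetricLookup {
    private FetchMetricLookup() { }

    /**
     * Reads a per-partition fetch metric such as "records-lead" or "records-lag"
     * for the given client id and partition, if the consumer currently reports it.
     */
    static OptionalDouble perPartitionFetchMetric(Consumer<?, ?> consumer,
                                                  String metricName,
                                                  String clientId,
                                                  TopicPartition tp) {
        Map<String, String> tags = Map.of(
            "client-id", clientId,
            "topic", tp.topic(),
            "partition", String.valueOf(tp.partition())
        );
        Metric metric = consumer.metrics()
            .get(new MetricName(metricName, "consumer-fetch-manager-metrics", "", tags));
        return metric == null
            ? OptionalDouble.empty()
            : OptionalDouble.of((Double) metric.metricValue());
    }
}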
+ Map tags = Map.of( + "client-id", consumerClientId, + "topic", TP.topic(), + "partition", String.valueOf(TP.partition()) + ); + + var fetchLead = consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)); + assertNotNull(fetchLead); + assertEquals((double) records.count(), fetchLead.metricValue(), "The lead should be " + records.count()); + + consumer.assign(List.of(tp2)); + awaitNonEmptyRecords(consumer, tp2); + assertNull(consumer.metrics().get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags))); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLagMetricsCleanUpWithAssign() throws Exception { + String consumerClientId = "testClassicConsumerPerPartitionLagMetricsCleanUpWithAssign"; + testPerPartitionLagMetricsCleanUpWithAssign(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLagMetricsCleanUpWithAssign() throws Exception { + String consumerClientId = "testAsyncConsumerPerPartitionLagMetricsCleanUpWithAssign"; + testPerPartitionLagMetricsCleanUpWithAssign(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId + ), consumerClientId); + } + + private void testPerPartitionLagMetricsCleanUpWithAssign( + Map consumerConfig, + String consumerClientId + ) throws Exception { + var numMessages = 1000; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + // Test assign send some messages. + sendRecords(producer, TP, numMessages, System.currentTimeMillis()); + sendRecords(producer, tp2, numMessages, System.currentTimeMillis()); + + consumer.assign(List.of(TP)); + var records = awaitNonEmptyRecords(consumer, TP); + + // Verify the metric exist. 
+ Map tags = Map.of( + "client-id", consumerClientId, + "topic", TP.topic(), + "partition", String.valueOf(TP.partition()) + ); + + var fetchLag = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)); + assertNotNull(fetchLag); + + var expectedLag = numMessages - records.count(); + assertEquals(expectedLag, (double) fetchLag.metricValue(), EPSILON, "The lag should be " + expectedLag); + consumer.assign(List.of(tp2)); + awaitNonEmptyRecords(consumer, tp2); + assertNull(consumer.metrics().get(new MetricName(TP + ".records-lag", "consumer-fetch-manager-metrics", "", tags))); + assertNull(consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags))); + } + } + + @ClusterTest + public void testClassicConsumerPerPartitionLagMetricsWhenReadCommitted() throws Exception { + String consumerClientId = "testClassicConsumerPerPartitionLagMetricsWhenReadCommitted"; + testPerPartitionLagMetricsWhenReadCommitted(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId, + ISOLATION_LEVEL_CONFIG, "read_committed" + ), consumerClientId); + } + + @ClusterTest + public void testAsyncConsumerPerPartitionLagMetricsWhenReadCommitted() throws Exception { + String consumerClientId = "testAsyncConsumerPerPartitionLagMetricsWhenReadCommitted"; + testPerPartitionLagMetricsWhenReadCommitted(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId, + ISOLATION_LEVEL_CONFIG, "read_committed" + ), consumerClientId); + } + + private void testPerPartitionLagMetricsWhenReadCommitted( + Map consumerConfig, + String consumerClientId + ) throws Exception { + var numMessages = 1000; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + // Test assign send some messages. + sendRecords(producer, TP, numMessages, System.currentTimeMillis()); + sendRecords(producer, tp2, numMessages, System.currentTimeMillis()); + + consumer.assign(List.of(TP)); + awaitNonEmptyRecords(consumer, TP); + + // Verify the metric exist. 
+ Map tags = Map.of( + "client-id", consumerClientId, + "topic", TP.topic(), + "partition", String.valueOf(TP.partition()) + ); + + var fetchLag = consumer.metrics().get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)); + assertNotNull(fetchLag); + } + } + + @ClusterTest + public void testClassicConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured() throws Exception { + var consumerClientId = "testClassicConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured"; + testQuotaMetricsNotCreatedIfNoQuotasConfigured(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId, + ISOLATION_LEVEL_CONFIG, "read_committed" + ), consumerClientId); + } + + @ClusterTest + public void testAsyncConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured() throws Exception { + var consumerClientId = "testAsyncConsumerQuotaMetricsNotCreatedIfNoQuotasConfigured"; + testQuotaMetricsNotCreatedIfNoQuotasConfigured(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, consumerClientId, + CLIENT_ID_CONFIG, consumerClientId, + ISOLATION_LEVEL_CONFIG, "read_committed" + ), consumerClientId); + } + + private void testQuotaMetricsNotCreatedIfNoQuotasConfigured( + Map consumerConfig, + String consumerClientId + ) throws Exception { + var producerClientId = UUID.randomUUID().toString(); + var numRecords = 1000; + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(Map.of(ProducerConfig.CLIENT_ID_CONFIG, producerClientId)); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + sendRecords(producer, TP, numRecords, startingTimestamp); + + consumer.assign(List.of(TP)); + consumer.seek(TP, 0); + consumeAndVerifyRecords(consumer, TP, numRecords, 0, 0, startingTimestamp); + + var brokers = cluster.brokers().values(); + brokers.forEach(broker -> assertNoMetric(broker, "byte-rate", QuotaType.PRODUCE, producerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.PRODUCE, producerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "byte-rate", QuotaType.FETCH, consumerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.FETCH, consumerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "request-time", QuotaType.REQUEST, producerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.REQUEST, producerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "request-time", QuotaType.REQUEST, consumerClientId)); + brokers.forEach(broker -> assertNoMetric(broker, "throttle-time", QuotaType.REQUEST, consumerClientId)); + } + } + + private void assertNoMetric(KafkaBroker broker, String name, QuotaType quotaType, String clientId) { + var metricName = broker.metrics().metricName(name, quotaType.toString(), "", "user", "", "client-id", clientId); + assertNull(broker.metrics().metric(metricName), "Metric should not have been created " + metricName); + } + + @ClusterTest + public void testClassicConsumerSeekThrowsIllegalStateIfPartitionsNotAssigned() throws Exception { + testSeekThrowsIllegalStateIfPartitionsNotAssigned(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerSeekThrowsIllegalStateIfPartitionsNotAssigned() throws Exception { + 
testSeekThrowsIllegalStateIfPartitionsNotAssigned(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testSeekThrowsIllegalStateIfPartitionsNotAssigned(Map consumerConfig) throws Exception { + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + try (var consumer = cluster.consumer(consumerConfig)) { + var e = assertThrows(IllegalStateException.class, () -> consumer.seekToEnd(List.of(TP))); + assertEquals("No current assignment for partition " + TP, e.getMessage()); + } + } + + @ClusterTest + public void testClassicConsumingWithNullGroupId() throws Exception { + testConsumingWithNullGroupId(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers() + )); + } + + @ClusterTest + public void testAsyncConsumerConsumingWithNullGroupId() throws Exception { + testConsumingWithNullGroupId(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers() + )); + } + + private void testConsumingWithNullGroupId(Map consumerConfig) throws Exception { + var partition = 0; + cluster.createTopic(TOPIC, 1, (short) 1); + + // consumer 1 uses the default group id and consumes from earliest offset + Map consumer1Config = new HashMap<>(consumerConfig); + consumer1Config.put(AUTO_OFFSET_RESET_CONFIG, "earliest"); + consumer1Config.put(CLIENT_ID_CONFIG, "consumer1"); + + // consumer 2 uses the default group id and consumes from latest offset + Map consumer2Config = new HashMap<>(consumerConfig); + consumer2Config.put(AUTO_OFFSET_RESET_CONFIG, "latest"); + consumer2Config.put(CLIENT_ID_CONFIG, "consumer2"); + + // consumer 3 uses the default group id and starts from an explicit offset + Map consumer3Config = new HashMap<>(consumerConfig); + consumer3Config.put(CLIENT_ID_CONFIG, "consumer3"); + + try (Producer producer = cluster.producer(); + Consumer consumer1 = new KafkaConsumer<>(consumer1Config); + Consumer consumer2 = new KafkaConsumer<>(consumer2Config); + Consumer consumer3 = new KafkaConsumer<>(consumer3Config) + ) { + producer.send(new ProducerRecord<>(TOPIC, partition, "k1".getBytes(), "v1".getBytes())).get(); + producer.send(new ProducerRecord<>(TOPIC, partition, "k2".getBytes(), "v2".getBytes())).get(); + producer.send(new ProducerRecord<>(TOPIC, partition, "k3".getBytes(), "v3".getBytes())).get(); + + consumer1.assign(List.of(TP)); + consumer2.assign(List.of(TP)); + consumer3.assign(List.of(TP)); + consumer3.seek(TP, 1); + + var numRecords1 = consumer1.poll(Duration.ofMillis(5000)).count(); + assertThrows(InvalidGroupIdException.class, consumer1::commitSync); + assertThrows(InvalidGroupIdException.class, () -> consumer2.committed(Set.of(TP))); + + var numRecords2 = consumer2.poll(Duration.ofMillis(5000)).count(); + var numRecords3 = consumer3.poll(Duration.ofMillis(5000)).count(); + + consumer1.unsubscribe(); + consumer2.unsubscribe(); + consumer3.unsubscribe(); + + assertTrue(consumer1.assignment().isEmpty()); + assertTrue(consumer2.assignment().isEmpty()); + assertTrue(consumer3.assignment().isEmpty()); + + consumer1.close(); + consumer2.close(); + consumer3.close(); + + assertEquals(3, 
numRecords1, "Expected consumer1 to consume from earliest offset"); + assertEquals(0, numRecords2, "Expected consumer2 to consume from latest offset"); + assertEquals(2, numRecords3, "Expected consumer3 to consume from offset 1"); + } + } + + @ClusterTest + public void testClassicConsumerNullGroupIdNotSupportedIfCommitting() throws Exception { + testNullGroupIdNotSupportedIfCommitting(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers(), + AUTO_OFFSET_RESET_CONFIG, "earliest", + CLIENT_ID_CONFIG, "consumer1" + )); + } + + @ClusterTest + public void testAsyncConsumerNullGroupIdNotSupportedIfCommitting() throws Exception { + testNullGroupIdNotSupportedIfCommitting(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers(), + AUTO_OFFSET_RESET_CONFIG, "earliest", + CLIENT_ID_CONFIG, "consumer1" + )); + } + + private void testNullGroupIdNotSupportedIfCommitting(Map consumerConfig) throws Exception { + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + try (var consumer = new KafkaConsumer<>(consumerConfig)) { + consumer.assign(List.of(TP)); + assertThrows(InvalidGroupIdException.class, consumer::commitSync); + } + } + + @ClusterTest + public void testClassicConsumerStaticConsumerDetectsNewPartitionCreatedAfterRestart() throws Exception { + testStaticConsumerDetectsNewPartitionCreatedAfterRestart(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, "my-group-id", + GROUP_INSTANCE_ID_CONFIG, "my-instance-id", + METADATA_MAX_AGE_CONFIG, 100, + MAX_POLL_INTERVAL_MS_CONFIG, 6000 + )); + } + + @ClusterTest + public void testAsyncConsumerStaticConsumerDetectsNewPartitionCreatedAfterRestart() throws Exception { + testStaticConsumerDetectsNewPartitionCreatedAfterRestart(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + GROUP_ID_CONFIG, "my-group-id", + GROUP_INSTANCE_ID_CONFIG, "my-instance-id", + METADATA_MAX_AGE_CONFIG, 100, + MAX_POLL_INTERVAL_MS_CONFIG, 6000 + )); + } + + private void testStaticConsumerDetectsNewPartitionCreatedAfterRestart(Map consumerConfig) throws Exception { + var foo = "foo"; + var foo0 = new TopicPartition(foo, 0); + var foo1 = new TopicPartition(foo, 1); + cluster.createTopic(foo, 1, (short) 1); + + try (Consumer consumer1 = cluster.consumer(consumerConfig); + Consumer consumer2 = cluster.consumer(consumerConfig); + var admin = cluster.admin() + ) { + consumer1.subscribe(List.of(foo)); + awaitAssignment(consumer1, Set.of(foo0)); + consumer1.close(); + + consumer2.subscribe(List.of(foo)); + awaitAssignment(consumer2, Set.of(foo0)); + + admin.createPartitions(Map.of(foo, NewPartitions.increaseTo(2))).all().get(); + awaitAssignment(consumer2, Set.of(foo0, foo1)); + } + } + + @ClusterTest + public void testClassicConsumerEndOffsets() throws Exception { + testEndOffsets(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + METADATA_MAX_AGE_CONFIG, 100, + MAX_POLL_INTERVAL_MS_CONFIG, 6000 + )); + } + + @ClusterTest + public void testAsyncConsumerEndOffsets() throws Exception { + 
testEndOffsets(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + METADATA_MAX_AGE_CONFIG, 100, + MAX_POLL_INTERVAL_MS_CONFIG, 6000 + )); + } + + private void testEndOffsets(Map consumerConfig) throws Exception { + var numRecords = 10000; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + var startingTimestamp = System.currentTimeMillis(); + for (var i = 0; i < numRecords; i++) { + var timestamp = startingTimestamp + (long) i; + ProducerRecord record = new ProducerRecord<>( + TP.topic(), + TP.partition(), + timestamp, + ("key " + i).getBytes(), + ("value " + i).getBytes() + ); + producer.send(record); + } + producer.flush(); + + consumer.subscribe(List.of(TOPIC)); + awaitAssignment(consumer, Set.of(TP, tp2)); + + var endOffsets = consumer.endOffsets(Set.of(TP)); + assertEquals(numRecords, endOffsets.get(TP)); + } + } + + @ClusterTest + public void testClassicConsumerFetchOffsetsForTime() throws Exception { + testFetchOffsetsForTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerFetchOffsetsForTime() throws Exception { + testFetchOffsetsForTime(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testFetchOffsetsForTime(Map consumerConfig) throws Exception { + var numPartitions = 2; + var tp2 = new TopicPartition(TOPIC, 1); + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + + try (Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer(consumerConfig) + ) { + Map timestampsToSearch = new HashMap<>(); + for (int part = 0, i = 0; part < numPartitions; part++, i++) { + var tp = new TopicPartition(TOPIC, part); + // key, val, and timestamp equal to the sequence number. + sendRecords(producer, tp, 100, 0); + timestampsToSearch.put(tp, i * 20L); + } + // Test negative target time + assertThrows(IllegalArgumentException.class, () -> consumer.offsetsForTimes(Map.of(TP, -1L))); + var timestampOffsets = consumer.offsetsForTimes(timestampsToSearch); + + var timestampTp0 = timestampOffsets.get(TP); + assertEquals(0, timestampTp0.offset()); + assertEquals(0, timestampTp0.timestamp()); + assertEquals(Optional.of(0), timestampTp0.leaderEpoch()); + + var timestampTp1 = timestampOffsets.get(tp2); + assertEquals(20, timestampTp1.offset()); + assertEquals(20, timestampTp1.timestamp()); + assertEquals(Optional.of(0), timestampTp1.leaderEpoch()); + } + } + + @ClusterTest + public void testClassicConsumerPositionRespectsTimeout() { + testPositionRespectsTimeout(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPositionRespectsTimeout() { + testPositionRespectsTimeout(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPositionRespectsTimeout(Map consumerConfig) { + var topicPartition = new TopicPartition(TOPIC, 15); + try (var consumer = cluster.consumer(consumerConfig)) { + consumer.assign(List.of(topicPartition)); + // When position() is called for a topic/partition that doesn't exist, the consumer will repeatedly update the + // local metadata. However, it should give up after the user-supplied timeout has past. 
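// Note: this uses the bounded position(TopicPartition, Duration) overload; the single-argument
// position(TopicPartition) would instead block for up to default.api.timeout.ms (60 seconds by default).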
+ assertThrows(TimeoutException.class, () -> consumer.position(topicPartition, Duration.ofSeconds(3))); + } + } + + @ClusterTest + public void testClassicConsumerPositionRespectsWakeup() { + testPositionRespectsWakeup(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerPositionRespectsWakeup() { + testPositionRespectsWakeup(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testPositionRespectsWakeup(Map consumerConfig) { + var topicPartition = new TopicPartition(TOPIC, 15); + try (var consumer = cluster.consumer(consumerConfig)) { + consumer.assign(List.of(topicPartition)); + CompletableFuture.runAsync(() -> { + try { + TimeUnit.SECONDS.sleep(1); + consumer.wakeup(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + }); + assertThrows(WakeupException.class, () -> consumer.position(topicPartition, Duration.ofSeconds(3))); + } + } + + @ClusterTest + public void testClassicConsumerPositionWithErrorConnectionRespectsWakeup() { + testPositionWithErrorConnectionRespectsWakeup(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + // make sure the connection fails + BOOTSTRAP_SERVERS_CONFIG, "localhost:12345" + )); + } + + @ClusterTest + public void testAsyncConsumerPositionWithErrorConnectionRespectsWakeup() { + testPositionWithErrorConnectionRespectsWakeup(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + // make sure the connection fails + BOOTSTRAP_SERVERS_CONFIG, "localhost:12345" + )); + } + + private void testPositionWithErrorConnectionRespectsWakeup(Map consumerConfig) { + var topicPartition = new TopicPartition(TOPIC, 15); + try (var consumer = cluster.consumer(consumerConfig)) { + consumer.assign(List.of(topicPartition)); + CompletableFuture.runAsync(() -> { + try { + TimeUnit.SECONDS.sleep(1); + consumer.wakeup(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + }); + assertThrows(WakeupException.class, () -> consumer.position(topicPartition, Duration.ofSeconds(100))); + } + } + + @Flaky("KAFKA-18031") + @ClusterTest + public void testClassicConsumerCloseLeavesGroupOnInterrupt() throws Exception { + testCloseLeavesGroupOnInterrupt(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + AUTO_OFFSET_RESET_CONFIG, "earliest", + GROUP_ID_CONFIG, "group_test,", + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers() + )); + } + + @Flaky("KAFKA-18031") + @ClusterTest + public void testAsyncConsumerCloseLeavesGroupOnInterrupt() throws Exception { + testCloseLeavesGroupOnInterrupt(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName(), + AUTO_OFFSET_RESET_CONFIG, "earliest", + GROUP_ID_CONFIG, "group_test,", + BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers() + )); + } + + private void testCloseLeavesGroupOnInterrupt(Map consumerConfig) throws Exception { + try (Consumer consumer = cluster.consumer(consumerConfig)) { + var listener = new TestConsumerReassignmentListener(); + consumer.subscribe(List.of(TOPIC), listener); + awaitRebalance(consumer, 
listener); + + assertEquals(1, listener.callsToAssigned); + assertEquals(0, listener.callsToRevoked); + + try { + Thread.currentThread().interrupt(); + assertThrows(InterruptException.class, consumer::close); + } finally { + // Clear the interrupted flag so we don't create problems for subsequent tests. + Thread.interrupted(); + } + + assertEquals(1, listener.callsToAssigned); + assertEquals(1, listener.callsToRevoked); + + Map consumerConfigMap = new HashMap<>(consumerConfig); + var config = new ConsumerConfig(consumerConfigMap); + + // Set the wait timeout to be only *half* the configured session timeout. This way we can make sure that the + // consumer explicitly left the group as opposed to being kicked out by the broker. + var leaveGroupTimeoutMs = config.getInt(SESSION_TIMEOUT_MS_CONFIG) / 2; + + TestUtils.waitForCondition( + () -> checkGroupMemberEmpty(config), + leaveGroupTimeoutMs, + "Consumer did not leave the consumer group within " + leaveGroupTimeoutMs + " ms of close" + ); + } + } + + private boolean checkGroupMemberEmpty(ConsumerConfig config) { + try (var admin = cluster.admin()) { + var groupId = config.getString(GROUP_ID_CONFIG); + var result = admin.describeConsumerGroups(List.of(groupId)); + var groupDescription = result.describedGroups().get(groupId).get(); + return groupDescription.members().isEmpty(); + } catch (ExecutionException | InterruptedException e) { + return false; + } + } + + @ClusterTest + public void testClassicConsumerOffsetRelatedWhenTimeoutZero() throws Exception { + testOffsetRelatedWhenTimeoutZero(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT) + )); + } + + @ClusterTest + public void testAsyncConsumerOffsetRelatedWhenTimeoutZero() throws Exception { + testOffsetRelatedWhenTimeoutZero(Map.of( + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT) + )); + } + + private void testOffsetRelatedWhenTimeoutZero(Map consumerConfig) throws Exception { + cluster.createTopic(TOPIC, 2, (short) BROKER_COUNT); + try (var consumer = cluster.consumer(consumerConfig)) { + var result1 = consumer.beginningOffsets(List.of(TP), Duration.ZERO); + assertNotNull(result1); + assertEquals(0, result1.size()); + + var result2 = consumer.endOffsets(List.of(TP), Duration.ZERO); + assertNotNull(result2); + assertEquals(0, result2.size()); + + var result3 = consumer.offsetsForTimes(Map.of(TP, 0L), Duration.ZERO); + assertNotNull(result3); + assertEquals(1, result3.size()); + assertNull(result3.get(TP)); + } + } + + private void sendCompressedMessages(int numRecords, TopicPartition tp) { + Map config = Map.of( + COMPRESSION_TYPE_CONFIG, CompressionType.GZIP.name, + LINGER_MS_CONFIG, Integer.MAX_VALUE + ); + try (Producer producer = cluster.producer(config)) { + IntStream.range(0, numRecords).forEach(i -> producer.send(new ProducerRecord<>( + tp.topic(), + tp.partition(), + (long) i, + ("key " + i).getBytes(), + ("value " + i).getBytes() + ))); + } + } + + @ClusterTest + public void testClassicConsumerStallBetweenPoll() throws Exception { + testStallBetweenPoll(GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumerStallBetweenPoll() throws Exception { + testStallBetweenPoll(GroupProtocol.CONSUMER); + } + + /** + * This test is to prove that the intermittent stalling that has been experienced when using the asynchronous + * consumer, as filed under KAFKA-19259, have been fixed. + * + *

+ * + * The basic idea is to have one thread that produces a record every 500 ms. and the main thread that consumes + * records without pausing between polls for much more than the produce delay. In the test case filed in + * KAFKA-19259, the consumer sometimes pauses for up to 5-10 seconds despite records being produced every second. + */ + private void testStallBetweenPoll(GroupProtocol groupProtocol) throws Exception { + var testTopic = "stall-test-topic"; + var numPartitions = 6; + cluster.createTopic(testTopic, numPartitions, (short) BROKER_COUNT); + + // The producer must produce slowly to tickle the scenario. + var produceDelay = 500; + + var executor = Executors.newScheduledThreadPool(1); + + try (var producer = cluster.producer()) { + // Start a thread running that produces records at a relative trickle. + executor.scheduleWithFixedDelay( + () -> producer.send(new ProducerRecord<>(testTopic, TestUtils.randomBytes(64))), + 0, + produceDelay, + TimeUnit.MILLISECONDS + ); + + Map consumerConfig = Map.of(GROUP_PROTOCOL_CONFIG, groupProtocol.name().toLowerCase(Locale.ROOT)); + + // Assign a tolerance for how much time is allowed to pass between Consumer.poll() calls given that there + // should be *at least* one record to read every second. + var pollDelayTolerance = 2000; + + try (Consumer consumer = cluster.consumer(consumerConfig)) { + consumer.subscribe(List.of(testTopic)); + + // This is here to allow the consumer time to settle the group membership/assignment. + awaitNonEmptyRecords(consumer, new TopicPartition(testTopic, 0)); + + // Keep track of the last time the poll is invoked to ensure the deltas between invocations don't + // exceed the delay threshold defined above. + var beforePoll = System.currentTimeMillis(); + consumer.poll(Duration.ofSeconds(5)); + consumer.poll(Duration.ofSeconds(5)); + var afterPoll = System.currentTimeMillis(); + var pollDelay = afterPoll - beforePoll; + + if (pollDelay > pollDelayTolerance) + fail("Detected a stall of " + pollDelay + " ms between Consumer.poll() invocations despite a Producer producing records every " + produceDelay + " ms"); + } finally { + executor.shutdownNow(); + // Wait for any active tasks to terminate to ensure consumer is not closed while being used from another thread + assertTrue(executor.awaitTermination(5, TimeUnit.SECONDS), "Executor did not terminate"); + } + } + } + + private ConsumerRecords awaitNonEmptyRecords( + Consumer consumer, + TopicPartition tp + ) throws Exception { + AtomicReference> result = new AtomicReference<>(); + + TestUtils.waitForCondition(() -> { + var polledRecords = consumer.poll(Duration.ofSeconds(10)); + boolean hasRecords = !polledRecords.isEmpty(); + if (hasRecords) { + result.set(polledRecords); + } + return hasRecords; + }, "Timed out waiting for non-empty records from topic " + tp.topic() + " partition " + tp.partition()); + + return result.get(); + } + + public static class SerializerImpl implements Serializer { + private final ByteArraySerializer serializer = new ByteArraySerializer(); + + @Override + public byte[] serialize(String topic, byte[] data) { + throw new RuntimeException("This method should not be called"); + } + + @Override + public byte[] serialize(String topic, Headers headers, byte[] data) { + headers.add("content-type", "application/octet-stream".getBytes()); + return serializer.serialize(topic, headers, data); + } + } + + public static class DeserializerImpl implements Deserializer { + private final ByteArrayDeserializer deserializer = new ByteArrayDeserializer(); + + 
@Override + public byte[] deserialize(String topic, byte[] data) { + throw new RuntimeException("This method should not be called"); + } + + @Override + public byte[] deserialize(String topic, Headers headers, byte[] data) { + Header contentType = headers.lastHeader("content-type"); + assertNotNull(contentType); + assertEquals("application/octet-stream", new String(contentType.value())); + return deserializer.deserialize(topic, headers, data); + } + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/RackAwareAssignor.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/RackAwareAssignor.java new file mode 100644 index 0000000000000..4b9ee6fd27418 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/RackAwareAssignor.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.coordinator.group.api.assignor.ConsumerGroupPartitionAssignor; +import org.apache.kafka.coordinator.group.api.assignor.GroupAssignment; +import org.apache.kafka.coordinator.group.api.assignor.GroupSpec; +import org.apache.kafka.coordinator.group.api.assignor.MemberAssignment; +import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException; +import org.apache.kafka.coordinator.group.api.assignor.ShareGroupPartitionAssignor; +import org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber; +import org.apache.kafka.coordinator.group.modern.MemberAssignmentImpl; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * The RackAwareAssignor is a consumer group partition assignor that takes into account the rack + * information of the members when assigning partitions to them. + * It needs all brokers and members to have rack information available. 
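* If a member has no rack id, or none of the subscribed members sit in a rack that hosts a given partition's
* replicas, the assignor fails with a PartitionAssignorException rather than falling back to a rack-unaware assignment.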
+ */ +public class RackAwareAssignor implements ConsumerGroupPartitionAssignor, ShareGroupPartitionAssignor { + @Override + public String name() { + return "rack-aware-assignor"; + } + + @Override + public GroupAssignment assign(GroupSpec groupSpec, SubscribedTopicDescriber subscribedTopicDescriber) throws PartitionAssignorException { + Map rackIdToMemberId = new HashMap<>(); + List memberIds = new ArrayList<>(groupSpec.memberIds()); + for (String memberId : memberIds) { + if (groupSpec.memberSubscription(memberId).rackId().isEmpty()) { + throw new PartitionAssignorException("Member " + memberId + " does not have rack information available."); + } + rackIdToMemberId.put( + groupSpec.memberSubscription(memberId).rackId().get(), + memberId + ); + } + + Map>> assignments = new HashMap<>(); + for (Uuid topicId : groupSpec.memberSubscription(memberIds.get(0)).subscribedTopicIds()) { + int numPartitions = subscribedTopicDescriber.numPartitions(topicId); + if (numPartitions == -1) { + throw new PartitionAssignorException("Member is subscribed to a non-existent topic"); + } + + for (int partitionId = 0; partitionId < numPartitions; partitionId++) { + Set racks = subscribedTopicDescriber.racksForPartition(topicId, partitionId); + if (racks.isEmpty()) { + throw new PartitionAssignorException("No racks available for partition " + partitionId + " of topic " + topicId); + } + + String assignedRack = null; + for (String rack : racks) { + String memberId = rackIdToMemberId.get(rack); + if (memberId == null) { + continue; + } + assignedRack = rack; + break; + } + + if (assignedRack == null) { + throw new PartitionAssignorException("No member found for racks " + racks + " for partition " + partitionId + " of topic " + topicId); + } + + Map> assignment = assignments.computeIfAbsent( + rackIdToMemberId.get(assignedRack), + k -> new HashMap<>() + ); + Set partitions = assignment.computeIfAbsent( + topicId, + k -> new java.util.HashSet<>() + ); + partitions.add(partitionId); + } + } + + Map memberAssignments = new HashMap<>(); + for (Map.Entry>> entry : assignments.entrySet()) { + memberAssignments.put(entry.getKey(), new MemberAssignmentImpl(entry.getValue())); + } + return new GroupAssignment(memberAssignments); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/SaslPlainPlaintextConsumerTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/SaslPlainPlaintextConsumerTest.java new file mode 100644 index 0000000000000..dcd9d3a27b847 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/SaslPlainPlaintextConsumerTest.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.ClientsTestUtils; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; + +import org.junit.jupiter.api.BeforeEach; + +import java.util.Locale; +import java.util.Map; + +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testClusterResourceListener; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testCoordinatorFailover; +import static org.apache.kafka.clients.ClientsTestUtils.BaseConsumerTestcase.testSimpleConsumption; +import static org.apache.kafka.clients.CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.SECURITY_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.clients.consumer.SaslPlainPlaintextConsumerTest.MECHANISMS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_JAAS_CONFIG; +import static org.apache.kafka.common.config.SaslConfigs.SASL_MECHANISM; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG; +import static org.apache.kafka.common.test.JaasUtils.KAFKA_PLAIN_ADMIN; +import static org.apache.kafka.common.test.JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = ClientsTestUtils.BaseConsumerTestcase.BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, value = "100"), + @ClusterConfigProperty(key = SASL_ENABLED_MECHANISMS_CONFIG, value = MECHANISMS), + @ClusterConfigProperty(key = SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, value = MECHANISMS), + } +) +public class SaslPlainPlaintextConsumerTest { + + private final ClusterInstance cluster; + public static final String MECHANISMS = "PLAIN"; + public static final String SASL_JAAS = "org.apache.kafka.common.security.plain.PlainLoginModule required " + + "username=\"" + KAFKA_PLAIN_ADMIN + "\" " + + "password=\"" + KAFKA_PLAIN_ADMIN_PASSWORD + "\";"; + + public SaslPlainPlaintextConsumerTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @BeforeEach + public void setUp() throws InterruptedException { + cluster.createTopic(ClientsTestUtils.BaseConsumerTestcase.TOPIC, 2, (short) ClientsTestUtils.BaseConsumerTestcase.BROKER_COUNT); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testClassicConsumerSimpleConsumption() throws InterruptedException { + testSimpleConsumption(cluster, Map.of( + SECURITY_PROTOCOL_CONFIG, 
SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)) + ); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testAsyncConsumerSimpleConsumption() throws InterruptedException { + testSimpleConsumption(cluster, Map.of( + SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)) + ); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testClassicConsumerClusterResourceListener() throws InterruptedException { + testClusterResourceListener(cluster, Map.of( + SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT)) + ); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testAsyncConsumerClusterResourceListener() throws InterruptedException { + testClusterResourceListener(cluster, Map.of( + SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)) + ); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testClassicConsumerCoordinatorFailover() throws InterruptedException { + Map config = Map.of( + SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name().toLowerCase(Locale.ROOT), + SESSION_TIMEOUT_MS_CONFIG, 5001, + HEARTBEAT_INTERVAL_MS_CONFIG, 1000, + // Use higher poll timeout to avoid consumer leaving the group due to timeout + MAX_POLL_INTERVAL_MS_CONFIG, 15000 + ); + testCoordinatorFailover(cluster, config); + } + + @ClusterTest( + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT + ) + public void testAsyncConsumeCoordinatorFailover() throws InterruptedException { + Map config = Map.of( + SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name, + SASL_MECHANISM, MECHANISMS, + SASL_JAAS_CONFIG, SASL_JAAS, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT), + // Use higher poll timeout to avoid consumer leaving the group due to timeout + MAX_POLL_INTERVAL_MS_CONFIG, 15000 + ); + testCoordinatorFailover(cluster, config); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerRackAwareTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerRackAwareTest.java new file mode 100644 index 0000000000000..4e60b0e12ccad --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerRackAwareTest.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.ShareGroupDescription; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.test.TestUtils; + +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; + +public class ShareConsumerRackAwareTest { + @ClusterTest( + types = {Type.KRAFT}, + brokers = 3, + serverProperties = { + @ClusterConfigProperty(id = 0, key = "broker.rack", value = "rack0"), + @ClusterConfigProperty(id = 1, key = "broker.rack", value = "rack1"), + @ClusterConfigProperty(id = 2, key = "broker.rack", value = "rack2"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic, share"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.SHARE_GROUP_ASSIGNORS_CONFIG, value = "org.apache.kafka.clients.consumer.RackAwareAssignor") + } + ) + void testShareConsumerWithRackAwareAssignor(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + String groupId = "group0"; + String topic = "test-topic"; + try (Admin admin = clusterInstance.admin(); + Producer producer = clusterInstance.producer(); + ShareConsumer consumer0 = clusterInstance.shareConsumer(Map.of( + CommonClientConfigs.GROUP_ID_CONFIG, groupId, + CommonClientConfigs.CLIENT_ID_CONFIG, "client0", + CommonClientConfigs.CLIENT_RACK_CONFIG, "rack0" + )); + ShareConsumer consumer1 = clusterInstance.shareConsumer(Map.of( + CommonClientConfigs.GROUP_ID_CONFIG, groupId, + CommonClientConfigs.CLIENT_ID_CONFIG, "client1", + CommonClientConfigs.CLIENT_RACK_CONFIG, "rack1" + )); + ShareConsumer consumer2 = clusterInstance.shareConsumer(Map.of( + CommonClientConfigs.GROUP_ID_CONFIG, groupId, + CommonClientConfigs.CLIENT_ID_CONFIG, "client2", + CommonClientConfigs.CLIENT_RACK_CONFIG, "rack2" + )) + ) { + // Create a new topic with 1 partition on broker 0. 
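// The NewTopic(String, Map<Integer, List<Integer>>) constructor takes an explicit replica assignment
// (partition id -> list of broker ids), so partition 0 is placed on broker 0 and therefore in rack0.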
+ admin.createTopics(List.of(new NewTopic(topic, Map.of(0, List.of(0))))); + clusterInstance.waitTopicCreation(topic, 1); + + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + producer.flush(); + + consumer0.subscribe(List.of(topic)); + consumer1.subscribe(List.of(topic)); + consumer2.subscribe(List.of(topic)); + + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + Map groups = assertDoesNotThrow(() -> admin.describeShareGroups(Set.of("group0")).all().get()); + ShareGroupDescription groupDescription = groups.get(groupId); + return isExpectedAssignment(groupDescription, 3, Map.of( + "client0", Set.of(new TopicPartition(topic, 0)), + "client1", Set.of(), + "client2", Set.of() + )); + }, "Consumer 0 should be assigned to topic partition 0"); + + // Add a new partition 1 and 2 to broker 1. + admin.createPartitions( + Map.of( + topic, + NewPartitions.increaseTo(3, List.of(List.of(1), List.of(1))) + ) + ); + clusterInstance.waitTopicCreation(topic, 3); + + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + Map groups = assertDoesNotThrow(() -> admin.describeShareGroups(Set.of("group0")).all().get()); + ShareGroupDescription groupDescription = groups.get(groupId); + return isExpectedAssignment(groupDescription, 3, Map.of( + "client0", Set.of(new TopicPartition(topic, 0)), + "client1", Set.of(new TopicPartition(topic, 1), new TopicPartition(topic, 2)), + "client2", Set.of() + )); + }, "Consumer 1 should be assigned to topic partition 1 and 2"); + + // Add a new partition 3, 4, and 5 to broker 2. + admin.createPartitions( + Map.of( + topic, + NewPartitions.increaseTo(6, List.of(List.of(2), List.of(2), List.of(2))) + ) + ); + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + Map groups = assertDoesNotThrow(() -> admin.describeShareGroups(Set.of("group0")).all().get()); + ShareGroupDescription groupDescription = groups.get(groupId); + return isExpectedAssignment(groupDescription, 3, Map.of( + "client0", Set.of(new TopicPartition(topic, 0)), + "client1", Set.of(new TopicPartition(topic, 1), new TopicPartition(topic, 2)), + "client2", Set.of(new TopicPartition(topic, 3), new TopicPartition(topic, 4), new TopicPartition(topic, 5)) + )); + }, "Consumer 2 should be assigned to topic partition 3, 4, and 5"); + + // Change partitions to different brokers. 
+ // partition 0 -> broker 2 + // partition 1 -> broker 2 + // partition 2 -> broker 2 + // partition 3 -> broker 1 + // partition 4 -> broker 1 + // partition 5 -> broker 0 + admin.alterPartitionReassignments(Map.of( + new TopicPartition(topic, 0), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 1), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 2), Optional.of(new NewPartitionReassignment(List.of(2))), + new TopicPartition(topic, 3), Optional.of(new NewPartitionReassignment(List.of(1))), + new TopicPartition(topic, 4), Optional.of(new NewPartitionReassignment(List.of(1))), + new TopicPartition(topic, 5), Optional.of(new NewPartitionReassignment(List.of(0))) + )).all().get(); + TestUtils.waitForCondition(() -> { + consumer0.poll(Duration.ofMillis(1000)); + consumer1.poll(Duration.ofMillis(1000)); + consumer2.poll(Duration.ofMillis(1000)); + Map groups = assertDoesNotThrow(() -> admin.describeShareGroups(Set.of("group0")).all().get()); + ShareGroupDescription groupDescription = groups.get(groupId); + return isExpectedAssignment(groupDescription, 3, Map.of( + "client0", Set.of(new TopicPartition(topic, 5)), + "client1", Set.of(new TopicPartition(topic, 3), new TopicPartition(topic, 4)), + "client2", Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(topic, 2)) + )); + }, "Consumer with topic partition mapping should be 0 -> 5 | 1 -> 3, 4 | 2 -> 0, 1, 2"); + } + } + + boolean isExpectedAssignment( + ShareGroupDescription groupDescription, + int memberCount, + Map> expectedAssignments + ) { + return groupDescription != null && + groupDescription.members().size() == memberCount && + groupDescription.members().stream().allMatch( + member -> { + String clientId = member.clientId(); + Set expectedPartitions = expectedAssignments.get(clientId); + return member.assignment().topicPartitions().equals(expectedPartitions); + } + ); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerTest.java index e402a4344c109..998ac2c585d59 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/consumer/ShareConsumerTest.java @@ -36,10 +36,13 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.GroupMaxSizeReachedException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.RecordDeserializationException; +import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; @@ -66,6 +69,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Timeout; +import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -111,10 +115,9 @@ @Timeout(1200) @Tag("integration") @ClusterTestDefaults( + types = {Type.KRAFT}, serverProperties = { 
@ClusterConfigProperty(key = "auto.create.topics.enable", value = "false"), - @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - @ClusterConfigProperty(key = "group.share.enable", value = "true"), @ClusterConfigProperty(key = "group.share.partition.max.record.locks", value = "10000"), @ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000"), @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), @@ -122,10 +125,8 @@ @ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "3"), @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), @ClusterConfigProperty(key = "transaction.state.log.min.isr", value = "1"), - @ClusterConfigProperty(key = "transaction.state.log.replication.factor", value = "1"), - @ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") - }, - types = {Type.KRAFT} + @ClusterConfigProperty(key = "transaction.state.log.replication.factor", value = "1") + } ) public class ShareConsumerTest { private final ClusterInstance cluster; @@ -361,7 +362,7 @@ public void testAcknowledgementCommitCallbackSuccessfulAcknowledgement() throws return partitionOffsetsMap.containsKey(tp); }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive call to callback"); - // We expect no exception as the acknowledgment error code is null. + // We expect no exception as the acknowledgement error code is null. assertFalse(partitionExceptionMap.containsKey(tp)); verifyShareGroupStateTopicRecordsProduced(); } @@ -390,7 +391,7 @@ public void testAcknowledgementCommitCallbackOnClose() { shareConsumer.poll(Duration.ofMillis(1000)); shareConsumer.close(); - // We expect no exception as the acknowledgment error code is null. + // We expect no exception as the acknowledgement error code is null. 
assertFalse(partitionExceptionMap.containsKey(tp)); verifyShareGroupStateTopicRecordsProduced(); } @@ -466,6 +467,8 @@ public void testHeaders() { int numRecords = 1; ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); record.headers().add("headerKey", "headerValue".getBytes()); + record.headers().add("headerKey2", "headerValue2".getBytes()); + record.headers().add("headerKey3", "headerValue3".getBytes()); producer.send(record); producer.flush(); @@ -474,11 +477,15 @@ public void testHeaders() { List> records = consumeRecords(shareConsumer, numRecords); assertEquals(numRecords, records.size()); - for (ConsumerRecord consumerRecord : records) { - Header header = consumerRecord.headers().lastHeader("headerKey"); - if (header != null) - assertEquals("headerValue", new String(header.value())); - } + Header header = records.get(0).headers().lastHeader("headerKey"); + assertEquals("headerValue", new String(header.value())); + + // Test the order of headers in a record is preserved when producing and consuming + Header[] headers = records.get(0).headers().toArray(); + assertEquals("headerKey", headers[0].key()); + assertEquals("headerKey2", headers[1].key()); + assertEquals("headerKey3", headers[2].key()); + verifyShareGroupStateTopicRecordsProduced(); } } @@ -845,6 +852,144 @@ public void testExplicitAcknowledgeThrowsNotInBatch() { } } + @ClusterTest + public void testExplicitOverrideAcknowledgeCorruptedMessage() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer = createShareConsumer( + "group1", + Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT), + null, + mockErrorDeserializer(3))) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer.subscribe(Set.of(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofSeconds(60)); + assertEquals(2, records.count()); + Iterator> iterator = records.iterator(); + + ConsumerRecord firstRecord = iterator.next(); + ConsumerRecord secondRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + assertEquals(1L, secondRecord.offset()); + shareConsumer.acknowledge(firstRecord); + shareConsumer.acknowledge(secondRecord); + + RecordDeserializationException rde = assertThrows(RecordDeserializationException.class, () -> shareConsumer.poll(Duration.ofSeconds(60))); + assertEquals(2, rde.offset()); + shareConsumer.commitSync(); + + // The corrupted record was automatically released, so we can still obtain it. 
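// RecordDeserializationException reports the topic-partition and offset of the bad record, so it can be
// targeted for acknowledgement below without ever being deserialized successfully.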
+ rde = assertThrows(RecordDeserializationException.class, () -> shareConsumer.poll(Duration.ofSeconds(60))); + assertEquals(2, rde.offset()); + + // Reject this record + shareConsumer.acknowledge(rde.topicPartition().topic(), rde.topicPartition().partition(), rde.offset(), AcknowledgeType.REJECT); + shareConsumer.commitSync(); + + records = shareConsumer.poll(Duration.ZERO); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @ClusterTest + public void testExplicitAcknowledgeOffsetThrowsNotException() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer = createShareConsumer( + "group1", + Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT))) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Set.of(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofSeconds(60)); + assertEquals(1, records.count()); + ConsumerRecord consumedRecord = records.records(tp).get(0); + assertEquals(0L, consumedRecord.offset()); + + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(tp.topic(), tp.partition(), consumedRecord.offset(), AcknowledgeType.ACCEPT)); + + shareConsumer.acknowledge(consumedRecord); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @ClusterTest + public void testExplicitAcknowledgeOffsetThrowsParametersError() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer = createShareConsumer( + "group1", + Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT), + null, + mockErrorDeserializer(2))) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.flush(); + + shareConsumer.subscribe(Set.of(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofSeconds(60)); + assertEquals(1, records.count()); + Iterator> iterator = records.iterator(); + + ConsumerRecord firstRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + shareConsumer.acknowledge(firstRecord); + + final RecordDeserializationException rde = assertThrows(RecordDeserializationException.class, () -> shareConsumer.poll(Duration.ofSeconds(60))); + assertEquals(1, rde.offset()); + + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge("foo", rde.topicPartition().partition(), rde.offset(), AcknowledgeType.REJECT)); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(rde.topicPartition().topic(), 1, rde.offset(), AcknowledgeType.REJECT)); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(rde.topicPartition().topic(), tp2.partition(), 0, AcknowledgeType.REJECT)); + + // Reject this record. + shareConsumer.acknowledge(rde.topicPartition().topic(), rde.topicPartition().partition(), rde.offset(), AcknowledgeType.REJECT); + shareConsumer.commitSync(); + + // The next acknowledge() should throw an IllegalStateException as the record has been acked. 
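// Together with testExplicitAcknowledgeOffsetThrowsNotException above, this pins down the contract:
// acknowledging by raw coordinates is only valid for the offset reported by a RecordDeserializationException,
// and only until that acknowledgement has been committed.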
+ assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(rde.topicPartition().topic(), rde.topicPartition().partition(), rde.offset(), AcknowledgeType.REJECT)); + + records = shareConsumer.poll(Duration.ZERO); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + private ByteArrayDeserializer mockErrorDeserializer(int recordNumber) { + int recordIndex = recordNumber - 1; + return new ByteArrayDeserializer() { + int i = 0; + + @Override + public byte[] deserialize(String topic, Headers headers, ByteBuffer data) { + if (i == recordIndex) { + throw new SerializationException(); + } else { + i++; + return super.deserialize(topic, headers, data); + } + } + }; + } + @ClusterTest public void testImplicitAcknowledgeFailsExplicit() { alterShareAutoOffsetReset("group1", "earliest"); @@ -1209,22 +1354,25 @@ public void testConsumerCloseInGroupSequential() { int consumer1MessageCount = 0; int consumer2MessageCount = 0; - // Poll three times to receive records. The second poll acknowledges the records - // from the first poll, and so on. The third poll's records are not acknowledged + // Poll until we receive all the records. The second poll acknowledges the records + // from the first poll, and so on. + // The last poll's records are not acknowledged // because the consumer is closed, which makes the broker release the records fetched. - ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - consumer1MessageCount += records1.count(); - int consumer1MessageCountA = records1.count(); - records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - consumer1MessageCount += records1.count(); - int consumer1MessageCountB = records1.count(); - records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - int consumer1MessageCountC = records1.count(); - assertEquals(totalMessages, consumer1MessageCountA + consumer1MessageCountB + consumer1MessageCountC); - shareConsumer1.close(); - int maxRetries = 10; int retries = 0; + int lastPollRecordCount = 0; + while (consumer1MessageCount < totalMessages && retries < maxRetries) { + lastPollRecordCount = shareConsumer1.poll(Duration.ofMillis(5000)).count(); + consumer1MessageCount += lastPollRecordCount; + retries++; + } + assertEquals(totalMessages, consumer1MessageCount); + shareConsumer1.close(); + + // These records are released after the first consumer closes. + consumer1MessageCount -= lastPollRecordCount; + + retries = 0; while (consumer1MessageCount + consumer2MessageCount < totalMessages && retries < maxRetries) { ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(5000)); consumer2MessageCount += records2.count(); @@ -1358,7 +1506,7 @@ public void testAcknowledgementCommitCallbackCallsShareConsumerDisallowed() { shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWithShareConsumer<>(shareConsumer)); shareConsumer.subscribe(Set.of(tp.topic())); - // The acknowledgment commit callback will try to call a method of ShareConsumer + // The acknowledgement commit callback will try to call a method of ShareConsumer shareConsumer.poll(Duration.ofMillis(5000)); // The second poll sends the acknowledgements implicitly. // The acknowledgement commit callback will be called and the exception is thrown. 
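Taken together, the acknowledge-by-offset tests added above describe a poison-pill pattern for share consumers: a record that cannot be deserialized surfaces from poll() as a RecordDeserializationException carrying its coordinates, and the application rejects it so it is not delivered again. A condensed sketch of that pattern as a hypothetical helper (the method name is illustrative, and it assumes a share consumer subscribed in explicit acknowledgement mode, as configured in the tests above):

private void pollAndHandlePoisonPill(ShareConsumer<byte[], byte[]> consumer) {
    try {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
        // Accept everything that deserialized cleanly.
        records.forEach(record -> consumer.acknowledge(record, AcknowledgeType.ACCEPT));
    } catch (RecordDeserializationException rde) {
        // Reject the corrupted record by topic, partition and offset so it is not delivered again.
        consumer.acknowledge(rde.topicPartition().topic(), rde.topicPartition().partition(), rde.offset(), AcknowledgeType.REJECT);
    }
    // Commit the acknowledgements, whichever branch was taken.
    consumer.commitSync();
}

Compare this with a classic KafkaConsumer, which would typically seek past the bad offset; the share consumer expresses the same decision through AcknowledgeType.REJECT.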
@@ -1398,14 +1546,14 @@ public void testAcknowledgementCommitCallbackCallsShareConsumerWakeup() throws I producer.send(record); producer.flush(); - // The acknowledgment commit callback will try to call a method of ShareConsumer + // The acknowledgement commit callback will try to call a method of ShareConsumer shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWakeup<>(shareConsumer)); shareConsumer.subscribe(Set.of(tp.topic())); TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); - // The second poll sends the acknowledgments implicitly. + // The second poll sends the acknowledgements implicitly. shareConsumer.poll(Duration.ofMillis(2000)); // Till now acknowledgement commit callback has not been called, so no exception thrown yet. @@ -1713,11 +1861,9 @@ public void testShareAutoOffsetResetEarliest() { public void testShareAutoOffsetResetEarliestAfterLsoMovement() { alterShareAutoOffsetReset("group1", "earliest"); try ( - ShareConsumer shareConsumer = createShareConsumer("group1"); Producer producer = createProducer(); Admin adminClient = createAdminClient() ) { - shareConsumer.subscribe(Set.of(tp.topic())); ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. @@ -1815,8 +1961,7 @@ public void testShareAutoOffsetResetByDuration() throws Exception { // Set the auto offset reset to 3 hours before current time // so the consumer should consume all messages (3 records) alterShareAutoOffsetReset("group2", "by_duration:PT3H"); - try (ShareConsumer shareConsumer = createShareConsumer("group2"); - Producer producer = createProducer()) { + try (ShareConsumer shareConsumer = createShareConsumer("group2")) { shareConsumer.subscribe(Set.of(tp.topic())); List> records = consumeRecords(shareConsumer, 3); @@ -1826,7 +1971,7 @@ public void testShareAutoOffsetResetByDuration() throws Exception { } @ClusterTest - public void testShareAutoOffsetResetByDurationInvalidFormat() throws Exception { + public void testShareAutoOffsetResetByDurationInvalidFormat() { // Test invalid duration format ConfigResource configResource = new ConfigResource(ConfigResource.Type.GROUP, "group1"); Map> alterEntries = new HashMap<>(); @@ -1852,15 +1997,12 @@ public void testShareAutoOffsetResetByDurationInvalidFormat() throws Exception { brokers = 3, serverProperties = { @ClusterConfigProperty(key = "auto.create.topics.enable", value = "false"), - @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - @ClusterConfigProperty(key = "group.share.enable", value = "true"), @ClusterConfigProperty(key = "group.share.partition.max.record.locks", value = "10000"), @ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000"), @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "3"), @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "3"), @ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "3"), - @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "3"), - @ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "3") } ) @Timeout(90) @@ 
-1975,10 +2117,7 @@ public void testShareConsumerAfterCoordinatorMovement() throws Exception { ); // top the producer after some time (but after coordinator shutdown) - service.schedule(() -> { - prodState.done().set(true); - }, 10L, TimeUnit.SECONDS - ); + service.schedule(() -> prodState.done().set(true), 10L, TimeUnit.SECONDS); // wait for both producer and consumer to finish TestUtils.waitForCondition( @@ -2000,19 +2139,114 @@ public void testShareConsumerAfterCoordinatorMovement() throws Exception { verifyShareGroupStateTopicRecordsProduced(); } + @ClusterTest + public void testDeliveryCountNotIncreaseAfterSessionClose() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer()) { + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. + for (int i = 0; i < 10; i++) { + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); + } + } + + // Perform the fetch, close in a loop. + for (int count = 0; count < ShareGroupConfig.SHARE_GROUP_DELIVERY_COUNT_LIMIT_DEFAULT; count++) { + consumeMessages(new AtomicInteger(0), 10, "group1", 1, 10, false); + } + + // If the delivery count is increased, consumer will get nothing. + int consumedMessageCount = consumeMessages(new AtomicInteger(0), 10, "group1", 1, 10, true); + // The records returned belong to offsets 0-9. + assertEquals(10, consumedMessageCount); + verifyShareGroupStateTopicRecordsProduced(); + } + + @ClusterTest + public void testDeliveryCountDifferentBehaviorWhenClosingSessionWithExplicitAcknowledgement() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer = createShareConsumer( + "group1", + Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT))) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Set.of(tp.topic())); + ConsumerRecords records = waitedPoll(shareConsumer, 2500L, 2); + assertEquals(2, records.count()); + // Acknowledge the first record with AcknowledgeType.RELEASE + shareConsumer.acknowledge(records.records(tp).get(0), AcknowledgeType.RELEASE); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + } + + // Test delivery count + try (ShareConsumer shareConsumer = createShareConsumer("group1", Map.of())) { + shareConsumer.subscribe(Set.of(tp.topic())); + ConsumerRecords records = waitedPoll(shareConsumer, 2500L, 2); + assertEquals(2, records.count()); + assertEquals((short) 2, records.records(tp).get(0).deliveryCount().get()); + assertEquals((short) 1, records.records(tp).get(1).deliveryCount().get()); + } + } + + @ClusterTest( + serverProperties = { + @ClusterConfigProperty(key = "group.share.delivery.count.limit", value = "2"), + } + ) + public void testBehaviorOnDeliveryCountBoundary() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer = createShareConsumer( + "group1", + Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT))) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + 
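// group.share.delivery.count.limit is 2 for this test (see the server properties above), so the assertions
// below walk the record right up to that limit: delivery count 1 on the first delivery, 2 after it is
// released and redelivered, and still 2 when a fresh consumer session picks it up.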
shareConsumer.subscribe(Set.of(tp.topic())); + ConsumerRecords records = waitedPoll(shareConsumer, 2500L, 1); + assertEquals(1, records.count()); + assertEquals((short) 1, records.records(tp).get(0).deliveryCount().get()); + // Acknowledge the record with AcknowledgeType.RELEASE. + shareConsumer.acknowledge(records.records(tp).get(0), AcknowledgeType.RELEASE); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + + // Consume again, the delivery count should be 2. + records = waitedPoll(shareConsumer, 2500L, 1); + assertEquals(1, records.count()); + assertEquals((short) 2, records.records(tp).get(0).deliveryCount().get()); + + } + + // Start again and same record should be delivered + try (ShareConsumer shareConsumer = createShareConsumer("group1", Map.of())) { + shareConsumer.subscribe(Set.of(tp.topic())); + ConsumerRecords records = waitedPoll(shareConsumer, 2500L, 1); + assertEquals(1, records.count()); + assertEquals((short) 2, records.records(tp).get(0).deliveryCount().get()); + } + } + @ClusterTest( brokers = 3, serverProperties = { @ClusterConfigProperty(key = "auto.create.topics.enable", value = "false"), - @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - @ClusterConfigProperty(key = "group.share.enable", value = "true"), @ClusterConfigProperty(key = "group.share.partition.max.record.locks", value = "10000"), @ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000"), @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "3"), @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "3"), @ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "3"), - @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "3"), - @ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "3") } ) @Timeout(150) @@ -2026,7 +2260,7 @@ public void testComplexShareConsumer() throws Exception { ClientState prodState = new ClientState(); - // produce messages until we want + // Produce messages until we want. service.execute(() -> { try (Producer producer = createProducer()) { while (!prodState.done().get()) { @@ -2038,13 +2272,14 @@ public void testComplexShareConsumer() throws Exception { } }); - // init a complex share consumer + // Init a complex share consumer. ComplexShareConsumer complexCons1 = new ComplexShareConsumer<>( cluster.bootstrapServers(), topicName, groupId, Map.of(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, EXPLICIT) ); + alterShareAutoOffsetReset(groupId, "earliest"); service.schedule( complexCons1, @@ -2052,22 +2287,107 @@ public void testComplexShareConsumer() throws Exception { TimeUnit.MILLISECONDS ); - // let the complex consumer read the messages - service.schedule(() -> { - prodState.done().set(true); - }, 10L, TimeUnit.SECONDS - ); + // Let the complex consumer read the messages. + service.schedule(() -> prodState.done().set(true), 5L, TimeUnit.SECONDS); - // all messages which can be read are read, some would be redelivered + // All messages which can be read are read, some would be redelivered (roughly 3 times the records produced). TestUtils.waitForCondition(complexCons1::isDone, 45_000L, () -> "did not close!"); + int delta = complexCons1.recordsRead() - (int) (prodState.count().get() * 3 * 0.95); // 3 times with margin of error (5%). 
- assertTrue(prodState.count().get() < complexCons1.recordsRead()); + assertTrue(delta > 0, + String.format("Producer (%d) and share consumer (%d) record count mismatch.", prodState.count().get(), complexCons1.recordsRead())); shutdownExecutorService(service); verifyShareGroupStateTopicRecordsProduced(); } + @ClusterTest( + brokers = 1, + serverProperties = { + @ClusterConfigProperty(key = "auto.create.topics.enable", value = "false"), + @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + @ClusterConfigProperty(key = "group.share.enable", value = "true"), + @ClusterConfigProperty(key = "group.share.partition.max.record.locks", value = "10000"), + @ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "share.coordinator.state.topic.min.isr", value = "1"), + @ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "3"), + @ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "transaction.state.log.min.isr", value = "1"), + @ClusterConfigProperty(key = "transaction.state.log.replication.factor", value = "1"), + @ClusterConfigProperty(key = "group.share.max.size", value = "3") // Setting max group size to 3 + } + ) + public void testShareGroupMaxSizeConfigExceeded() throws Exception { + // creating 3 consumers in the group1 + ShareConsumer shareConsumer1 = createShareConsumer("group1"); + ShareConsumer shareConsumer2 = createShareConsumer("group1"); + ShareConsumer shareConsumer3 = createShareConsumer("group1"); + + shareConsumer1.subscribe(Set.of(tp.topic())); + shareConsumer2.subscribe(Set.of(tp.topic())); + shareConsumer3.subscribe(Set.of(tp.topic())); + + shareConsumer1.poll(Duration.ofMillis(5000)); + shareConsumer2.poll(Duration.ofMillis(5000)); + shareConsumer3.poll(Duration.ofMillis(5000)); + + ShareConsumer shareConsumer4 = createShareConsumer("group1"); + shareConsumer4.subscribe(Set.of(tp.topic())); + + TestUtils.waitForCondition(() -> { + try { + shareConsumer4.poll(Duration.ofMillis(5000)); + } catch (GroupMaxSizeReachedException e) { + return true; + } catch (Exception e) { + return false; + } + return false; + }, 30000, 200L, () -> "The 4th consumer was not kicked out of the group"); + + shareConsumer1.close(); + shareConsumer2.close(); + shareConsumer3.close(); + shareConsumer4.close(); + } + + @ClusterTest( + brokers = 1, + serverProperties = { + @ClusterConfigProperty(key = "group.share.max.size", value = "1"), // Setting max group size to 1 + @ClusterConfigProperty(key = "group.share.max.share.sessions", value = "1") // Setting max share sessions value to 1 + } + ) + public void testShareGroupShareSessionCacheIsFull() { + alterShareAutoOffsetReset("group1", "earliest"); + try (Producer producer = createProducer(); + ShareConsumer shareConsumer1 = createShareConsumer("group1"); + ShareConsumer shareConsumer2 = createShareConsumer("group2")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer1.subscribe(Set.of(tp.topic())); + shareConsumer2.subscribe(Set.of(tp.topic())); + + ConsumerRecords records = waitedPoll(shareConsumer1, 2500L, 1); + assertEquals(1, records.count()); + + producer.send(record); + producer.flush(); + + // The second share consumer 
should not throw any exception, but should not receive any records as well. + records = shareConsumer2.poll(Duration.ofMillis(1000)); + + assertEquals(0, records.count()); + + shareConsumer1.close(); + shareConsumer2.close(); + } + } + @ClusterTest public void testReadCommittedIsolationLevel() { alterShareAutoOffsetReset("group1", "earliest"); @@ -2455,8 +2775,9 @@ private void produceAbortedTransaction(Producer transactionalPro try { transactionalProducer.beginTransaction(); ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, message.getBytes(), message.getBytes()); - transactionalProducer.send(record); + Future future = transactionalProducer.send(record); transactionalProducer.flush(); + future.get(); // Ensure producer send is complete before aborting transactionalProducer.abortTransaction(); } catch (Exception e) { transactionalProducer.abortTransaction(); @@ -2498,8 +2819,7 @@ private int consumeMessages(AtomicInteger totalMessagesConsumed, int maxPolls, boolean commit) { return assertDoesNotThrow(() -> { - try (ShareConsumer shareConsumer = createShareConsumer( - groupId)) { + try (ShareConsumer shareConsumer = createShareConsumer(groupId)) { shareConsumer.subscribe(Set.of(tp.topic())); return consumeMessages(shareConsumer, totalMessagesConsumed, totalMessages, consumerNumber, maxPolls, commit); } @@ -2621,13 +2941,22 @@ private ShareConsumer createShareConsumer(String groupId) { private ShareConsumer createShareConsumer( String groupId, Map additionalProperties + ) { + return createShareConsumer(groupId, additionalProperties, null, null); + } + + private ShareConsumer createShareConsumer( + String groupId, + Map additionalProperties, + Deserializer keyDeserializer, + Deserializer valueDeserializer ) { Properties props = new Properties(); props.putAll(additionalProperties); props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); Map conf = new HashMap<>(); props.forEach((k, v) -> conf.put((String) k, v)); - return cluster.shareConsumer(conf); + return cluster.shareConsumer(conf, keyDeserializer, valueDeserializer); } private void warmup() throws InterruptedException { @@ -2766,10 +3095,6 @@ private static class ComplexShareConsumer implements Runnable { this.configs.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); } - void stop() { - state.done().set(true); - } - @Override public void run() { try (ShareConsumer consumer = new KafkaShareConsumer<>(configs)) { diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerFailureHandlingTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerFailureHandlingTest.java index 82cac8ae0ba3b..546c0a733e3b0 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerFailureHandlingTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerFailureHandlingTest.java @@ -224,7 +224,7 @@ void testCannotSendToInternalTopic(ClusterInstance clusterInstance) throws Inter .groupMetadataTopicConfigs() .forEach((k, v) -> topicConfig.put(k.toString(), v.toString())); admin.createTopics(List.of(new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, 1, (short) 1).configs(topicConfig))); - clusterInstance.waitForTopic(Topic.GROUP_METADATA_TOPIC_NAME, 0); + clusterInstance.waitTopicDeletion(Topic.GROUP_METADATA_TOPIC_NAME); } try (Producer producer = clusterInstance.producer(producerConfig(1))) { @@ -276,16 +276,11 @@ private void 
checkTooLargeRecordForReplicationWithAckAll(ClusterInstance cluster // create topic String topic10 = "topic10"; - try (Admin admin = clusterInstance.admin()) { - admin.createTopics(List.of(new NewTopic(topic10, brokerSize, (short) brokerSize).configs(topicConfig))); - clusterInstance.waitTopicDeletion("topic10"); - } + clusterInstance.createTopic(topic10, brokerSize, (short) brokerSize, topicConfig); // send a record that is too large for replication, but within the broker max message limit - byte[] value = - new byte[maxMessageSize - DefaultRecordBatch.RECORD_BATCH_OVERHEAD - DefaultRecord.MAX_RECORD_OVERHEAD]; - Producer producer = clusterInstance.producer(producerConfig(-1)); - try (producer) { + byte[] value = new byte[maxMessageSize - DefaultRecordBatch.RECORD_BATCH_OVERHEAD - DefaultRecord.MAX_RECORD_OVERHEAD]; + try (Producer producer = clusterInstance.producer(producerConfig(-1))) { ProducerRecord producerRecord = new ProducerRecord<>(topic10, null, value); RecordMetadata recordMetadata = producer.send(producerRecord).get(); diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerIdExpirationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerIdExpirationTest.java index f79b3786253e4..a9489c88327ca 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerIdExpirationTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerIdExpirationTest.java @@ -204,10 +204,10 @@ void testDynamicProducerIdExpirationMs(ClusterInstance cluster) throws Interrupt // Update the producer ID expiration ms to a very high value. admin.incrementalAlterConfigs(producerIdExpirationConfig("100000")); - cluster.brokers().values().forEach(broker -> { + cluster.brokers().values().forEach(broker -> TestUtils.waitUntilTrue(() -> broker.logManager().producerStateManagerConfig().producerIdExpirationMs() == 100000, - () -> "Configuration was not updated.", DEFAULT_MAX_WAIT_MS, 100); - }); + () -> "Configuration was not updated.", DEFAULT_MAX_WAIT_MS, 100) + ); // Send more records to send producer ID back to brokers. producer.send(new ProducerRecord<>(topic1, 0, null, "key".getBytes(), "value".getBytes())); producer.flush(); @@ -226,10 +226,10 @@ void testDynamicProducerIdExpirationMs(ClusterInstance cluster) throws Interrupt kafkaBroker.awaitShutdown(); kafkaBroker.startup(); cluster.waitForReadyBrokers(); - cluster.brokers().values().forEach(broker -> { + cluster.brokers().values().forEach(broker -> TestUtils.waitUntilTrue(() -> broker.logManager().producerStateManagerConfig().producerIdExpirationMs() == 100, - () -> "Configuration was not updated.", DEFAULT_MAX_WAIT_MS, 100); - }); + () -> "Configuration was not updated.", DEFAULT_MAX_WAIT_MS, 100) + ); // Ensure producer ID expires quickly again. waitProducerIdExpire(admin); diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerSendWhileDeletionTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerSendWhileDeletionTest.java new file mode 100644 index 0000000000000..aa93431cf6304 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/producer/ProducerSendWhileDeletionTest.java @@ -0,0 +1,341 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.producer; + +import kafka.server.KafkaBroker; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.server.config.ServerLogConfigs; +import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile; +import org.apache.kafka.storage.internals.log.UnifiedLog; +import org.apache.kafka.test.TestUtils; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.kafka.clients.producer.ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.MAX_BLOCK_MS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerSendWhileDeletionTest.BROKER_COUNT; +import static org.apache.kafka.server.config.ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG; +import static org.apache.kafka.server.config.ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG; +import static org.apache.kafka.server.config.ServerLogConfigs.NUM_PARTITIONS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +@ClusterTestDefaults( + types = {Type.KRAFT}, + brokers = BROKER_COUNT, + serverProperties = { + @ClusterConfigProperty(key = NUM_PARTITIONS_CONFIG, value = "2"), + @ClusterConfigProperty(key = DEFAULT_REPLICATION_FACTOR_CONFIG, value = "2"), + @ClusterConfigProperty(key = AUTO_LEADER_REBALANCE_ENABLE_CONFIG, value = "false"), + @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000"), + @ClusterConfigProperty(key = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, value = 
"100") + } +) +public class ProducerSendWhileDeletionTest { + + public static final int BROKER_COUNT = 2; + private static final int DEFAULT_LINGER_MS = 5; + private final int numRecords = 10; + private final String topic = "topic"; + private final ClusterInstance cluster; + + public ProducerSendWhileDeletionTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + /** + * Tests that Producer gets self-recovered when a topic is deleted mid-way of produce. + *

+ * Producer will attempt to send messages to the partition specified in each record, and should + * succeed as long as the partition is included in the metadata. + */ + @ClusterTest + public void testSendWithTopicDeletionMidWay() throws Exception { + try (var admin = cluster.admin(); + var producer = createProducer() + ) { + // Create topic with leader as 0 for the 2 partitions. + var topicAssignment = Map.of( + 0, List.of(0, 1), + 1, List.of(0, 1) + ); + admin.createTopics(List.of(new NewTopic(topic, topicAssignment))); + + // Change leader to 1 for both the partitions to increase leader epoch from 0 -> 1 + var reassignment = Map.of( + new TopicPartition(topic, 0), Optional.of(new NewPartitionReassignment(List.of(1, 0))), + new TopicPartition(topic, 1), Optional.of(new NewPartitionReassignment(List.of(1, 0))) + ); + admin.alterPartitionReassignments(reassignment).all().get(); + + for (var i = 1; i <= numRecords; i++) { + var resp = producer.send( + new ProducerRecord<>(topic, null, ("value" + i).getBytes()) + ).get(); + assertEquals(topic, resp.topic()); + } + + // Start topic deletion + admin.deleteTopics(List.of(topic)).all().get(); + // Verify that the topic is deleted when no metadata request comes in + verifyTopicDeletion(); + + // Producer should be able to send messages even after topic gets deleted and auto-created + var finalResp = producer.send(new ProducerRecord<>(topic, null, "value".getBytes())).get(); + assertEquals(topic, finalResp.topic()); + } + } + + /** + * Tests that Producer produce to new topic id after recreation. + *

+ * Producer will attempt to send messages to the partition specified in each record, and should + * succeed as long as the metadata has been updated with new topic id. + */ + @ClusterTest + public void testSendWithRecreatedTopic() throws Exception { + try (var admin = cluster.admin(); + var producer = createProducer() + ) { + cluster.createTopic(topic, 1, (short) 1); + var topicId = topicMetadata().topicId(); + + for (int i = 1; i <= numRecords; i++) { + var resp = producer.send(new ProducerRecord<>(topic, null, ("value" + i).getBytes())).get(); + assertEquals(topic, resp.topic()); + } + + // Start topic deletion + admin.deleteTopics(List.of(topic)).all().get(); + + // Verify that the topic is deleted when no metadata request comes in + verifyTopicDeletion(); + cluster.createTopic(topic, 1, (short) 1); + assertNotEquals(topicId, topicMetadata().topicId()); + + // Producer should be able to send messages even after topic gets recreated + var recordMetadata = producer.send(new ProducerRecord<>(topic, null, "value".getBytes(StandardCharsets.UTF_8))).get(); + assertEquals(topic, recordMetadata.topic()); + assertEquals(0, recordMetadata.offset()); + } + } + + @ClusterTest + public void testSendWhileTopicGetRecreated() { + int maxNumTopicRecreationAttempts = 5; + var recreateTopicFuture = CompletableFuture.supplyAsync(() -> { + var topicIds = new HashSet(); + while (topicIds.size() < maxNumTopicRecreationAttempts) { + try (var admin = cluster.admin()) { + if (admin.listTopics().names().get().contains(topic)) { + admin.deleteTopics(List.of(topic)).all().get(); + } + topicIds.add(admin.createTopics(List.of(new NewTopic(topic, 2, (short) 1))).topicId(topic).get()); + } catch (Exception e) { + // ignore + } + } + return topicIds; + }); + + AtomicInteger numAcks = new AtomicInteger(0); + var producerFuture = CompletableFuture.runAsync(() -> { + try (var producer = createProducer()) { + for (int i = 1; i <= numRecords; i++) { + producer.send(new ProducerRecord<>(topic, null, ("value" + i).getBytes()), + (metadata, exception) -> numAcks.incrementAndGet() + ); + } + producer.flush(); + } + }); + var topicIds = recreateTopicFuture.join(); + producerFuture.join(); + assertEquals(maxNumTopicRecreationAttempts, topicIds.size()); + assertEquals(numRecords, numAcks.intValue()); + } + + @ClusterTest + public void testSendWithTopicReassignmentIsMidWay() throws Exception { + var partition0 = new TopicPartition(topic, 0); + + try (var admin = cluster.admin(); + var producer = createProducer() + ) { + // Create topic with leader as 0 for the 1 partition. + admin.createTopics(List.of(new NewTopic(topic, Map.of(0, List.of(0))))); + assertLeader(partition0, 0); + + var topicDetails = topicMetadata(); + for (var i = 1; i <= numRecords; i++) { + var resp = producer.send(new ProducerRecord<>(topic, null, ("value" + i).getBytes())).get(); + assertEquals(topic, resp.topic()); + } + + var reassignment = Map.of( + partition0, Optional.of(new NewPartitionReassignment(List.of(1))) + ); + // Change replica assignment from 0 to 1. Leadership moves to 1. 
+ admin.alterPartitionReassignments(reassignment).all().get(); + + assertLeader(partition0, 1); + assertEquals(topicDetails.topicId(), topicMetadata().topicId()); + + // Producer should be able to send messages even after topic gets reassigned + var recordMetadata = producer.send(new ProducerRecord<>(topic, null, "value".getBytes())).get(); + assertEquals(topic, recordMetadata.topic()); + } + } + + private Producer createProducer() { + return cluster.producer(Map.of( + MAX_BLOCK_MS_CONFIG, 5000L, + REQUEST_TIMEOUT_MS_CONFIG, 10000, + DELIVERY_TIMEOUT_MS_CONFIG, 10000 + DEFAULT_LINGER_MS, + KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName() + )); + } + + private void verifyTopicDeletion() throws InterruptedException { + var topicPartitions = List.of( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1) + ); + + // ensure that the topic-partition has been deleted from all brokers' replica managers + TestUtils.waitForCondition(() -> + cluster.brokers().values().stream() + .allMatch(broker -> topicPartitions.stream() + .allMatch(tp -> broker.replicaManager().onlinePartition(tp).isEmpty()) + ), "Replica manager's should have deleted all of this topic's partitions"); + + // ensure that logs from all replicas are deleted + TestUtils.waitForCondition(() -> cluster.brokers().values().stream() + .allMatch(broker -> topicPartitions.stream() + .allMatch(tp -> broker.logManager().getLog(tp, false).isEmpty()) + ), "Replica logs not deleted after delete topic is complete"); + + // ensure that topic is removed from all cleaner offsets + TestUtils.waitForCondition(() -> cluster.brokers().values().stream() + .allMatch(broker -> topicPartitions.stream() + .allMatch(tp -> partitionNotInCheckpoint(broker, tp)) + ), "Cleaner offset for deleted partition should have been removed"); + + TestUtils.waitForCondition(() -> cluster.brokers().values().stream() + .allMatch(broker -> broker.config().logDirs().stream() + .allMatch(logDir -> topicPartitions.stream().noneMatch(tp -> + new File(logDir, tp.topic() + "-" + tp.partition()).exists()) + ) + ), "Failed to soft-delete the data to a delete directory"); + + TestUtils.waitForCondition(() -> cluster.brokers().values().stream() + .allMatch(broker -> broker.config().logDirs().stream() + .allMatch(logDir -> deletionDirectoriesAbsent(logDir, topicPartitions)) + ), "Failed to hard-delete the delete directory"); + } + + private boolean partitionNotInCheckpoint(KafkaBroker broker, TopicPartition tp) { + List liveLogDirs = new ArrayList<>(); + broker.logManager().liveLogDirs().foreach(liveLogDirs::add); + var checkpoints = liveLogDirs.stream().map(logDir -> { + try { + return new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint"), null).read(); + } catch (Exception e) { + return new HashMap(); + } + }).toList(); + return checkpoints.stream().noneMatch(checkpointsPerLogDir -> + checkpointsPerLogDir.containsKey(tp)); + } + + private boolean deletionDirectoriesAbsent(String logDir, List topicPartitions) { + var directoryNames = new File(logDir).list(); + if (directoryNames == null) { + return true; + } + return topicPartitions.stream().allMatch(tp -> + Arrays.stream(directoryNames).noneMatch(directoryName -> + directoryName.startsWith(tp.topic() + "-" + tp.partition()) && + directoryName.endsWith(UnifiedLog.DELETE_DIR_SUFFIX))); + } + + private TopicDescription topicMetadata() throws Exception { + try (var admin = cluster.admin()) { + return admin.describeTopics(List.of(topic)) + .allTopicNames() + .get() + .get(topic); + + } + } + + private 
void assertLeader(TopicPartition topicPartition, Integer expectedLeaderOpt) throws InterruptedException { + try (var admin = cluster.admin()) { + TestUtils.waitForCondition(() -> { + try { + Optional currentLeader = getCurrentLeader(admin, topicPartition); + return currentLeader.equals(Optional.of(expectedLeaderOpt)); + } catch (Exception e) { + if (e.getCause() instanceof UnknownTopicOrPartitionException) { + return false; + } + throw new RuntimeException(e); + } + }, "Waiting for leader to become " + expectedLeaderOpt); + } + } + + private Optional getCurrentLeader(Admin admin, TopicPartition topicPartition) throws Exception { + return admin.describeTopics(List.of(topicPartition.topic())) + .allTopicNames() + .get() + .get(topicPartition.topic()) + .partitions() + .stream() + .filter(p -> p.partition() == topicPartition.partition()) + .findFirst() + .map(TopicPartitionInfo::leader) + .map(Node::id); + } +} diff --git a/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/security/GroupAuthorizerIntegrationTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/security/GroupAuthorizerIntegrationTest.java new file mode 100644 index 0000000000000..32069732db824 --- /dev/null +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/clients/security/GroupAuthorizerIntegrationTest.java @@ -0,0 +1,444 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.security; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.GroupProtocol; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.acl.AccessControlEntry; +import org.apache.kafka.common.acl.AccessControlEntryFilter; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.acl.AclPermissionType; +import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; +import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.internals.Topic; +import org.apache.kafka.common.resource.PatternType; +import org.apache.kafka.common.resource.Resource; +import org.apache.kafka.common.resource.ResourcePattern; +import org.apache.kafka.common.resource.ResourceType; +import org.apache.kafka.common.security.auth.AuthenticationContext; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.metadata.authorizer.StandardAuthorizer; +import org.apache.kafka.server.authorizer.AuthorizableRequestContext; +import org.apache.kafka.server.authorizer.Authorizer; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.test.TestUtils; + +import java.net.InetAddress; +import java.time.Duration; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; +import static org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +@ClusterTestDefaults(serverProperties = { + @ClusterConfigProperty(key = StandardAuthorizer.SUPER_USERS_CONFIG, value = "Group:broker"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + @ClusterConfigProperty(key = ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, value = "org.apache.kafka.metadata.authorizer.StandardAuthorizer"), + @ClusterConfigProperty(key = BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, value = "org.apache.kafka.clients.security.GroupAuthorizerIntegrationTest$GroupPrincipalBuilder"), 
+}) +public class GroupAuthorizerIntegrationTest { + private static final KafkaPrincipal BROKER_PRINCIPAL = new KafkaPrincipal("Group", "broker"); + private static final KafkaPrincipal CLIENT_PRINCIPAL = new KafkaPrincipal("Group", "client"); + + private static final String BROKER_LISTENER_NAME = "BROKER"; + private static final String CLIENT_LISTENER_NAME = "EXTERNAL"; + private static final String CONTROLLER_LISTENER_NAME = "CONTROLLER"; + + private Authorizer getAuthorizer(ClusterInstance clusterInstance) { + return clusterInstance.controllers().values().stream() + .filter(server -> server.authorizerPlugin().isDefined()) + .map(server -> server.authorizerPlugin().get().get()).findFirst().get(); + } + + private void setup(ClusterInstance clusterInstance) throws InterruptedException { + // Allow inter-broker communication + addAndVerifyAcls( + Set.of(createAcl(AclOperation.CLUSTER_ACTION, AclPermissionType.ALLOW, BROKER_PRINCIPAL)), + new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL), + clusterInstance + ); + addAndVerifyAcls( + Set.of(createAcl(AclOperation.CREATE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME, PatternType.LITERAL), + clusterInstance + ); + + NewTopic offsetTopic = new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, 1, (short) 1); + try (Admin admin = clusterInstance.admin(Map.of( + AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG, true)) + ) { + admin.createTopics(Collections.singleton(offsetTopic)); + clusterInstance.waitTopicCreation(Topic.GROUP_METADATA_TOPIC_NAME, 1); + } + } + + public static class GroupPrincipalBuilder extends DefaultKafkaPrincipalBuilder { + public GroupPrincipalBuilder() { + super(null, null); + } + + @Override + public KafkaPrincipal build(AuthenticationContext context) { + String listenerName = context.listenerName(); + return switch (listenerName) { + case BROKER_LISTENER_NAME, CONTROLLER_LISTENER_NAME -> BROKER_PRINCIPAL; + case CLIENT_LISTENER_NAME -> CLIENT_PRINCIPAL; + default -> throw new IllegalArgumentException("No principal mapped to listener " + listenerName); + }; + } + } + + private AccessControlEntry createAcl(AclOperation aclOperation, AclPermissionType aclPermissionType, KafkaPrincipal principal) { + return new AccessControlEntry( + principal.toString(), + WILDCARD_HOST, + aclOperation, + aclPermissionType + ); + } + + private void addAndVerifyAcls(Set acls, ResourcePattern resource, ClusterInstance clusterInstance) throws InterruptedException { + List aclBindings = acls.stream() + .map(acl -> new AclBinding(resource, acl)) + .toList(); + Authorizer authorizer = getAuthorizer(clusterInstance); + authorizer.createAcls(ANONYMOUS_CONTEXT, aclBindings) + .forEach(future -> { + try { + future.toCompletableFuture().get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Failed to create ACLs", e); + } + }); + AclBindingFilter aclBindingFilter = new AclBindingFilter(resource.toFilter(), AccessControlEntryFilter.ANY); + clusterInstance.waitAcls(aclBindingFilter, acls); + } + + private void removeAndVerifyAcls(Set deleteAcls, ResourcePattern resource, ClusterInstance clusterInstance) throws InterruptedException { + List aclBindingFilters = deleteAcls.stream() + .map(acl -> new AclBindingFilter(resource.toFilter(), acl.toFilter())) + .toList(); + Authorizer authorizer = getAuthorizer(clusterInstance); + authorizer.deleteAcls(ANONYMOUS_CONTEXT, aclBindingFilters) + .forEach(future -> { + try { + 
future.toCompletableFuture().get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Failed to delete ACLs", e); + } + }); + + AclBindingFilter aclBindingFilter = new AclBindingFilter(resource.toFilter(), AccessControlEntryFilter.ANY); + TestUtils.waitForCondition(() -> { + Set remainingAclEntries = new HashSet<>(); + authorizer.acls(aclBindingFilter).forEach(aclBinding -> remainingAclEntries.add(aclBinding.entry())); + return deleteAcls.stream().noneMatch(remainingAclEntries::contains); + }, "Failed to verify ACLs deletion"); + } + + + static final AuthorizableRequestContext ANONYMOUS_CONTEXT = new AuthorizableRequestContext() { + @Override + public String listenerName() { + return ""; + } + + @Override + public SecurityProtocol securityProtocol() { + return SecurityProtocol.PLAINTEXT; + } + + @Override + public KafkaPrincipal principal() { + return KafkaPrincipal.ANONYMOUS; + } + + @Override + public InetAddress clientAddress() { + return null; + } + + @Override + public int requestType() { + return 0; + } + + @Override + public int requestVersion() { + return 0; + } + + @Override + public String clientId() { + return ""; + } + + @Override + public int correlationId() { + return 0; + } + }; + + @ClusterTest + public void testUnauthorizedProduceAndConsumeWithClassicConsumer(ClusterInstance clusterInstance) throws InterruptedException { + testUnauthorizedProduceAndConsume(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testUnauthorizedProduceAndConsumeWithAsyncConsumer(ClusterInstance clusterInstance) throws InterruptedException { + testUnauthorizedProduceAndConsume(clusterInstance, GroupProtocol.CONSUMER); + } + + public void testUnauthorizedProduceAndConsume(ClusterInstance clusterInstance, GroupProtocol groupProtocol) throws InterruptedException { + setup(clusterInstance); + String topic = "topic"; + String group = "group"; + + addAndVerifyAcls( + Set.of(createAcl(AclOperation.CREATE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), + clusterInstance + ); + addAndVerifyAcls( + Set.of(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL), + clusterInstance + ); + + Producer producer = clusterInstance.producer(); + Consumer consumer = clusterInstance.consumer(Map.of( + GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT), + ConsumerConfig.GROUP_ID_CONFIG, group + )); + + try { + clusterInstance.createTopic(topic, 1, (short) 1); + ExecutionException produceException = assertThrows( + ExecutionException.class, + () -> producer.send(new ProducerRecord<>(topic, "message".getBytes())).get() + ); + Throwable cause = produceException.getCause(); + assertInstanceOf(TopicAuthorizationException.class, cause); + TopicAuthorizationException topicAuthException = (TopicAuthorizationException) cause; + assertEquals(Set.of(topic), topicAuthException.unauthorizedTopics()); + + TopicPartition topicPartition = new TopicPartition(topic, 0); + consumer.assign(Collections.singletonList(topicPartition)); + TopicAuthorizationException consumeException = assertThrows( + TopicAuthorizationException.class, + () -> consumer.poll(Duration.ofSeconds(15)) + ); + assertEquals(consumeException.unauthorizedTopics(), topicAuthException.unauthorizedTopics()); + } finally { + producer.close(Duration.ZERO); + consumer.close(); + } + } + + @ClusterTest + public void 
testClassicConsumeUnsubscribeWithGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeUnsubscribeWithOrWithoutGroupPermission(clusterInstance, GroupProtocol.CLASSIC, true); + } + + @ClusterTest + public void testAsyncConsumeUnsubscribeWithGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeUnsubscribeWithOrWithoutGroupPermission(clusterInstance, GroupProtocol.CONSUMER, true); + } + + @ClusterTest + public void testClassicConsumeUnsubscribeWithoutGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeUnsubscribeWithOrWithoutGroupPermission(clusterInstance, GroupProtocol.CLASSIC, false); + } + + @ClusterTest + public void testAsyncConsumeUnsubscribeWithoutGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeUnsubscribeWithOrWithoutGroupPermission(clusterInstance, GroupProtocol.CONSUMER, false); + } + + private void testConsumeUnsubscribeWithOrWithoutGroupPermission(ClusterInstance clusterInstance, GroupProtocol groupProtocol, boolean withGroupPermission) throws InterruptedException, ExecutionException { + setup(clusterInstance); + String topic = "topic"; + String group = "group"; + + // allow topic read/write permission to poll/send record + Set acls = new HashSet<>(); + acls.add(createAcl(AclOperation.CREATE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + addAndVerifyAcls( + acls, + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), + clusterInstance + ); + addAndVerifyAcls( + Set.of(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL), + clusterInstance + ); + + try (Producer producer = clusterInstance.producer(); + Consumer consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, group, + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false", + GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT))) + ) { + clusterInstance.createTopic(topic, 1, (short) 1); + producer.send(new ProducerRecord<>(topic, "message".getBytes())).get(); + consumer.subscribe(Collections.singletonList(topic)); + TestUtils.waitForCondition(() -> { + ConsumerRecords records = consumer.poll(Duration.ofSeconds(15)); + return records.count() == 1; + }, "consumer failed to receive message"); + if (!withGroupPermission) { + removeAndVerifyAcls( + Set.of(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL), + clusterInstance + ); + } + assertDoesNotThrow(consumer::unsubscribe); + } + } + + @ClusterTest + public void testClassicConsumeCloseWithGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeCloseWithGroupPermission(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAsyncConsumeCloseWithGroupPermission(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testConsumeCloseWithGroupPermission(clusterInstance, GroupProtocol.CONSUMER); + } + + private void testConsumeCloseWithGroupPermission(ClusterInstance clusterInstance, GroupProtocol groupProtocol) throws InterruptedException, 
ExecutionException { + setup(clusterInstance); + String topic = "topic"; + String group = "group"; + + // allow topic read/write permission to poll/send record + Set acls = new HashSet<>(); + acls.add(createAcl(AclOperation.CREATE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + addAndVerifyAcls( + acls, + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), + clusterInstance + ); + addAndVerifyAcls( + Set.of(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL), + clusterInstance + ); + + Producer producer = clusterInstance.producer(); + Consumer consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, group, + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false", + GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT))); + + try { + clusterInstance.createTopic(topic, 1, (short) 1); + producer.send(new ProducerRecord<>(topic, "message".getBytes())).get(); + consumer.subscribe(List.of(topic)); + TestUtils.waitForCondition(() -> { + ConsumerRecords records = consumer.poll(Duration.ofSeconds(15)); + return records.count() == 1; + }, "consumer failed to receive message"); + } finally { + producer.close(); + assertDoesNotThrow(() -> consumer.close()); + } + } + + @ClusterTest + public void testAuthorizedProduceAndConsumeWithClassic(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testAuthorizedProduceAndConsume(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest + public void testAuthorizedProduceAndConsumeWithAsync(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + testAuthorizedProduceAndConsume(clusterInstance, GroupProtocol.CONSUMER); + } + + private void testAuthorizedProduceAndConsume(ClusterInstance clusterInstance, GroupProtocol groupProtocol) throws InterruptedException, ExecutionException { + setup(clusterInstance); + String topic = "topic"; + String group = "group"; + + Set acls = new HashSet<>(); + acls.add(createAcl(AclOperation.CREATE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + acls.add(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)); + addAndVerifyAcls( + acls, + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), + clusterInstance + ); + addAndVerifyAcls( + Set.of(createAcl(AclOperation.READ, AclPermissionType.ALLOW, CLIENT_PRINCIPAL)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL), + clusterInstance + ); + + try (Producer producer = clusterInstance.producer(); + Consumer consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_ID_CONFIG, group, + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false", + GROUP_PROTOCOL_CONFIG, groupProtocol.name.toLowerCase(Locale.ROOT))) + ) { + clusterInstance.createTopic(topic, 1, (short) 1); + producer.send(new ProducerRecord<>(topic, "message".getBytes())).get(); + TopicPartition topicPartition = new TopicPartition(topic, 0); + consumer.assign(List.of(topicPartition)); + TestUtils.waitForCondition(() -> { + ConsumerRecords records = consumer.poll(Duration.ofSeconds(15)); + return records.count() == 1; + }, "consumer failed to receive message"); + } + } + +} diff --git 
a/clients/clients-integration-tests/src/test/java/org/apache/kafka/server/quota/CustomQuotaCallbackTest.java b/clients/clients-integration-tests/src/test/java/org/apache/kafka/server/quota/CustomQuotaCallbackTest.java index da42c7a300747..de0948981b095 100644 --- a/clients/clients-integration-tests/src/test/java/org/apache/kafka/server/quota/CustomQuotaCallbackTest.java +++ b/clients/clients-integration-tests/src/test/java/org/apache/kafka/server/quota/CustomQuotaCallbackTest.java @@ -26,12 +26,13 @@ import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.test.ClusterInstance; -import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.Type; import org.apache.kafka.server.config.QuotaConfig; +import org.apache.kafka.test.TestUtils; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -111,7 +112,7 @@ private void assertMetrics(Metrics metrics, Map expectedTags) { } private static Map expectedTags(Map extraTags) { - Map tags = new LinkedHashMap<>(); + var tags = new HashMap(); tags.put("config", QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG); tags.put("class", MonitorableCustomQuotaCallback.class.getSimpleName()); tags.putAll(extraTags); @@ -173,7 +174,7 @@ public static class MonitorableCustomQuotaCallback extends CustomQuotaCallback i @Override public void withPluginMetrics(PluginMetrics metrics) { - MetricName metricName = metrics.metricName(METRIC_NAME, METRIC_DESCRIPTION, Map.of()); + MetricName metricName = metrics.metricName(METRIC_NAME, METRIC_DESCRIPTION, new LinkedHashMap<>()); metrics.addMetric(metricName, (Gauge) (config, now) -> 1); } diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index aa3b5c9d628c9..08b861673e3d7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -88,8 +88,9 @@ public class CommonClientConfigs { "If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms."; public static final String RETRIES_CONFIG = "retries"; - public static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." + - " It is recommended to set the value to either zero or `MAX_VALUE` and use corresponding timeout parameters to control how long a client should retry a request."; + public static final String RETRIES_DOC = "It is recommended to set the value to either MAX_VALUE or zero, and use corresponding timeout parameters to control how long a client should retry a request." + + " Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." 
+ + " Setting a value of zero will lead to transient errors not being retried, and they will be propagated to the application to be handled."; public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms"; public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. " + @@ -125,7 +126,9 @@ public class CommonClientConfigs { "\n" + "TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead."; public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; - public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation."; + public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. " + + "Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. " + + "When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list."; public static final String METRICS_CONTEXT_PREFIX = "metrics.context."; @@ -192,7 +195,8 @@ public class CommonClientConfigs { + "is considered failed and the group will rebalance in order to reassign the partitions to another member. " + "For consumers using a non-null group.instance.id which reach this timeout, partitions will not be immediately reassigned. " + "Instead, the consumer will stop sending heartbeats and partitions will be reassigned " - + "after expiration of session.timeout.ms. This mirrors the behavior of a static consumer which has shutdown."; + + "after expiration of the session timeout (defined by the client config session.timeout.ms if using the Classic rebalance protocol, or by the broker config group.consumer.session.timeout.ms if using the Consumer protocol). " + + "This mirrors the behavior of a static consumer which has shutdown."; public static final String REBALANCE_TIMEOUT_MS_CONFIG = "rebalance.timeout.ms"; public static final String REBALANCE_TIMEOUT_MS_DOC = "The maximum allowed time for each worker to join the group " @@ -206,15 +210,18 @@ public class CommonClientConfigs { + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, " + "then the broker will remove this client from the group and initiate a rebalance. Note that the value " + "must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms " - + "and group.max.session.timeout.ms. Note that this configuration is not supported when group.protocol " - + "is set to \"consumer\"."; + + "and group.max.session.timeout.ms. Note that this client configuration is not supported when group.protocol " + + "is set to \"consumer\". In that case, session timeout is controlled by the broker config group.consumer.session.timeout.ms."; public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms"; public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer " + "coordinator when using Kafka's group management facilities. 
Heartbeats are used to ensure that the " + "consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. " - + "The value must be set lower than session.timeout.ms, but typically should be set no higher " - + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances."; + + "This config is only supported if group.protocol is set to \"classic\". In that case, " + + "the value must be set lower than session.timeout.ms, but typically should be set no higher " + + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances." + + "If group.protocol is set to \"consumer\", this config is not supported, as " + + "the heartbeat interval is controlled by the broker with group.consumer.heartbeat.interval.ms."; public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = "default.api.timeout.ms"; public static final String DEFAULT_API_TIMEOUT_MS_DOC = "Specifies the timeout (in milliseconds) for client APIs. " + diff --git a/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java index 4aff7c8c0a88f..9162fa4d4c0b4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java @@ -43,9 +43,9 @@ public String toString() { public final int heartbeatIntervalMs; public final String groupId; public final Optional groupInstanceId; + public final Optional rackId; public final long retryBackoffMs; public final long retryBackoffMaxMs; - public final boolean leaveGroupOnClose; public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) { this.sessionTimeoutMs = config.getInt(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG); @@ -53,8 +53,12 @@ public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) { // Consumer and Connect use different config names for defining rebalance timeout if ((protocolType == ProtocolType.CONSUMER) || (protocolType == ProtocolType.SHARE)) { this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG); + + String rackId = config.getString(CommonClientConfigs.CLIENT_RACK_CONFIG); + this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId); } else { this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG); + this.rackId = Optional.empty(); } this.heartbeatIntervalMs = config.getInt(CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG); @@ -75,13 +79,6 @@ public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) { this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MAX_MS_CONFIG); - - // Internal leave group config is only defined in Consumer. - if (protocolType == ProtocolType.CONSUMER) { - this.leaveGroupOnClose = config.getBoolean("internal.leave.group.on.close"); - } else { - this.leaveGroupOnClose = true; - } } // For testing purpose. 
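A minimal sketch of what the reworded session/heartbeat documentation above means for client configuration: under the classic protocol the client sets both timeouts, while under the new "consumer" protocol they are controlled by the broker-side group.consumer.session.timeout.ms and group.consumer.heartbeat.interval.ms. The class name and values below are illustrative assumptions, not part of this patch.

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class GroupProtocolConfigSketch {
    public static void main(String[] args) {
        // Classic protocol: the client owns the session and heartbeat settings.
        Map<String, Object> classicConfigs = Map.of(
                ConsumerConfig.GROUP_PROTOCOL_CONFIG, "classic",
                ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 45_000,
                ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 15_000); // typically no higher than 1/3 of the session timeout

        // "consumer" protocol: omit those client settings; the broker-side
        // group.consumer.* configs determine session and heartbeat behaviour.
        Map<String, Object> consumerConfigs = Map.of(
                ConsumerConfig.GROUP_PROTOCOL_CONFIG, "consumer");

        System.out.println(classicConfigs);
        System.out.println(consumerConfigs);
    }
}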
@@ -90,16 +87,16 @@ public GroupRebalanceConfig(final int sessionTimeoutMs, final int heartbeatIntervalMs, String groupId, Optional groupInstanceId, + String rackId, long retryBackoffMs, - long retryBackoffMaxMs, - boolean leaveGroupOnClose) { + long retryBackoffMaxMs) { this.sessionTimeoutMs = sessionTimeoutMs; this.rebalanceTimeoutMs = rebalanceTimeoutMs; this.heartbeatIntervalMs = heartbeatIntervalMs; this.groupId = groupId; this.groupInstanceId = groupInstanceId; + this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId); this.retryBackoffMs = retryBackoffMs; this.retryBackoffMaxMs = retryBackoffMaxMs; - this.leaveGroupOnClose = leaveGroupOnClose; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/Metadata.java b/clients/src/main/java/org/apache/kafka/clients/Metadata.java index b60156aae0066..0986d8a67bc36 100644 --- a/clients/src/main/java/org/apache/kafka/clients/Metadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/Metadata.java @@ -381,7 +381,7 @@ public synchronized void update(int requestVersion, MetadataResponse response, b public synchronized Set updatePartitionLeadership(Map partitionLeaders, List leaderNodes) { Map newNodes = leaderNodes.stream().collect(Collectors.toMap(Node::id, node -> node)); // Insert non-overlapping nodes from existing-nodes into new-nodes. - this.metadataSnapshot.cluster().nodes().stream().forEach(node -> newNodes.putIfAbsent(node.id(), node)); + this.metadataSnapshot.cluster().nodes().forEach(node -> newNodes.putIfAbsent(node.id(), node)); // Create partition-metadata for all updated partitions. Exclude updates for partitions - // 1. for which the corresponding partition has newer leader in existing metadata. @@ -508,7 +508,7 @@ private MetadataSnapshot handleMetadataResponse(MetadataResponse metadataRespons topicId = null; } - if (!retainTopic(topicName, metadata.isInternal(), nowMs)) + if (!retainTopic(topicName, topicId, metadata.isInternal(), nowMs)) continue; if (metadata.isInternal()) @@ -758,10 +758,20 @@ public Map topicNames() { return metadataSnapshot.topicNames(); } + /** + * Based on the topic name, check if the topic metadata should be kept when received in a metadata response. + */ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { return true; } + /** + * Based on the topic name and topic ID, check if the topic metadata should be kept when received in a metadata response. 
+ */ + protected boolean retainTopic(String topicName, Uuid topicId, boolean isInternal, long nowMs) { + return retainTopic(topicName, isInternal, nowMs); + } + public static class MetadataRequestAndVersion { public final MetadataRequest.Builder requestBuilder; public final int requestVersion; diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 1100479021fc5..692847a8b1553 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -1051,9 +1051,9 @@ private void handleApiVersionsResponse(List responses, apiVersionsResponse.data().finalizedFeaturesEpoch()); apiVersions.update(node, nodeVersionInfo); this.connectionStates.ready(node); - log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, ZK migration ready: {}, API versions: {}.", + log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, API versions: {}.", node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(), - apiVersionsResponse.data().supportedFeatures(), apiVersionsResponse.data().zkMigrationReady(), nodeVersionInfo); + apiVersionsResponse.data().supportedFeatures(), nodeVersionInfo); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterOptions.java index e28a03d541c3d..81e889db30d61 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AddRaftVoterOptions.java @@ -17,11 +17,20 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.protocol.Errors; import java.util.Optional; /** * Options for {@link Admin#addRaftVoter}. + * + *

+ * The clusterId is optional. + *

+ * If provided, the request will only succeed if the cluster id matches the id of the current cluster. + * If the cluster id does not match, the request will fail with {@link Errors#INCONSISTENT_CLUSTER_ID}. + *

+ * If not provided, the cluster id check is skipped. */ @InterfaceStability.Stable public class AddRaftVoterOptions extends AbstractOptions { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java index 2cfde026438df..1d90a84a79b18 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java @@ -32,6 +32,7 @@ import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.errors.FeatureUpdateFailedException; +import org.apache.kafka.common.errors.InconsistentClusterIdException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.quota.ClientQuotaAlteration; @@ -250,9 +251,8 @@ default DeleteTopicsResult deleteTopics(TopicCollection topics) { * During this time, {@link #listTopics()} and {@link #describeTopics(Collection)} * may continue to return information about the deleted topics. *

- * If delete.topic.enable is false on the brokers, deleteTopics will mark - * the topics for deletion, but not actually delete them. The futures will - * return successfully in this case. + * If delete.topic.enable is set to false on the brokers, an exception will be returned to the client indicating + * that topic deletion is disabled. + *
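// A minimal, hypothetical sketch of the behaviour described above: when delete.topic.enable is
// false on the brokers, the futures returned by deleteTopics now fail instead of completing
// successfully. The assumption that the error surfaces as TopicDeletionDisabledException is
// based on the updated javadoc wording, not on this patch; adjust the catch clause if your
// broker version reports a different error type.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.errors.TopicDeletionDisabledException;

import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class DeleteTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            try {
                admin.deleteTopics(List.of("orders")).all().get();
            } catch (ExecutionException e) {
                if (e.getCause() instanceof TopicDeletionDisabledException) {
                    System.err.println("Topic deletion is disabled on the brokers: " + e.getCause().getMessage());
                } else {
                    throw e;
                }
            }
        }
    }
}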

* When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. * When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher. @@ -951,24 +951,24 @@ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(MapNote: this method effectively does the same as the corresponding consumer group method {@link Admin#listConsumerGroupOffsets} does. * - * @param groupSpecs Map of Streams group ids to a spec that specifies the topic partitions of the group to list offsets for. + * @param groupSpecs Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for. * - * @param options The options to use when listing the Streams group offsets. + * @param options The options to use when listing the streams group offsets. * @return The ListStreamsGroupOffsetsResult */ ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs, ListStreamsGroupOffsetsOptions options); /** - * List the Streams group offsets available in the cluster for the specified groups with the default options. + * List the streams group offsets available in the cluster for the specified groups with the default options. *
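// A sketch of the default-options listStreamsGroupOffsets call described above. The spec class
// name (ListStreamsGroupOffsetsSpec), the meaning of an empty spec ("all committed offsets of
// the group"), and the partitionsToOffsetAndMetadata accessor are assumptions modelled on the
// consumer-group counterpart that this javadoc references; the group id placeholder is the
// streams application.id.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListStreamsGroupOffsetsSpec;

import java.util.Map;
import java.util.Properties;

public class ListStreamsGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            admin.listStreamsGroupOffsets(Map.of("my-streams-app", new ListStreamsGroupOffsetsSpec()))
                 .partitionsToOffsetAndMetadata("my-streams-app")
                 .get()
                 .forEach((tp, offsetAndMetadata) -> System.out.println(tp + " -> " + offsetAndMetadata));
        }
    }
}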

* This is a convenience method for * {@link #listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)} with default options. * - * @param groupSpecs Map of Streams group ids to a spec that specifies the topic partitions of the group to list offsets for. + * @param groupSpecs Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for. * @return The ListStreamsGroupOffsetsResult. */ default ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs) { @@ -993,17 +993,17 @@ default DeleteConsumerGroupsResult deleteConsumerGroups(Collection group } /** - * Delete Streams groups from the cluster. + * Delete streams groups from the cluster. * * Note: this method effectively does the same as the corresponding consumer group method {@link Admin#deleteConsumerGroups} does. * - * @param options The options to use when deleting a Streams group. + * @param options The options to use when deleting a streams group. * @return The DeleteStreamsGroupsResult. */ DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options); /** - * Delete Streams groups from the cluster with the default options. + * Delete streams groups from the cluster with the default options. * * @return The DeleteStreamsGroupResult. */ @@ -1035,13 +1035,13 @@ default DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String group } /** - * Delete committed offsets for a set of partitions in a Streams group. This will + * Delete committed offsets for a set of partitions in a streams group. This will * succeed at the partition level only if the group is not actively subscribed * to the corresponding topic. * * Note: this method effectively does the same as the corresponding consumer group method {@link Admin#deleteConsumerGroupOffsets} does. * - * @param options The options to use when deleting offsets in a Streams group. + * @param options The options to use when deleting offsets in a streams group. * @return The DeleteStreamsGroupOffsetsResult. */ DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, @@ -1049,7 +1049,7 @@ DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, DeleteStreamsGroupOffsetsOptions options); /** - * Delete committed offsets for a set of partitions in a Streams group with the default + * Delete committed offsets for a set of partitions in a streams group with the default * options. This will succeed at the partition level only if the group is not actively * subscribed to the corresponding topic. * @@ -1776,12 +1776,36 @@ default FenceProducersResult fenceProducers(Collection transactionalIds) FenceProducersResult fenceProducers(Collection transactionalIds, FenceProducersOptions options); + /** + * List the configuration resources available in the cluster which matches config resource type. + * If no config resource types are specified, all configuration resources will be listed. + * + * @param configResourceTypes The set of configuration resource types to list. + * @param options The options to use when listing the configuration resources. + * @return The ListConfigurationResourcesResult. + */ + ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options); + + /** + * List all configuration resources available in the cluster with the default options. + *
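// A sketch of the new listConfigResources API added above, which supersedes the deprecated
// listClientMetricsResources. Two assumptions are made here: the Set parameter holds
// ConfigResource.Type values, and ListConfigResourcesResult exposes the matching resources via
// an all() future, following the pattern of the other *Result classes.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListConfigResourcesOptions;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Properties;
import java.util.Set;

public class ListConfigResourcesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Every configuration resource in the cluster, regardless of type.
            admin.listConfigResources().all().get().forEach(System.out::println);
            // Only client metrics resources, the use case previously covered by listClientMetricsResources.
            admin.listConfigResources(Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions())
                 .all().get().forEach(System.out::println);
        }
    }
}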

+ * This is a convenience method for {@link #listConfigResources(Set, ListConfigResourcesOptions)} + * with default options. See the overload for more details. + * + * @return The ListConfigurationResourcesResult. + */ + default ListConfigResourcesResult listConfigResources() { + return listConfigResources(Set.of(), new ListConfigResourcesOptions()); + } + /** * List the client metrics configuration resources available in the cluster. * * @param options The options to use when listing the client metrics resources. * @return The ListClientMetricsResourcesResult. + * @deprecated Since 4.1. Use {@link #listConfigResources(Set, ListConfigResourcesOptions)} instead. */ + @Deprecated(since = "4.1", forRemoval = true) ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options); /** @@ -1791,7 +1815,9 @@ FenceProducersResult fenceProducers(Collection transactionalIds, * with default options. See the overload for more details. * * @return The ListClientMetricsResourcesResult. + * @deprecated Since 4.1. Use {@link #listConfigResources()} instead. */ + @Deprecated(since = "4.1", forRemoval = true) default ListClientMetricsResourcesResult listClientMetricsResources() { return listClientMetricsResources(new ListClientMetricsResourcesOptions()); } @@ -1841,10 +1867,17 @@ default AddRaftVoterResult addRaftVoter( /** * Add a new voter node to the KRaft metadata quorum. * + *

+ * The clusterId in {@link AddRaftVoterOptions} is optional. + * If provided, the operation will only succeed if the cluster id matches the id + * of the current cluster. If the cluster id does not match, the operation + * will fail with {@link InconsistentClusterIdException}. + * If not provided, the cluster id check is skipped. + * * @param voterId The node ID of the voter. * @param voterDirectoryId The directory ID of the voter. * @param endpoints The endpoints that the new voter has. - * @param options The options to use when adding the new voter node. + * @param options Additional options for the operation, including optional cluster ID. */ AddRaftVoterResult addRaftVoter( int voterId, @@ -1869,9 +1902,16 @@ default RemoveRaftVoterResult removeRaftVoter( /** * Remove a voter node from the KRaft metadata quorum. * + *
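// A sketch of adding and then removing a KRaft voter with the default options, in which case the
// cluster id check described in these javadocs is skipped; populate AddRaftVoterOptions /
// RemoveRaftVoterOptions with the expected cluster id when the check is wanted. The node id,
// directory id and endpoint below are placeholders, and in practice the directory id comes from
// the new voter's meta.properties rather than being generated on the spot.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.RaftVoterEndpoint;
import org.apache.kafka.common.Uuid;

import java.util.Properties;
import java.util.Set;

public class RaftVoterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            int voterId = 4;
            Uuid voterDirectoryId = Uuid.randomUuid();
            Set<RaftVoterEndpoint> endpoints =
                    Set.of(new RaftVoterEndpoint("CONTROLLER", "controller-4.example.com", 9093));
            admin.addRaftVoter(voterId, voterDirectoryId, endpoints).all().get();
            admin.removeRaftVoter(voterId, voterDirectoryId).all().get();
        }
    }
}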

+ * The clusterId in {@link RemoveRaftVoterOptions} is optional. + * If provided, the operation will only succeed if the cluster id matches the id + * of the current cluster. If the cluster id does not match, the operation + * will fail with {@link InconsistentClusterIdException}. + * If not provided, the cluster id check is skipped. + * * @param voterId The node ID of the voter. * @param voterDirectoryId The directory ID of the voter. - * @param options The options to use when removing the voter node. + * @param options Additional options for the operation, including optional cluster ID. */ RemoveRaftVoterResult removeRaftVoter( int voterId, @@ -1951,28 +1991,28 @@ default ListShareGroupOffsetsResult listShareGroupOffsets(Map partitions, DeleteShareGroupOffsetsOptions options); + DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options); /** - * Delete offsets for a set of partitions in a share group with the default options. + * Delete offsets for a set of topics in a share group with the default options. * *
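// A minimal sketch of the reworked deleteShareGroupOffsets call, which now takes topic names
// rather than topic-partitions, matching the per-topic topicResult accessor on
// DeleteShareGroupOffsetsResult. Group and topic names are placeholders.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteShareGroupOffsetsResult;

import java.util.Properties;
import java.util.Set;

public class DeleteShareGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            DeleteShareGroupOffsetsResult result =
                    admin.deleteShareGroupOffsets("my-share-group", Set.of("orders", "payments"));
            // Per-topic outcome; the future fails if deletion failed for that topic.
            result.topicResult("orders").get();
            // Or wait for every topic in the request at once.
            result.all().get();
        }
    }
}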

* This is a convenience method for {@link #deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions)} with default options. * See the overload for more details. * * @param groupId The group for which to delete offsets. - * @param partitions The topic-partitions. + * @param topics The topics for which to delete offsets. * @return The DeleteShareGroupOffsetsResult. */ - default DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set partitions) { - return deleteShareGroupOffsets(groupId, partitions, new DeleteShareGroupOffsetsOptions()); + default DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics) { + return deleteShareGroupOffsets(groupId, topics, new DeleteShareGroupOffsetsOptions()); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java index a87af6be154a5..471d3916cfb55 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java @@ -30,6 +30,7 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.Utils; +import java.util.List; import java.util.Map; import java.util.Set; @@ -154,12 +155,14 @@ public class AdminClientConfig extends AbstractConfig { static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - "", + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, BOOTSTRAP_SERVERS_DOC). define(BOOTSTRAP_CONTROLLERS_CONFIG, Type.LIST, - "", + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, BOOTSTRAP_CONTROLLERS_DOC) .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CLIENT_ID_DOC) @@ -238,6 +241,7 @@ public class AdminClientConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, METRIC_REPORTER_CLASSES_DOC) .define(METRICS_RECORDING_LEVEL_CONFIG, @@ -280,7 +284,13 @@ public class AdminClientConfig extends AbstractConfig { DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, atLeast(0), Importance.LOW, - METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); + METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) + .define(CONFIG_PROVIDERS_CONFIG, + ConfigDef.Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.LOW, + CONFIG_PROVIDERS_DOC); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java index 48d5646764d42..789c9f64a93aa 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java @@ -26,6 +26,20 @@ /** * A class representing an alter configuration entry containing name, value and operation type. + *

+ * Note for Broker Logger Configuration:
+ * When altering broker logger levels (using {@link org.apache.kafka.common.config.ConfigResource.Type#BROKER_LOGGER}), + * it is strongly recommended to use log level constants from {@link org.apache.kafka.common.config.LogLevelConfig} instead of string literals. + * This ensures compatibility with Kafka's log level validation and avoids potential configuration errors. + *

+ * Example: + *

+ * Recommended approach:
+ * new AlterConfigOp(new ConfigEntry(loggerName, LogLevelConfig.DEBUG_LOG_LEVEL), OpType.SET)
+ *
+ * Avoid this:
+ * new AlterConfigOp(new ConfigEntry(loggerName, "DEBUG"), OpType.SET)
+ * 
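// Expanding the javadoc example above into a self-contained sketch: setting a broker logger to
// DEBUG through incrementalAlterConfigs using the LogLevelConfig constant rather than a raw
// "DEBUG" string. The broker id ("0") and the logger name are placeholders.
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.LogLevelConfig;

import java.util.List;
import java.util.Map;
import java.util.Properties;

public class BrokerLoggerLevelExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            ConfigResource brokerLogger = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
            AlterConfigOp setDebug = new AlterConfigOp(
                    new ConfigEntry("kafka.server.ReplicaManager", LogLevelConfig.DEBUG_LOG_LEVEL),
                    AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(brokerLogger, List.of(setDebug))).all().get();
        }
    }
}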
*/ public class AlterConfigOp { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.java index 7c41852231d90..293daaadbb925 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.protocol.Errors; @@ -35,9 +36,9 @@ @InterfaceStability.Evolving public class AlterShareGroupOffsetsResult { - private final KafkaFuture> future; + private final KafkaFuture> future; - AlterShareGroupOffsetsResult(KafkaFuture> future) { + AlterShareGroupOffsetsResult(KafkaFuture> future) { this.future = future; } @@ -54,11 +55,11 @@ public KafkaFuture partitionResult(final TopicPartition partition) { result.completeExceptionally(new IllegalArgumentException( "Alter offset for partition \"" + partition + "\" was not attempted")); } else { - final Errors error = topicPartitions.get(partition); - if (error == Errors.NONE) { + final ApiException exception = topicPartitions.get(partition); + if (exception == null) { result.complete(null); } else { - result.completeExceptionally(error.exception()); + result.completeExceptionally(exception); } } }); @@ -68,22 +69,22 @@ public KafkaFuture partitionResult(final TopicPartition partition) { /** * Return a future which succeeds if all the alter offsets succeed. + * If not, the first topic error shall be returned. 
*/ public KafkaFuture all() { return this.future.thenApply(topicPartitionErrorsMap -> { List partitionsFailed = topicPartitionErrorsMap.entrySet() .stream() - .filter(e -> e.getValue() != Errors.NONE) + .filter(e -> e.getValue() != null) .map(Map.Entry::getKey) .collect(Collectors.toList()); - for (Errors error : topicPartitionErrorsMap.values()) { - if (error != Errors.NONE) { - throw error.exception( - "Failed altering share group offsets for the following partitions: " + partitionsFailed); + for (ApiException exception : topicPartitionErrorsMap.values()) { + if (exception != null) { + throw Errors.forException(exception).exception( + "Failed altering group offsets for the following partitions: " + partitionsFailed); } } return null; }); } - } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java index b5c85b5873204..d5af97080b080 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java @@ -18,6 +18,7 @@ import java.util.Objects; +@Deprecated(since = "4.1") public class ClientMetricsResourceListing { private final String name; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.java index d611881a81319..d9480d4ac062c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.KafkaFuture; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.internals.KafkaFutureImpl; @@ -33,27 +32,27 @@ @InterfaceStability.Evolving public class DeleteShareGroupOffsetsResult { - private final KafkaFuture> future; - private final Set partitions; + private final KafkaFuture> future; + private final Set topics; - DeleteShareGroupOffsetsResult(KafkaFuture> future, Set partitions) { + DeleteShareGroupOffsetsResult(KafkaFuture> future, Set topics) { this.future = future; - this.partitions = partitions; + this.topics = topics; } /** * Return a future which succeeds only if all the deletions succeed. - * If not, the first partition error shall be returned. + * If not, the first topic error shall be returned. */ public KafkaFuture all() { final KafkaFutureImpl result = new KafkaFutureImpl<>(); - this.future.whenComplete((topicPartitions, throwable) -> { + this.future.whenComplete((topicResults, throwable) -> { if (throwable != null) { result.completeExceptionally(throwable); } else { - for (TopicPartition partition : partitions) { - if (maybeCompleteExceptionally(topicPartitions, partition, result)) { + for (String topic : topics) { + if (maybeCompleteExceptionally(topicResults, topic, result)) { return; } } @@ -64,32 +63,32 @@ public KafkaFuture all() { } /** - * Return a future which can be used to check the result for a given partition. + * Return a future which can be used to check the result for a given topic. 
*/ - public KafkaFuture partitionResult(final TopicPartition partition) { - if (!partitions.contains(partition)) { - throw new IllegalArgumentException("Partition " + partition + " was not included in the original request"); + public KafkaFuture topicResult(final String topic) { + if (!topics.contains(topic)) { + throw new IllegalArgumentException("Topic " + topic + " was not included in the original request"); } final KafkaFutureImpl result = new KafkaFutureImpl<>(); - this.future.whenComplete((topicPartitions, throwable) -> { + this.future.whenComplete((topicResults, throwable) -> { if (throwable != null) { result.completeExceptionally(throwable); - } else if (!maybeCompleteExceptionally(topicPartitions, partition, result)) { + } else if (!maybeCompleteExceptionally(topicResults, topic, result)) { result.complete(null); } }); return result; } - private boolean maybeCompleteExceptionally(Map partitionLevelErrors, - TopicPartition partition, + private boolean maybeCompleteExceptionally(Map topicLevelErrors, + String topic, KafkaFutureImpl result) { Throwable exception; - if (!partitionLevelErrors.containsKey(partition)) { - exception = new IllegalArgumentException("Offset deletion result for partition \"" + partition + "\" was not included in the response"); + if (!topicLevelErrors.containsKey(topic)) { + exception = new IllegalArgumentException("Offset deletion result for topic \"" + topic + "\" was not included in the response"); } else { - exception = partitionLevelErrors.get(partition); + exception = topicLevelErrors.get(topic); } if (exception != null) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.java index a41ec6d00b349..80fd55c732300 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsOptions.java @@ -22,7 +22,7 @@ import java.util.Collection; /** - * Options for the {@link Admin#deleteShareGroups(Collection , DeleteShareGroupsOptions)} call. + * Options for the {@link Admin#deleteShareGroups(Collection, DeleteShareGroupsOptions)} call. *

* The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsResult.java index c2791e681f719..ff53da08df81f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupsResult.java @@ -25,7 +25,7 @@ import java.util.Map; /** - * The result of the {@link Admin#deleteShareGroups(Collection , DeleteShareGroupsOptions)} call. + * The result of the {@link Admin#deleteShareGroups(Collection, DeleteShareGroupsOptions)} call. *

* The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.java index 6cd14797122b3..6ca2ec66a276e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteStreamsGroupsOptions.java @@ -21,7 +21,7 @@ import java.util.Collection; /** - * Options for the {@link Admin#deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions)} call. + * Options for the {@link Admin#deleteStreamsGroups(Collection, DeleteStreamsGroupsOptions)} call. *

* The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java index 5a2f55c544fe1..fad56892f4596 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java @@ -69,7 +69,7 @@ public KafkaFuture> all() { retval.completeExceptionally(Errors.forCode(optionalFirstFailedDescribe.get().errorCode()).exception(optionalFirstFailedDescribe.get().errorMessage())); } else { Map retvalMap = new HashMap<>(); - data.results().stream().forEach(userResult -> + data.results().forEach(userResult -> retvalMap.put(userResult.user(), new UserScramCredentialsDescription(userResult.user(), getScramCredentialInfosFor(userResult)))); retval.complete(retvalMap); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java index 136d8236bd5ca..b99e4f6587bd7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java @@ -300,6 +300,12 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, return delegate.fenceProducers(transactionalIds, options); } + @Override + public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { + return delegate.listConfigResources(configResourceTypes, options); + } + + @SuppressWarnings({"deprecation", "removal"}) @Override public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { return delegate.listClientMetricsResources(options); @@ -336,8 +342,8 @@ public ListShareGroupOffsetsResult listShareGroupOffsets(Map partitions, DeleteShareGroupOffsetsOptions options) { - return delegate.deleteShareGroupOffsets(groupId, partitions, options); + public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options) { + return delegate.deleteShareGroupOffsets(groupId, topics, options); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 844f6962160ff..78a7f905319c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -159,7 +159,7 @@ import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; import org.apache.kafka.common.message.ExpireDelegationTokenRequestData; import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; -import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; +import org.apache.kafka.common.message.ListConfigResourcesRequestData; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData; @@ -233,8 +233,8 @@ import org.apache.kafka.common.requests.IncrementalAlterConfigsRequest; import org.apache.kafka.common.requests.IncrementalAlterConfigsResponse; import 
org.apache.kafka.common.requests.JoinGroupRequest; -import org.apache.kafka.common.requests.ListClientMetricsResourcesRequest; -import org.apache.kafka.common.requests.ListClientMetricsResourcesResponse; +import org.apache.kafka.common.requests.ListConfigResourcesRequest; +import org.apache.kafka.common.requests.ListConfigResourcesResponse; import org.apache.kafka.common.requests.ListGroupsRequest; import org.apache.kafka.common.requests.ListGroupsResponse; import org.apache.kafka.common.requests.ListOffsetsRequest; @@ -419,11 +419,11 @@ public class KafkaAdminClient extends AdminClient { /** * Get or create a list value from a map. * - * @param map The map to get or create the element from. - * @param key The key. - * @param The key type. - * @param The value type. - * @return The list value. + * @param map The map to get or create the element from. + * @param key The key. + * @param The key type. + * @param The value type. + * @return The list value. */ static List getOrCreateListValue(Map> map, K key) { return map.computeIfAbsent(key, k -> new LinkedList<>()); @@ -432,9 +432,9 @@ static List getOrCreateListValue(Map> map, K key) { /** * Send an exception to every element in a collection of KafkaFutureImpls. * - * @param futures The collection of KafkaFutureImpl objects. - * @param exc The exception - * @param The KafkaFutureImpl result type. + * @param futures The collection of KafkaFutureImpl objects. + * @param exc The exception + * @param The KafkaFutureImpl result type. */ private static void completeAllExceptionally(Collection> futures, Throwable exc) { completeAllExceptionally(futures.stream(), exc); @@ -443,9 +443,9 @@ private static void completeAllExceptionally(Collection> /** * Send an exception to all futures in the provided stream * - * @param futures The stream of KafkaFutureImpl objects. - * @param exc The exception - * @param The KafkaFutureImpl result type. + * @param futures The stream of KafkaFutureImpl objects. + * @param exc The exception + * @param The KafkaFutureImpl result type. */ private static void completeAllExceptionally(Stream> futures, Throwable exc) { futures.forEach(future -> future.completeExceptionally(exc)); @@ -454,9 +454,9 @@ private static void completeAllExceptionally(Stream> futu /** * Get the current time remaining before a deadline as an integer. * - * @param now The current time in milliseconds. - * @param deadlineMs The deadline time in milliseconds. - * @return The time delta in milliseconds. + * @param now The current time in milliseconds. + * @param deadlineMs The deadline time in milliseconds. + * @return The time delta in milliseconds. */ static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) { long deltaMs = deadlineMs - now; @@ -470,9 +470,8 @@ else if (deltaMs < Integer.MIN_VALUE) /** * Generate the client id based on the configuration. * - * @param config The configuration - * - * @return The client id + * @param config The configuration + * @return The client id */ static String generateClientId(AdminClientConfig config) { String clientId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG); @@ -488,10 +487,9 @@ String getClientId() { /** * Get the deadline for a particular call. * - * @param now The current time in milliseconds. - * @param optionTimeoutMs The timeout option given by the user. - * - * @return The deadline in milliseconds. + * @param now The current time in milliseconds. + * @param optionTimeoutMs The timeout option given by the user. + * @return The deadline in milliseconds. 
*/ private long calcDeadlineMs(long now, Integer optionTimeoutMs) { if (optionTimeoutMs != null) @@ -502,9 +500,8 @@ private long calcDeadlineMs(long now, Integer optionTimeoutMs) { /** * Pretty-print an exception. * - * @param throwable The exception. - * - * @return A compact human-readable string. + * @param throwable The exception. + * @return A compact human-readable string. */ static String prettyPrintException(Throwable throwable) { if (throwable == null) @@ -550,7 +547,7 @@ static KafkaAdminClient createInternal( .recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricTags); MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, - config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); + config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); metrics = new Metrics(metricConfig, reporters, time, metricsContext); networkClient = ClientUtils.createNetworkClient(config, clientId, @@ -582,10 +579,12 @@ static KafkaAdminClient createInternal(AdminClientConfig config, Time time) { Metrics metrics = null; String clientId = generateClientId(config); + List reporters = CommonClientConfigs.metricsReporters(clientId, config); Optional clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); + clientTelemetryReporter.ifPresent(reporters::add); try { - metrics = new Metrics(new MetricConfig(), new LinkedList<>(), time); + metrics = new Metrics(new MetricConfig(), reporters, time); LogContext logContext = createLogContext(clientId); return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, client, null, logContext, clientTelemetryReporter); @@ -630,9 +629,7 @@ private KafkaAdminClient(AdminClientConfig config, CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, retryBackoffMaxMs, CommonClientConfigs.RETRY_BACKOFF_JITTER); - List reporters = CommonClientConfigs.metricsReporters(this.clientId, config); this.clientTelemetryReporter = clientTelemetryReporter; - this.clientTelemetryReporter.ifPresent(reporters::add); this.metadataRecoveryStrategy = MetadataRecoveryStrategy.forName(config.getString(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG)); this.partitionLeaderCache = new HashMap<>(); this.adminFetchMetricsManager = new AdminFetchMetricsManager(metrics); @@ -656,11 +653,11 @@ private int configureDefaultApiTimeoutMs(AdminClientConfig config) { if (defaultApiTimeoutMs < requestTimeoutMs) { if (config.originals().containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) { throw new ConfigException("The specified value of " + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG + - " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + "."); + " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + "."); } else { log.warn("Overriding the default value for {} ({}) with the explicitly configured request timeout {}", - AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, this.defaultApiTimeoutMs, - requestTimeoutMs); + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, this.defaultApiTimeoutMs, + requestTimeoutMs); return requestTimeoutMs; } } @@ -718,6 +715,7 @@ public void close(Duration timeout) { */ private interface NodeProvider { Node provide(); + boolean supportsUseControllers(); } @@ -727,7 +725,7 @@ public Node provide() { long now = time.milliseconds(); LeastLoadedNode leastLoadedNode = client.leastLoadedNode(now); if (metadataRecoveryStrategy == 
MetadataRecoveryStrategy.REBOOTSTRAP - && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { + && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { metadataManager.rebootstrap(now); } @@ -757,7 +755,7 @@ private class ConstantNodeIdProvider implements NodeProvider { @Override public Node provide() { if (metadataManager.isReady() && - (metadataManager.nodeById(nodeId) != null)) { + (metadataManager.nodeById(nodeId) != null)) { return metadataManager.nodeById(nodeId); } // If we can't find the node with the given constant ID, we schedule a @@ -791,7 +789,7 @@ private class ControllerNodeProvider implements NodeProvider { @Override public Node provide() { if (metadataManager.isReady() && - (metadataManager.controller() != null)) { + (metadataManager.controller() != null)) { return metadataManager.controller(); } metadataManager.requestUpdate(); @@ -825,36 +823,6 @@ public boolean supportsUseControllers() { } } - /** - * Provides the least loaded broker, or the active kcontroller if we're using - * bootstrap.controllers. - */ - private class ConstantBrokerOrActiveKController implements NodeProvider { - private final int nodeId; - - ConstantBrokerOrActiveKController(int nodeId) { - this.nodeId = nodeId; - } - - @Override - public Node provide() { - if (metadataManager.isReady()) { - if (metadataManager.usingBootstrapControllers()) { - return metadataManager.controller(); - } else if (metadataManager.nodeById(nodeId) != null) { - return metadataManager.nodeById(nodeId); - } - } - metadataManager.requestUpdate(); - return null; - } - - @Override - public boolean supportsUseControllers() { - return true; - } - } - /** * Provides the least loaded broker, or the active kcontroller if we're using * bootstrap.controllers. @@ -923,13 +891,13 @@ protected Node curNode() { /** * Handle a failure. - * + *

* Depending on what the exception is and how many times we have already tried, we may choose to * fail the Call, or retry it. It is important to print the stack traces here in some cases, * since they are not necessarily preserved in ApiVersionException objects. * - * @param now The current time in milliseconds. - * @param throwable The failure exception. + * @param now The current time in milliseconds. + * @param throwable The failure exception. */ final void fail(long now, Throwable throwable) { if (curNode != null) { @@ -945,7 +913,7 @@ final void fail(long now, Throwable throwable) { // protocol downgrade will not count against the total number of retries we get for // this RPC. That is why 'tries' is not incremented. if ((throwable instanceof UnsupportedVersionException) && - handleUnsupportedVersionException((UnsupportedVersionException) throwable)) { + handleUnsupportedVersionException((UnsupportedVersionException) throwable)) { log.debug("{} attempting protocol downgrade and then retry.", this); runnable.pendingCalls.add(this); return; @@ -999,16 +967,14 @@ private void handleTimeoutFailure(long now, Throwable cause) { * Create an AbstractRequest.Builder for this Call. * * @param timeoutMs The timeout in milliseconds. - * - * @return The AbstractRequest builder. + * @return The AbstractRequest builder. */ abstract AbstractRequest.Builder createRequest(int timeoutMs); /** * Process the call response. * - * @param abstractResponse The AbstractResponse. - * + * @param abstractResponse The AbstractResponse. */ abstract void handleResponse(AbstractResponse abstractResponse); @@ -1016,16 +982,15 @@ private void handleTimeoutFailure(long now, Throwable cause) { * Handle a failure. This will only be called if the failure exception was not * retriable, or if we hit a timeout. * - * @param throwable The exception. + * @param throwable The exception. */ abstract void handleFailure(Throwable throwable); /** * Handle an UnsupportedVersionException. * - * @param exception The exception. - * - * @return True if the exception can be handled; false otherwise. + * @param exception The exception. + * @return True if the exception can be handled; false otherwise. */ boolean handleUnsupportedVersionException(UnsupportedVersionException exception) { return false; @@ -1062,7 +1027,7 @@ static class TimeoutProcessor { /** * Create a new timeout processor. * - * @param now The current time in milliseconds since the epoch. + * @param now The current time in milliseconds since the epoch. */ TimeoutProcessor(long now) { this.now = now; @@ -1074,9 +1039,8 @@ static class TimeoutProcessor { * Timed out calls will be removed and failed. * The remaining milliseconds until the next timeout will be updated. * - * @param calls The collection of calls. - * - * @return The number of calls which were timed out. + * @param calls The collection of calls. + * @return The number of calls which were timed out. */ int handleTimeouts(Collection calls, String msg) { int numTimedOut = 0; @@ -1098,9 +1062,8 @@ int handleTimeouts(Collection calls, String msg) { * Check whether a call should be timed out. * The remaining milliseconds until the next timeout will be updated. * - * @param call The call. - * - * @return True if the call should be timed out. + * @param call The call. + * @return True if the call should be timed out. 
*/ boolean callHasExpired(Call call) { int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs); @@ -1160,7 +1123,7 @@ private final class AdminClientRunnable implements Runnable { /** * Time out the elements in the pendingCalls list which are expired. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private void timeoutPendingCalls(TimeoutProcessor processor) { int numTimedOut = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment."); @@ -1171,7 +1134,7 @@ private void timeoutPendingCalls(TimeoutProcessor processor) { /** * Time out calls which have been assigned to nodes. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private int timeoutCallsToSend(TimeoutProcessor processor) { int numTimedOut = 0; @@ -1186,7 +1149,7 @@ private int timeoutCallsToSend(TimeoutProcessor processor) { /** * Drain all the calls from newCalls into pendingCalls. - * + *

* This function holds the lock for the minimum amount of time, to avoid blocking * users of AdminClient who will also take the lock to add new calls. */ @@ -1198,7 +1161,7 @@ private synchronized void drainNewCalls() { * Add some calls to pendingCalls, and then clear the input list. * Also clears Call#curNode. * - * @param calls The calls to add. + * @param calls The calls to add. */ private void transitionToPendingAndClearList(List calls) { for (Call call : calls) { @@ -1211,9 +1174,9 @@ private void transitionToPendingAndClearList(List calls) { /** * Choose nodes for the calls in the pendingCalls list. * - * @param now The current time in milliseconds. - * @return The minimum time until a call is ready to be retried if any of the pending - * calls are backing off after a failure + * @param now The current time in milliseconds. + * @return The minimum time until a call is ready to be retried if any of the pending + * calls are backing off after a failure */ private long maybeDrainPendingCalls(long now) { long pollTimeout = Long.MAX_VALUE; @@ -1271,8 +1234,8 @@ private boolean maybeDrainPendingCall(Call call, long now) { /** * Send the calls which are ready. * - * @param now The current time in milliseconds. - * @return The minimum timeout we need for poll(). + * @param now The current time in milliseconds. + * @return The minimum timeout we need for poll(). */ private long sendEligibleCalls(long now) { long pollTimeout = Long.MAX_VALUE; @@ -1294,7 +1257,7 @@ private long sendEligibleCalls(long now) { if (deadline != null) { if (now >= deadline) { log.info("Disconnecting from {} and revoking {} node assignment(s) " + - "because the node is taking too long to become ready.", + "because the node is taking too long to become ready.", node.idString(), calls.size()); transitionToPendingAndClearList(calls); client.disconnect(node.idString()); @@ -1347,12 +1310,12 @@ private long sendEligibleCalls(long now) { /** * Time out expired calls that are in flight. - * + *

* Calls that are in flight may have been partially or completely sent over the wire. They may * even be in the process of being processed by the remote server. At the moment, our only option * to time them out is to close the entire connection. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private void timeoutCallsInFlight(TimeoutProcessor processor) { int numTimedOut = 0; @@ -1375,8 +1338,8 @@ private void timeoutCallsInFlight(TimeoutProcessor processor) { /** * Handle responses from the server. * - * @param now The current time in milliseconds. - * @param responses The latest responses from KafkaClient. + * @param now The current time in milliseconds. + * @param responses The latest responses from KafkaClient. */ private void handleResponses(long now, List responses) { for (ClientResponse response : responses) { @@ -1387,7 +1350,7 @@ private void handleResponses(long now, List responses) { // If the server returns information about a correlation ID we didn't use yet, // an internal server error has occurred. Close the connection and log an error message. log.error("Internal server error on {}: server returned information about unknown " + - "correlation ID {}, requestHeader = {}", response.destination(), correlationId, + "correlation ID {}, requestHeader = {}", response.destination(), correlationId, response.requestHeader()); client.disconnect(response.destination()); continue; @@ -1506,7 +1469,7 @@ public void run() { numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited."); numTimedOut += timeoutCallsToSend(timeoutProcessor); numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(), - "The AdminClient thread has exited."); + "The AdminClient thread has exited."); if (numTimedOut > 0) { log.info("Timed out {} remaining operation(s) during close.", numTimedOut); } @@ -1576,13 +1539,13 @@ private void processRequests() { /** * Queue a call for sending. - * + *

* If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even * if the AdminClient is shutting down). This function should called when retrying an * existing call. * - * @param call The new call object. - * @param now The current time in milliseconds. + * @param call The new call object. + * @param now The current time in milliseconds. */ void enqueue(Call call, long now) { if (call.tries > maxRetries) { @@ -1613,18 +1576,18 @@ void enqueue(Call call, long now) { /** * Initiate a new call. - * + *

* This will fail if the AdminClient is scheduled to shut down. * - * @param call The new call object. - * @param now The current time in milliseconds. + * @param call The new call object. + * @param now The current time in milliseconds. */ void call(Call call, long now) { if (hardShutdownTimeMs.get() != INVALID_SHUTDOWN_TIME) { log.debug("Cannot accept new call {} when AdminClient is closing.", call); call.handleFailure(new IllegalStateException("Cannot accept new calls when AdminClient is closing.")); } else if (metadataManager.usingBootstrapControllers() && - (!call.nodeProvider.supportsUseControllers())) { + (!call.nodeProvider.supportsUseControllers())) { call.fail(now, new UnsupportedEndpointTypeException("This Admin API is not " + "yet supported when communicating directly with the controller quorum.")); } else { @@ -1646,7 +1609,7 @@ private Call makeMetadataCall(long now) { private Call makeControllerMetadataCall(long now) { // Use DescribeCluster here, as specified by KIP-919. return new Call(true, "describeCluster", calcDeadlineMs(now, requestTimeoutMs), - new MetadataUpdateNodeIdProvider()) { + new MetadataUpdateNodeIdProvider()) { @Override public DescribeClusterRequest.Builder createRequest(int timeoutMs) { return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() @@ -1689,7 +1652,7 @@ private Call makeBrokerMetadataCall(long now) { // We use MetadataRequest here so that we can continue to support brokers that are too // old to handle DescribeCluster. return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs), - new MetadataUpdateNodeIdProvider()) { + new MetadataUpdateNodeIdProvider()) { @Override public MetadataRequest.Builder createRequest(int timeoutMs) { // Since this only requests node information, it's safe to pass true @@ -1778,10 +1741,10 @@ int numPendingCalls() { * Used when a response handler expected a result for some entity but no result was present. */ private static void completeUnrealizedFutures( - Stream>> futures, - Function messageFormatter) { + Stream>> futures, + Function messageFormatter) { futures.filter(entry -> !entry.getValue().isDone()).forEach(entry -> - entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey())))); + entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey())))); } /** @@ -1789,11 +1752,11 @@ private static void completeUnrealizedFutures( * the initial error back to the caller if the request timed out. 
*/ private static void maybeCompleteQuotaExceededException( - boolean shouldRetryOnQuotaViolation, - Throwable throwable, - Map> futures, - Map quotaExceededExceptions, - int throttleTimeDelta) { + boolean shouldRetryOnQuotaViolation, + Throwable throwable, + Map> futures, + Map quotaExceededExceptions, + int throttleTimeDelta) { if (shouldRetryOnQuotaViolation && throwable instanceof TimeoutException) { quotaExceededExceptions.forEach((key, value) -> futures.get(key).completeExceptionally( new ThrottlingQuotaExceededException( @@ -2072,10 +2035,10 @@ private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options, @Override DeleteTopicsRequest.Builder createRequest(int timeoutMs) { return new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopics(topicIds.stream().map( - topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList())) - .setTimeoutMs(timeoutMs)); + new DeleteTopicsRequestData() + .setTopics(topicIds.stream().map( + topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList())) + .setTimeoutMs(timeoutMs)); } @Override @@ -2095,7 +2058,7 @@ void handleResponse(AbstractResponse abstractResponse) { if (error.isFailure()) { if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) { ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException( - response.throttleTimeMs(), error.messageWithFallback()); + response.throttleTimeMs(), error.messageWithFallback()); if (options.shouldRetryOnQuotaViolation()) { retryTopics.add(result.topicId()); retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException); @@ -2118,7 +2081,7 @@ void handleResponse(AbstractResponse abstractResponse) { } else { final long now = time.milliseconds(); final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics, - retryTopicQuotaExceededExceptions, now, deadline); + retryTopicQuotaExceededExceptions, now, deadline); runnable.call(call, now); } } @@ -2128,7 +2091,7 @@ void handleFailure(Throwable throwable) { // If there were any topics retries due to a quota exceeded exception, we propagate // the initial error back to the caller if the request timed out. maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), - throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now)); + throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now)); // Fail all the other remaining futures completeAllExceptionally(futures.values(), throwable); } @@ -2315,7 +2278,7 @@ void handleResponse(AbstractResponse abstractResponse) { } if (partiallyFinishedTopicDescription != null && - (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) { + (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) { // We can't simply check nextTopicDescription != null here to close the partiallyFinishedTopicDescription. // Because the responseCursor topic may not show in the response. String topicName = partiallyFinishedTopicDescription.name(); @@ -2371,7 +2334,7 @@ private Map> handleDescribeTopicsByNamesWi } // First, we need to retrieve the node info. 
- DescribeClusterResult clusterResult = describeCluster(); + DescribeClusterResult clusterResult = describeCluster(new DescribeClusterOptions().timeoutMs(options.timeoutMs())); clusterResult.nodes().whenComplete( (nodes, exception) -> { if (exception != null) { @@ -2398,7 +2361,7 @@ private Map> handleDescribeTopicsByIds(Colle if (topicIdIsUnrepresentable(topicId)) { KafkaFutureImpl future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidTopicException("The given topic id '" + - topicId + "' cannot be represented in a request.")); + topicId + "' cannot be represented in a request.")); topicFutures.put(topicId, future); } else if (!topicFutures.containsKey(topicId)) { topicFutures.put(topicId, new KafkaFutureImpl<>()); @@ -2407,14 +2370,14 @@ private Map> handleDescribeTopicsByIds(Colle } final long now = time.milliseconds(); Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { @Override MetadataRequest.Builder createRequest(int timeoutMs) { return new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList)) - .setAllowAutoTopicCreation(false) - .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations())); + .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList)) + .setAllowAutoTopicCreation(false) + .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations())); } @Override @@ -2476,8 +2439,8 @@ private TopicDescription getTopicDescriptionFromCluster(Cluster cluster, String List partitions = new ArrayList<>(partitionInfos.size()); for (PartitionInfo partitionInfo : partitionInfos) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo( - partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()), - Arrays.asList(partitionInfo.inSyncReplicas())); + partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()), + Arrays.asList(partitionInfo.inSyncReplicas())); partitions.add(topicPartitionInfo); } partitions.sort(Comparator.comparingInt(TopicPartitionInfo::partition)); @@ -2512,7 +2475,7 @@ AbstractRequest.Builder createRequest(int timeoutMs) { return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() .setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()) .setEndpointType(metadataManager.usingBootstrapControllers() ? 
- EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()) + EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()) .setIncludeFencedBrokers(options.includeFencedBrokers())); } else { // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it @@ -2531,8 +2494,7 @@ void handleResponse(AbstractResponse abstractResponse) { DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); if (error != Errors.NONE) { - ApiError apiError = new ApiError(error, response.data().errorMessage()); - handleFailure(apiError.exception()); + handleFailure(error.exception(response.data().errorMessage())); return; } @@ -2596,7 +2558,7 @@ public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAc if (filter.isUnknown()) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidRequestException("The AclBindingFilter " + - "must not contain UNKNOWN elements.")); + "must not contain UNKNOWN elements.")); return new DescribeAclsResult(future); } final long now = time.milliseconds(); @@ -2728,10 +2690,9 @@ void handleResponse(AbstractResponse abstractResponse) { } else { List filterResults = new ArrayList<>(); for (DeleteAclsMatchingAcl matchingAcl : filterResult.matchingAcls()) { - ApiError aclError = new ApiError(Errors.forCode(matchingAcl.errorCode()), - matchingAcl.errorMessage()); + Errors aclError = Errors.forCode(matchingAcl.errorCode()); AclBinding aclBinding = DeleteAclsResponse.aclBinding(matchingAcl); - filterResults.add(new FilterResult(aclBinding, aclError.exception())); + filterResults.add(new FilterResult(aclBinding, aclError.exception(matchingAcl.errorMessage()))); } future.complete(new FilterResults(filterResults)); } @@ -2796,15 +2757,15 @@ void handleResponse(AbstractResponse abstractResponse) { if (future == null) { if (node != null) { log.warn("The config {} in the response from node {} is not in the request", - configResource, node); + configResource, node); } else { log.warn("The config {} in the response from the least loaded broker is not in the request", - configResource); + configResource); } } else { if (describeConfigsResult.errorCode() != Errors.NONE.code()) { future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode()) - .exception(describeConfigsResult.errorMessage())); + .exception(describeConfigsResult.errorMessage())); } else { future.complete(describeConfigResult(describeConfigsResult)); } @@ -2840,15 +2801,15 @@ void handleFailure(Throwable throwable) { private Config describeConfigResult(DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult) { return new Config(describeConfigsResult.configs().stream().map(config -> new ConfigEntry( - config.name(), - config.value(), - DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(), - config.isSensitive(), - config.readOnly(), - (config.synonyms().stream().map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(), - DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))).collect(Collectors.toList()), - DescribeConfigsResponse.ConfigType.forId(config.configType()).type(), - config.documentation() + config.name(), + config.value(), + DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(), + config.isSensitive(), + config.readOnly(), + (config.synonyms().stream().map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(), + 
DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))).collect(Collectors.toList()), + DescribeConfigsResponse.ConfigType.forId(config.configType()).type(), + config.documentation() )).collect(Collectors.toList())); } @@ -2960,7 +2921,7 @@ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map()); Map replicaAssignmentByBroker = new HashMap<>(); - for (Map.Entry entry: replicaAssignment.entrySet()) { + for (Map.Entry entry : replicaAssignment.entrySet()) { TopicPartitionReplica replica = entry.getKey(); String logDir = entry.getValue(); int brokerId = replica.brokerId(); @@ -2981,7 +2942,7 @@ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map entry: replicaAssignmentByBroker.entrySet()) { + for (Map.Entry entry : replicaAssignmentByBroker.entrySet()) { final int brokerId = entry.getKey(); final AlterReplicaLogDirsRequestData assignment = entry.getValue(); @@ -2996,15 +2957,15 @@ public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) { @Override public void handleResponse(AbstractResponse abstractResponse) { AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse; - for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) { - for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) { + for (AlterReplicaLogDirTopicResult topicResult : response.data().results()) { + for (AlterReplicaLogDirPartitionResult partitionResult : topicResult.partitions()) { TopicPartitionReplica replica = new TopicPartitionReplica( - topicResult.topicName(), partitionResult.partitionIndex(), brokerId); + topicResult.topicName(), partitionResult.partitionIndex(), brokerId); KafkaFutureImpl future = futures.get(replica); if (future == null) { log.warn("The partition {} in the response from broker {} is not in the request", - new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()), - brokerId); + new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()), + brokerId); } else if (partitionResult.errorCode() == Errors.NONE.code()) { future.complete(null); } else { @@ -3016,8 +2977,9 @@ public void handleResponse(AbstractResponse abstractResponse) { completeUnrealizedFutures( futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId), replica -> "The response from broker " + brokerId + - " did not contain a result for replica " + replica); + " did not contain a result for replica " + replica); } + @Override void handleFailure(Throwable throwable) { // Only completes the futures of brokerId @@ -3060,11 +3022,12 @@ public void handleResponse(AbstractResponse abstractResponse) { } else { // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None Errors error = response.data().errorCode() == Errors.NONE.code() - ? Errors.CLUSTER_AUTHORIZATION_FAILED - : Errors.forCode(response.data().errorCode()); + ? 
Errors.CLUSTER_AUTHORIZATION_FAILED + : Errors.forCode(response.data().errorCode()); future.completeExceptionally(error.exception()); } } + @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); @@ -3082,15 +3045,15 @@ private static Map logDirDescriptions(DescribeLogDirs for (DescribeLogDirsResponseData.DescribeLogDirsTopic t : logDirResult.topics()) { for (DescribeLogDirsResponseData.DescribeLogDirsPartition p : t.partitions()) { replicaInfoMap.put( - new TopicPartition(t.name(), p.partitionIndex()), - new ReplicaInfo(p.partitionSize(), p.offsetLag(), p.isFutureKey())); + new TopicPartition(t.name(), p.partitionIndex()), + new ReplicaInfo(p.partitionSize(), p.offsetLag(), p.isFutureKey())); } } result.put(logDirResult.logDir(), new LogDirDescription( - Errors.forCode(logDirResult.errorCode()).exception(), - replicaInfoMap, - logDirResult.totalBytes(), - logDirResult.usableBytes())); + Errors.forCode(logDirResult.errorCode()).exception(), + replicaInfoMap, + logDirResult.totalBytes(), + logDirResult.usableBytes())); } return result; } @@ -3105,7 +3068,7 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection partitionsByBroker = new HashMap<>(); - for (TopicPartitionReplica replica: replicas) { + for (TopicPartitionReplica replica : replicas) { DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(), brokerId -> new DescribeLogDirsRequestData()); DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic()); @@ -3113,7 +3076,7 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection partitions = new ArrayList<>(); partitions.add(replica.partition()); describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic()) - .setPartitions(partitions); + .setPartitions(partitions); requestData.topics().add(describableLogDirTopic); } else { describableLogDirTopic.partitions().add(replica.partition()); @@ -3121,11 +3084,11 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection entry: partitionsByBroker.entrySet()) { + for (Map.Entry entry : partitionsByBroker.entrySet()) { final int brokerId = entry.getKey(); final DescribeLogDirsRequestData topicPartitions = entry.getValue(); final Map replicaDirInfoByPartition = new HashMap<>(); - for (DescribableLogDirTopic topicPartition: topicPartitions.topics()) { + for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) { for (Integer partitionId : topicPartition.partitions()) { replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo()); } @@ -3143,7 +3106,7 @@ public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) { @Override public void handleResponse(AbstractResponse abstractResponse) { DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse; - for (Map.Entry responseEntry: logDirDescriptions(response).entrySet()) { + for (Map.Entry responseEntry : logDirDescriptions(response).entrySet()) { String logDir = responseEntry.getKey(); LogDirDescription logDirInfo = responseEntry.getValue(); @@ -3154,7 +3117,7 @@ public void handleResponse(AbstractResponse abstractResponse) { handleFailure(new IllegalStateException( "The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal")); - for (Map.Entry replicaInfoEntry: logDirInfo.replicaInfos().entrySet()) { + for (Map.Entry replicaInfoEntry : 
logDirInfo.replicaInfos().entrySet()) { TopicPartition tp = replicaInfoEntry.getKey(); ReplicaInfo replicaInfo = replicaInfoEntry.getValue(); ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp); @@ -3162,24 +3125,25 @@ public void handleResponse(AbstractResponse abstractResponse) { log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp); } else if (replicaInfo.isFuture()) { replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), - replicaLogDirInfo.getCurrentReplicaOffsetLag(), - logDir, - replicaInfo.offsetLag())); + replicaLogDirInfo.getCurrentReplicaOffsetLag(), + logDir, + replicaInfo.offsetLag())); } else { replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, - replicaInfo.offsetLag(), - replicaLogDirInfo.getFutureReplicaLogDir(), - replicaLogDirInfo.getFutureReplicaOffsetLag())); + replicaInfo.offsetLag(), + replicaLogDirInfo.getFutureReplicaLogDir(), + replicaLogDirInfo.getFutureReplicaOffsetLag())); } } } - for (Map.Entry entry: replicaDirInfoByPartition.entrySet()) { + for (Map.Entry entry : replicaDirInfoByPartition.entrySet()) { TopicPartition tp = entry.getKey(); KafkaFutureImpl future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId)); future.complete(entry.getValue()); } } + @Override void handleFailure(Throwable throwable) { completeAllExceptionally(futures.values(), throwable); @@ -3315,8 +3279,8 @@ public CreateDelegationTokenResult createDelegationToken(final CreateDelegationT List renewers = new ArrayList<>(); for (KafkaPrincipal principal : options.renewers()) { renewers.add(new CreatableRenewers() - .setPrincipalName(principal.getName()) - .setPrincipalType(principal.getPrincipalType())); + .setPrincipalName(principal.getName()) + .setPrincipalType(principal.getPrincipalType())); } runnable.call(new Call("createDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3340,7 +3304,7 @@ void handleResponse(AbstractResponse abstractResponse) { delegationTokenFuture.completeExceptionally(response.error().exception()); } else { CreateDelegationTokenResponseData data = response.data(); - TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()), + TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()), new KafkaPrincipal(data.tokenRequesterPrincipalType(), data.tokenRequesterPrincipalName()), options.renewers(), data.issueTimestampMs(), data.maxTimestampMs(), data.expiryTimestampMs()); DelegationToken token = new DelegationToken(tokenInfo, data.hmac()); @@ -3359,7 +3323,7 @@ void handleFailure(Throwable throwable) { @Override public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final RenewDelegationTokenOptions options) { - final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("renewDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3367,7 +3331,7 @@ public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final @Override RenewDelegationTokenRequest.Builder createRequest(int timeoutMs) { return new RenewDelegationTokenRequest.Builder( - new RenewDelegationTokenRequestData() + new RenewDelegationTokenRequestData() .setHmac(hmac) 
.setRenewPeriodMs(options.renewTimePeriodMs())); } @@ -3393,7 +3357,7 @@ void handleFailure(Throwable throwable) { @Override public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, final ExpireDelegationTokenOptions options) { - final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("expireDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3401,9 +3365,9 @@ public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, fina @Override ExpireDelegationTokenRequest.Builder createRequest(int timeoutMs) { return new ExpireDelegationTokenRequest.Builder( - new ExpireDelegationTokenRequestData() - .setHmac(hmac) - .setExpiryTimePeriodMs(options.expiryTimePeriodMs())); + new ExpireDelegationTokenRequestData() + .setHmac(hmac) + .setExpiryTimePeriodMs(options.expiryTimePeriodMs())); } @Override @@ -3427,7 +3391,7 @@ void handleFailure(Throwable throwable) { @Override public DescribeDelegationTokenResult describeDelegationToken(final DescribeDelegationTokenOptions options) { - final KafkaFutureImpl> tokensFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl> tokensFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("describeDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3541,27 +3505,29 @@ ListGroupsRequest.Builder createRequest(int timeoutMs) { } private void maybeAddGroup(ListGroupsResponseData.ListedGroup group) { - final String groupId = group.groupId(); - final Optional type; - if (group.groupType() == null || group.groupType().isEmpty()) { - type = Optional.empty(); - } else { - type = Optional.of(GroupType.parse(group.groupType())); - } - final String protocolType = group.protocolType(); - final Optional groupState; - if (group.groupState() == null || group.groupState().isEmpty()) { - groupState = Optional.empty(); - } else { - groupState = Optional.of(GroupState.parse(group.groupState())); + String protocolType = group.protocolType(); + if (options.protocolTypes().isEmpty() || options.protocolTypes().contains(protocolType)) { + final String groupId = group.groupId(); + final Optional type; + if (group.groupType() == null || group.groupType().isEmpty()) { + type = Optional.empty(); + } else { + type = Optional.of(GroupType.parse(group.groupType())); + } + final Optional groupState; + if (group.groupState() == null || group.groupState().isEmpty()) { + groupState = Optional.empty(); + } else { + groupState = Optional.of(GroupState.parse(group.groupState())); + } + final GroupListing groupListing = new GroupListing( + groupId, + type, + protocolType, + groupState + ); + results.addListing(groupListing); } - final GroupListing groupListing = new GroupListing( - groupId, - type, - protocolType, - groupState - ); - results.addListing(groupListing); } @Override @@ -3607,11 +3573,11 @@ void handleFailure(Throwable throwable) { public DescribeConsumerGroupsResult describeConsumerGroups(final Collection groupIds, final DescribeConsumerGroupsOptions options) { SimpleAdminApiFuture future = - DescribeConsumerGroupsHandler.newFuture(groupIds); + DescribeConsumerGroupsHandler.newFuture(groupIds); DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(options.includeAuthorizedOperations(), logContext); invokeDriver(handler, future, options.timeoutMs); return new 
DescribeConsumerGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } @Deprecated @@ -3688,13 +3654,13 @@ void handleResponse(AbstractResponse abstractResponse) { @Override ListGroupsRequest.Builder createRequest(int timeoutMs) { List states = options.groupStates() - .stream() - .map(GroupState::toString) - .collect(Collectors.toList()); + .stream() + .map(GroupState::toString) + .collect(Collectors.toList()); List groupTypes = options.types() - .stream() - .map(GroupType::toString) - .collect(Collectors.toList()); + .stream() + .map(GroupType::toString) + .collect(Collectors.toList()); return new ListGroupsRequest.Builder(new ListGroupsRequestData() .setStatesFilter(states) .setTypesFilter(groupTypes) @@ -3706,17 +3672,17 @@ private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) { if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) { final String groupId = group.groupId(); final Optional groupState = group.groupState().isEmpty() - ? Optional.empty() - : Optional.of(GroupState.parse(group.groupState())); + ? Optional.empty() + : Optional.of(GroupState.parse(group.groupState())); final Optional type = group.groupType().isEmpty() - ? Optional.empty() - : Optional.of(GroupType.parse(group.groupType())); + ? Optional.empty() + : Optional.of(GroupType.parse(group.groupType())); final ConsumerGroupListing groupListing = new ConsumerGroupListing( - groupId, - groupState, - type, - protocolType.isEmpty() - ); + groupId, + groupState, + type, + protocolType.isEmpty() + ); results.addListing(groupListing); } } @@ -3764,7 +3730,7 @@ void handleFailure(Throwable throwable) { public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, ListConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture> future = - ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet()); + ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet()); ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext); invokeDriver(handler, future, options.timeoutMs); @@ -3773,37 +3739,42 @@ public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, - ListStreamsGroupOffsetsOptions options) { + ListStreamsGroupOffsetsOptions options) { Map consumerGroupSpecs = groupSpecs.entrySet().stream() .collect(Collectors.toMap( Map.Entry::getKey, entry -> new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()) )); - return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions())); + ListConsumerGroupOffsetsOptions consumerGroupOptions = new ListConsumerGroupOffsetsOptions() + .requireStable(options.requireStable()) + .timeoutMs(options.timeoutMs()); + return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, consumerGroupOptions)); } @Override public DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options) { SimpleAdminApiFuture future = - DeleteConsumerGroupsHandler.newFuture(groupIds); + DeleteConsumerGroupsHandler.newFuture(groupIds); DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(logContext); invokeDriver(handler, future, options.timeoutMs); return new DeleteConsumerGroupsResult(future.all().entrySet().stream() - 
.collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } @Override public DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options) { - return new DeleteStreamsGroupsResult(deleteConsumerGroups(groupIds, new DeleteConsumerGroupsOptions())); + DeleteConsumerGroupsOptions consumerGroupOptions = new DeleteConsumerGroupsOptions() + .timeoutMs(options.timeoutMs()); + return new DeleteStreamsGroupsResult(deleteConsumerGroups(groupIds, consumerGroupOptions)); } @Override public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets( - String groupId, - Set partitions, - DeleteConsumerGroupOffsetsOptions options) { + String groupId, + Set partitions, + DeleteConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture> future = - DeleteConsumerGroupOffsetsHandler.newFuture(groupId); + DeleteConsumerGroupOffsetsHandler.newFuture(groupId); DeleteConsumerGroupOffsetsHandler handler = new DeleteConsumerGroupOffsetsHandler(groupId, partitions, logContext); invokeDriver(handler, future, options.timeoutMs); return new DeleteConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions); @@ -3814,23 +3785,27 @@ public DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets( String groupId, Set partitions, DeleteStreamsGroupOffsetsOptions options) { - return new DeleteStreamsGroupOffsetsResult(deleteConsumerGroupOffsets(groupId, partitions, new DeleteConsumerGroupOffsetsOptions())); + DeleteConsumerGroupOffsetsOptions consumerGroupOptions = new DeleteConsumerGroupOffsetsOptions() + .timeoutMs(options.timeoutMs()); + return new DeleteStreamsGroupOffsetsResult(deleteConsumerGroupOffsets(groupId, partitions, consumerGroupOptions)); } @Override public DescribeShareGroupsResult describeShareGroups(final Collection groupIds, final DescribeShareGroupsOptions options) { SimpleAdminApiFuture future = - DescribeShareGroupsHandler.newFuture(groupIds); + DescribeShareGroupsHandler.newFuture(groupIds); DescribeShareGroupsHandler handler = new DescribeShareGroupsHandler(options.includeAuthorizedOperations(), logContext); invokeDriver(handler, future, options.timeoutMs); return new DescribeShareGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } @Override - public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map offsets, AlterShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = AlterShareGroupOffsetsHandler.newFuture(groupId); + public AlterShareGroupOffsetsResult alterShareGroupOffsets(final String groupId, + final Map offsets, + final AlterShareGroupOffsetsOptions options) { + SimpleAdminApiFuture> future = AlterShareGroupOffsetsHandler.newFuture(groupId); AlterShareGroupOffsetsHandler handler = new AlterShareGroupOffsetsHandler(groupId, offsets, logContext); invokeDriver(handler, future, options.timeoutMs); return new AlterShareGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId))); @@ -3839,18 +3814,20 @@ public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map groupSpecs, final ListShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = ListShareGroupOffsetsHandler.newFuture(groupSpecs.keySet()); + SimpleAdminApiFuture> future = 
ListShareGroupOffsetsHandler.newFuture(groupSpecs.keySet()); ListShareGroupOffsetsHandler handler = new ListShareGroupOffsetsHandler(groupSpecs, logContext); invokeDriver(handler, future, options.timeoutMs); return new ListShareGroupOffsetsResult(future.all()); } @Override - public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set partitions, DeleteShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = DeleteShareGroupOffsetsHandler.newFuture(groupId); - DeleteShareGroupOffsetsHandler handler = new DeleteShareGroupOffsetsHandler(groupId, partitions, logContext); + public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(final String groupId, + final Set topics, + final DeleteShareGroupOffsetsOptions options) { + SimpleAdminApiFuture> future = DeleteShareGroupOffsetsHandler.newFuture(groupId); + DeleteShareGroupOffsetsHandler handler = new DeleteShareGroupOffsetsHandler(groupId, topics, logContext); invokeDriver(handler, future, options.timeoutMs); - return new DeleteShareGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions); + return new DeleteShareGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), topics); } @Override @@ -3892,13 +3869,13 @@ public DeleteShareGroupsResult deleteShareGroups(Collection groupIds, De @Override public ElectLeadersResult electLeaders( - final ElectionType electionType, - final Set topicPartitions, - ElectLeadersOptions options) { + final ElectionType electionType, + final Set topicPartitions, + ElectLeadersOptions options) { final KafkaFutureImpl>> electionFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("electLeaders", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider()) { + new ControllerNodeProvider()) { @Override public ElectLeadersRequest.Builder createRequest(int timeoutMs) { @@ -3931,8 +3908,8 @@ void handleFailure(Throwable throwable) { @Override public AlterPartitionReassignmentsResult alterPartitionReassignments( - Map> reassignments, - AlterPartitionReassignmentsOptions options) { + Map> reassignments, + AlterPartitionReassignmentsOptions options) { final Map> futures = new HashMap<>(); final Map>> topicsToReassignments = new TreeMap<>(); for (Map.Entry> entry : reassignments.entrySet()) { @@ -3945,13 +3922,13 @@ public AlterPartitionReassignmentsResult alterPartitionReassignments( if (topicNameIsUnrepresentable(topic)) { future.completeExceptionally(new InvalidTopicException("The given topic name '" + - topic + "' cannot be represented in a request.")); + topic + "' cannot be represented in a request.")); } else if (topicPartition.partition() < 0) { future.completeExceptionally(new InvalidTopicException("The given partition index " + - topicPartition.partition() + " is not valid.")); + topicPartition.partition() + " is not valid.")); } else { Map> partitionReassignments = - topicsToReassignments.get(topicPartition.topic()); + topicsToReassignments.get(topicPartition.topic()); if (partitionReassignments == null) { partitionReassignments = new TreeMap<>(); topicsToReassignments.put(topic, partitionReassignments); @@ -3963,32 +3940,32 @@ public AlterPartitionReassignmentsResult alterPartitionReassignments( final long now = time.milliseconds(); Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider(true)) { + new ControllerNodeProvider(true)) { @Override public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) { 
AlterPartitionReassignmentsRequestData data = - new AlterPartitionReassignmentsRequestData(); + new AlterPartitionReassignmentsRequestData(); for (Map.Entry>> entry : - topicsToReassignments.entrySet()) { + topicsToReassignments.entrySet()) { String topicName = entry.getKey(); Map> partitionsToReassignments = entry.getValue(); List reassignablePartitions = new ArrayList<>(); for (Map.Entry> partitionEntry : - partitionsToReassignments.entrySet()) { + partitionsToReassignments.entrySet()) { int partitionIndex = partitionEntry.getKey(); Optional reassignment = partitionEntry.getValue(); ReassignablePartition reassignablePartition = new ReassignablePartition() - .setPartitionIndex(partitionIndex) - .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null)); + .setPartitionIndex(partitionIndex) + .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null)); reassignablePartitions.add(reassignablePartition); } ReassignableTopic reassignableTopic = new ReassignableTopic() - .setName(topicName) - .setPartitions(reassignablePartitions); + .setName(topicName) + .setPartitions(reassignablePartitions); data.topics().add(reassignableTopic); } data.setTimeoutMs(timeoutMs); @@ -4015,8 +3992,8 @@ public void handleResponse(AbstractResponse abstractResponse) { String topicName = topicResponse.name(); for (ReassignablePartitionResponse partition : topicResponse.partitions()) { errors.put( - new TopicPartition(topicName, partition.partitionIndex()), - new ApiError(topLevelError, response.data().errorMessage()).exception() + new TopicPartition(topicName, partition.partitionIndex()), + topLevelError.exception(response.data().errorMessage()) ); receivedResponsesCount += 1; } @@ -4056,7 +4033,7 @@ private int validateTopicResponses(List topicResponse if (partitionError == Errors.NONE) { errors.put(tp, null); } else { - errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception()); + errors.put(tp, partitionError.exception(partResponse.errorMessage())); } receivedResponsesCount += 1; } @@ -4088,10 +4065,10 @@ public ListPartitionReassignmentsResult listPartitionReassignments(Optional reassignmentMap = new HashMap<>(); @@ -4180,7 +4157,7 @@ private void handleNotControllerError(Errors error) throws ApiException { */ private Integer nodeFor(ConfigResource resource) { if ((resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) - || resource.type() == ConfigResource.Type.BROKER_LOGGER) { + || resource.type() == ConfigResource.Type.BROKER_LOGGER) { return Integer.valueOf(resource.name()); } else { return null; @@ -4196,8 +4173,8 @@ private KafkaFutureImpl> getMembersFromGroup(String groupId } else { List membersToRemove = res.members().stream().map(member -> member.groupInstanceId().map(id -> new MemberIdentity().setGroupInstanceId(id)) - .orElseGet(() -> new MemberIdentity().setMemberId(member.consumerId())) - .setReason(reason) + .orElseGet(() -> new MemberIdentity().setMemberId(member.consumerId())) + .setReason(reason) ).collect(Collectors.toList()); future.complete(membersToRemove); @@ -4230,7 +4207,7 @@ public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(Strin DEFAULT_LEAVE_GROUP_REASON : JoinGroupRequest.maybeTruncateReason(options.reason()); final SimpleAdminApiFuture> adminFuture = - RemoveMembersFromConsumerGroupHandler.newFuture(groupId); + RemoveMembersFromConsumerGroupHandler.newFuture(groupId); KafkaFutureImpl> memFuture; if (options.removeAll()) { @@ -4238,8 +4215,8 @@ public 
RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(Strin } else { memFuture = new KafkaFutureImpl<>(); memFuture.complete(options.members().stream() - .map(m -> m.toMemberIdentity().setReason(reason)) - .collect(Collectors.toList())); + .map(m -> m.toMemberIdentity().setReason(reason)) + .collect(Collectors.toList())); } memFuture.whenComplete((members, ex) -> { @@ -4261,7 +4238,7 @@ public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets( AlterConsumerGroupOffsetsOptions options ) { SimpleAdminApiFuture> future = - AlterConsumerGroupOffsetsHandler.newFuture(groupId); + AlterConsumerGroupOffsetsHandler.newFuture(groupId); AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext); invokeDriver(handler, future, options.timeoutMs); return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId))); @@ -4273,7 +4250,9 @@ public AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets( Map offsets, AlterStreamsGroupOffsetsOptions options ) { - return new AlterStreamsGroupOffsetsResult(alterConsumerGroupOffsets(groupId, offsets, new AlterConsumerGroupOffsetsOptions())); + AlterConsumerGroupOffsetsOptions consumerGroupOptions = new AlterConsumerGroupOffsetsOptions() + .timeoutMs(options.timeoutMs()); + return new AlterStreamsGroupOffsetsResult(alterConsumerGroupOffsets(groupId, offsets, consumerGroupOptions)); } @Override @@ -4294,24 +4273,24 @@ public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, final long now = time.milliseconds(); runnable.call(new Call("describeClientQuotas", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { - @Override - DescribeClientQuotasRequest.Builder createRequest(int timeoutMs) { - return new DescribeClientQuotasRequest.Builder(filter); - } + @Override + DescribeClientQuotasRequest.Builder createRequest(int timeoutMs) { + return new DescribeClientQuotasRequest.Builder(filter); + } - @Override - void handleResponse(AbstractResponse abstractResponse) { - DescribeClientQuotasResponse response = (DescribeClientQuotasResponse) abstractResponse; - response.complete(future); - } + @Override + void handleResponse(AbstractResponse abstractResponse) { + DescribeClientQuotasResponse response = (DescribeClientQuotasResponse) abstractResponse; + response.complete(future); + } - @Override - void handleFailure(Throwable throwable) { - future.completeExceptionally(throwable); - } - }, now); + @Override + void handleFailure(Throwable throwable) { + future.completeExceptionally(throwable); + } + }, now); return new DescribeClientQuotasResult(future); } @@ -4325,24 +4304,24 @@ public AlterClientQuotasResult alterClientQuotas(Collection dataFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); Call call = new Call("describeUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { @Override public DescribeUserScramCredentialsRequest.Builder createRequest(final int timeoutMs) { final DescribeUserScramCredentialsRequestData requestData = new DescribeUserScramCredentialsRequestData(); @@ -4398,7 +4377,7 @@ public AlterUserScramCredentialsResult alterUserScramCredentials(List> futures = new HashMap<>(); - for (UserScramCredentialAlteration alteration: alterations) { + for (UserScramCredentialAlteration alteration : alterations) { futures.put(alteration.user(), new KafkaFutureImpl<>()); } final Map 
userIllegalAlterationExceptions = new HashMap<>(); @@ -4422,55 +4401,55 @@ public AlterUserScramCredentialsResult alterUserScramCredentials(List> userInsertions = new HashMap<>(); alterations.stream().filter(a -> a instanceof UserScramCredentialUpsertion) - .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user())) - .forEach(alteration -> { - final String user = alteration.user(); - if (user == null || user.isEmpty()) { - userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg)); - } else { - UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration; - try { - byte[] password = upsertion.password(); - if (password == null || password.length == 0) { - userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg)); + .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user())) + .forEach(alteration -> { + final String user = alteration.user(); + if (user == null || user.isEmpty()) { + userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg)); + } else { + UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration; + try { + byte[] password = upsertion.password(); + if (password == null || password.length == 0) { + userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg)); + } else { + ScramMechanism mechanism = upsertion.credentialInfo().mechanism(); + if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) { + userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); } else { - ScramMechanism mechanism = upsertion.credentialInfo().mechanism(); - if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) { - userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); - } else { - userInsertions.putIfAbsent(user, new HashMap<>()); - userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion)); - } + userInsertions.putIfAbsent(user, new HashMap<>()); + userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion)); } - } catch (NoSuchAlgorithmException e) { - // we might overwrite an exception from a previous alteration, but we don't really care - // since we just need to mark this user as having at least one illegal alteration - // and make an exception instance available for completing the corresponding future exceptionally - userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); - } catch (InvalidKeyException e) { - // generally shouldn't happen since we deal with the empty password case above, - // but we still need to catch/handle it - userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e)); } + } catch (NoSuchAlgorithmException e) { + // we might overwrite an exception from a previous alteration, but we don't really care + // since we just need to mark this user as having at least one illegal alteration + // and make an exception instance available for completing the corresponding future exceptionally + userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); + } catch (InvalidKeyException e) { + // generally shouldn't happen since we deal with the empty password case above, + // but we still 
need to catch/handle it + userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e)); } - }); + } + }); // submit alterations only for users that do not have an illegal alteration as identified above Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider()) { + new ControllerNodeProvider()) { @Override public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) { return new AlterUserScramCredentialsRequest.Builder( - new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream() - .filter(a -> a instanceof UserScramCredentialUpsertion) - .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) - .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism())) - .collect(Collectors.toList())) + new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream() + .filter(a -> a instanceof UserScramCredentialUpsertion) + .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) + .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism())) + .collect(Collectors.toList())) .setDeletions(alterations.stream() - .filter(a -> a instanceof UserScramCredentialDeletion) - .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) - .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d)) - .collect(Collectors.toList()))); + .filter(a -> a instanceof UserScramCredentialDeletion) + .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) + .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d)) + .collect(Collectors.toList()))); } @Override @@ -4487,8 +4466,8 @@ public void handleResponse(AbstractResponse abstractResponse) { * Be sure to do this after the NOT_CONTROLLER error check above * so that all errors are consistent in that case. 
*/ - userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> - futures.get(entry.getKey()).completeExceptionally(entry.getValue()) + userIllegalAlterationExceptions.forEach((key, value) -> + futures.get(key).completeExceptionally(value) ); response.data().results().forEach(result -> { KafkaFutureImpl future = futures.get(result.user()); @@ -4520,10 +4499,10 @@ void handleFailure(Throwable throwable) { private static AlterUserScramCredentialsRequestData.ScramCredentialUpsertion getScramCredentialUpsertion(UserScramCredentialUpsertion u) throws InvalidKeyException, NoSuchAlgorithmException { AlterUserScramCredentialsRequestData.ScramCredentialUpsertion retval = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion(); return retval.setName(u.user()) - .setMechanism(u.credentialInfo().mechanism().type()) - .setIterations(u.credentialInfo().iterations()) - .setSalt(u.salt()) - .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations())); + .setMechanism(u.credentialInfo().mechanism().type()) + .setIterations(u.credentialInfo().iterations()) + .setSalt(u.salt()) + .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations())); } private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getScramCredentialDeletion(UserScramCredentialDeletion d) { @@ -4532,7 +4511,7 @@ private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getS private static byte[] getSaltedPassword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException { return new ScramFormatter(org.apache.kafka.common.security.scram.internals.ScramMechanism.forMechanismName(publicScramMechanism.mechanismName())) - .hi(password, salt, iterations); + .hi(password, salt, iterations); } @Override @@ -4658,7 +4637,7 @@ void handleResponse(AbstractResponse abstractResponse) { } // The server should send back a response for every feature, but we do a sanity check anyway. completeUnrealizedFutures(updateFutures.entrySet().stream(), - feature -> "The controller response did not contain a result for feature " + feature); + feature -> "The controller response did not contain a result for feature " + feature); } break; case NOT_CONTROLLER: @@ -4689,15 +4668,15 @@ public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuoru final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) { + "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) { private QuorumInfo.ReplicaState translateReplicaState(DescribeQuorumResponseData.ReplicaState replica) { return new QuorumInfo.ReplicaState( - replica.replicaId(), - replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(), - replica.logEndOffset(), - replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()), - replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp())); + replica.replicaId(), + replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(), + replica.logEndOffset(), + replica.lastFetchTimestamp() == -1 ? 
OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()), + replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp())); } private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.PartitionData partition, DescribeQuorumResponseData.NodeCollection nodeCollection) { @@ -4730,7 +4709,7 @@ private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.Partition @Override DescribeQuorumRequest.Builder createRequest(int timeoutMs) { return new Builder(DescribeQuorumRequest.singletonRequest( - new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition()))); + new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition()))); } @Override @@ -4742,27 +4721,27 @@ void handleResponse(AbstractResponse response) { } if (quorumResponse.data().topics().size() != 1) { String msg = String.format("DescribeMetadataQuorum received %d topics when 1 was expected", - quorumResponse.data().topics().size()); + quorumResponse.data().topics().size()); log.debug(msg); throw new UnknownServerException(msg); } DescribeQuorumResponseData.TopicData topic = quorumResponse.data().topics().get(0); if (!topic.topicName().equals(CLUSTER_METADATA_TOPIC_NAME)) { String msg = String.format("DescribeMetadataQuorum received a topic with name %s when %s was expected", - topic.topicName(), CLUSTER_METADATA_TOPIC_NAME); + topic.topicName(), CLUSTER_METADATA_TOPIC_NAME); log.debug(msg); throw new UnknownServerException(msg); } if (topic.partitions().size() != 1) { String msg = String.format("DescribeMetadataQuorum received a topic %s with %d partitions when 1 was expected", - topic.topicName(), topic.partitions().size()); + topic.topicName(), topic.partitions().size()); log.debug(msg); throw new UnknownServerException(msg); } DescribeQuorumResponseData.PartitionData partition = topic.partitions().get(0); if (partition.partitionIndex() != CLUSTER_METADATA_TOPIC_PARTITION.partition()) { String msg = String.format("DescribeMetadataQuorum received a single partition with index %d when %d was expected", - partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition()); + partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition()); log.debug(msg); throw new UnknownServerException(msg); } @@ -4787,19 +4766,19 @@ public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOpt final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedBrokerOrActiveKController()) { + new LeastLoadedBrokerOrActiveKController()) { @Override UnregisterBrokerRequest.Builder createRequest(int timeoutMs) { UnregisterBrokerRequestData data = - new UnregisterBrokerRequestData().setBrokerId(brokerId); + new UnregisterBrokerRequestData().setBrokerId(brokerId); return new UnregisterBrokerRequest.Builder(data); } @Override void handleResponse(AbstractResponse abstractResponse) { final UnregisterBrokerResponse response = - (UnregisterBrokerResponse) abstractResponse; + (UnregisterBrokerResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); switch (error) { case NONE: @@ -4859,7 +4838,7 @@ public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortT * where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed. *

* - * @param transactionalId The transactional ID whose active transaction should be forcefully terminated. + * @param transactionalId The transactional ID whose active transaction should be forcefully terminated. * @return a {@link TerminateTransactionResult} that can be used to await the operation result. */ @Override @@ -4898,6 +4877,45 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, return new FenceProducersResult(future.all()); } + @Override + public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { + final long now = time.milliseconds(); + final KafkaFutureImpl> future = new KafkaFutureImpl<>(); + final Call call = new Call("listConfigResources", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { + + @Override + ListConfigResourcesRequest.Builder createRequest(int timeoutMs) { + return new ListConfigResourcesRequest.Builder( + new ListConfigResourcesRequestData() + .setResourceTypes( + configResourceTypes + .stream() + .map(ConfigResource.Type::id) + .collect(Collectors.toList()) + ) + ); + } + + @Override + void handleResponse(AbstractResponse abstractResponse) { + ListConfigResourcesResponse response = (ListConfigResourcesResponse) abstractResponse; + if (response.error().isFailure()) { + future.completeExceptionally(response.error().exception()); + } else { + future.complete(response.configResources()); + } + } + + @Override + void handleFailure(Throwable throwable) { + future.completeExceptionally(throwable); + } + }; + runnable.call(call, now); + return new ListConfigResourcesResult(future); + } + + @SuppressWarnings({"deprecation", "removal"}) @Override public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { final long now = time.milliseconds(); @@ -4906,17 +4924,26 @@ public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMet new LeastLoadedNodeProvider()) { @Override - ListClientMetricsResourcesRequest.Builder createRequest(int timeoutMs) { - return new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()); + ListConfigResourcesRequest.Builder createRequest(int timeoutMs) { + return new ListConfigResourcesRequest.Builder( + new ListConfigResourcesRequestData() + .setResourceTypes(List.of(ConfigResource.Type.CLIENT_METRICS.id())) + ); } @Override void handleResponse(AbstractResponse abstractResponse) { - ListClientMetricsResourcesResponse response = (ListClientMetricsResourcesResponse) abstractResponse; + ListConfigResourcesResponse response = (ListConfigResourcesResponse) abstractResponse; if (response.error().isFailure()) { future.completeExceptionally(response.error().exception()); } else { - future.complete(response.clientMetricsResources()); + future.complete(response + .data() + .configResources() + .stream() + .filter(entry -> entry.resourceType() == ConfigResource.Type.CLIENT_METRICS.id()) + .map(entry -> new ClientMetricsResourceListing(entry.resourceName())) + .collect(Collectors.toList())); } } @@ -4940,7 +4967,7 @@ public AddRaftVoterResult addRaftVoter( final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "addRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { + "addRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { @Override AddRaftVoterRequest.Builder createRequest(int timeoutMs) { @@ -4948,30 +4975,27 @@ AddRaftVoterRequest.Builder 
createRequest(int timeoutMs) { new AddRaftVoterRequestData.ListenerCollection(); endpoints.forEach(endpoint -> listeners.add(new AddRaftVoterRequestData.Listener(). - setName(endpoint.name()). + setName(endpoint.listener()). setHost(endpoint.host()). setPort(endpoint.port()))); return new AddRaftVoterRequest.Builder( - new AddRaftVoterRequestData(). - setClusterId(options.clusterId().orElse(null)). - setTimeoutMs(timeoutMs). - setVoterId(voterId) . - setVoterDirectoryId(voterDirectoryId). - setListeners(listeners)); + new AddRaftVoterRequestData(). + setClusterId(options.clusterId().orElse(null)). + setTimeoutMs(timeoutMs). + setVoterId(voterId). + setVoterDirectoryId(voterDirectoryId). + setListeners(listeners)); } @Override void handleResponse(AbstractResponse response) { handleNotControllerError(response); AddRaftVoterResponse addResponse = (AddRaftVoterResponse) response; - if (addResponse.data().errorCode() != Errors.NONE.code()) { - ApiError error = new ApiError( - addResponse.data().errorCode(), - addResponse.data().errorMessage()); - future.completeExceptionally(error.exception()); - } else { + Errors error = Errors.forCode(addResponse.data().errorCode()); + if (error != Errors.NONE) + future.completeExceptionally(error.exception(addResponse.data().errorMessage())); + else future.complete(null); - } } @Override @@ -4994,14 +5018,14 @@ public RemoveRaftVoterResult removeRaftVoter( final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { + "removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { @Override RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) { return new RemoveRaftVoterRequest.Builder( new RemoveRaftVoterRequestData(). setClusterId(options.clusterId().orElse(null)). - setVoterId(voterId) . + setVoterId(voterId). 
setVoterDirectoryId(voterDirectoryId)); } @@ -5009,14 +5033,11 @@ RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) { void handleResponse(AbstractResponse response) { handleNotControllerError(response); RemoveRaftVoterResponse addResponse = (RemoveRaftVoterResponse) response; - if (addResponse.data().errorCode() != Errors.NONE.code()) { - ApiError error = new ApiError( - addResponse.data().errorCode(), - addResponse.data().errorMessage()); - future.completeExceptionally(error.exception()); - } else { + Errors error = Errors.forCode(addResponse.data().errorCode()); + if (error != Errors.NONE) + future.completeExceptionally(error.exception(addResponse.data().errorMessage())); + else future.complete(null); - } } @Override @@ -5125,6 +5146,8 @@ private static long getOffsetFromSpec(OffsetSpec offsetSpec) { return ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP; } else if (offsetSpec instanceof OffsetSpec.LatestTieredSpec) { return ListOffsetsRequest.LATEST_TIERED_TIMESTAMP; + } else if (offsetSpec instanceof OffsetSpec.EarliestPendingUploadSpec) { + return ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP; } return ListOffsetsRequest.LATEST_TIMESTAMP; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java index 7b6dbf302c65e..f90778db12ce6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java @@ -19,6 +19,8 @@ /** * Options for {@link Admin#listClientMetricsResources()}. + * @deprecated Since 4.1. Use {@link ListConfigResourcesOptions} instead. */ +@Deprecated(since = "4.1") public class ListClientMetricsResourcesOptions extends AbstractOptions { } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java index 4a63e31c2381e..a4d0ed3cecb31 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java @@ -25,7 +25,9 @@ /** * The result of the {@link Admin#listClientMetricsResources()} call. *
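The getOffsetFromSpec() change above maps the new OffsetSpec.EarliestPendingUploadSpec to ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP. A minimal sketch of requesting that offset through Admin#listOffsets, assuming the spec is exposed through a factory method such as OffsetSpec.earliestPendingUpload() (the factory name is not shown in this diff):

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

public class EarliestPendingUploadExample {
    // Asks the brokers for the earliest offset still pending upload to remote storage,
    // i.e. the spec wired up in getOffsetFromSpec() above.
    static void printEarliestPendingUpload(Admin admin, TopicPartition tp) throws Exception {
        long offset = admin.listOffsets(Map.of(tp, OffsetSpec.earliestPendingUpload())) // factory name assumed
            .partitionResult(tp)
            .get()
            .offset();
        System.out.println(tp + " earliest pending upload offset: " + offset);
    }
}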

+ * @deprecated Since 4.1. Use {@link ListConfigResourcesResult} instead. */ +@Deprecated(since = "4.1") public class ListClientMetricsResourcesResult { private final KafkaFuture> future; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesOptions.java new file mode 100644 index 0000000000000..dbd8581c7953b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesOptions.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +/** + * Options for {@link Admin#listConfigResources()}. + */ +public class ListConfigResourcesOptions extends AbstractOptions { +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesResult.java new file mode 100644 index 0000000000000..fa9ad46a72cad --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConfigResourcesResult.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.internals.KafkaFutureImpl; + +import java.util.Collection; + +/** + * The result of the {@link Admin#listConfigResources()} call. + *
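The deprecations above point callers of listClientMetricsResources at the generalized config-resource listing added in this patch. A short sketch of the replacement call, based on the listConfigResources signature and the ListConfigResourcesOptions/Result classes shown in this diff:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListConfigResourcesOptions;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collection;
import java.util.Set;

public class ListConfigResourcesExample {
    // Lists CLIENT_METRICS config resources via the new listConfigResources API,
    // which replaces the deprecated listClientMetricsResources call.
    static void printClientMetricsResources(Admin admin) throws Exception {
        Collection<ConfigResource> resources = admin
            .listConfigResources(Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions())
            .all()
            .get();
        resources.forEach(resource -> System.out.println(resource.name()));
    }
}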

+ */ +public class ListConfigResourcesResult { + private final KafkaFuture> future; + + ListConfigResourcesResult(KafkaFuture> future) { + this.future = future; + } + + /** + * Returns a future that yields either an exception, or the full set of config resources. + * + * In the event of a failure, the future yields nothing but the first exception which + * occurred. + */ + public KafkaFuture> all() { + final KafkaFutureImpl> result = new KafkaFutureImpl<>(); + future.whenComplete((resources, throwable) -> { + if (throwable != null) { + result.completeExceptionally(throwable); + } else { + result.complete(resources); + } + }); + return result; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java index e5d70133186cd..7d7083f46c5c7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java @@ -46,6 +46,24 @@ public static ListGroupsOptions forConsumerGroups() { .withProtocolTypes(Set.of("", ConsumerProtocol.PROTOCOL_TYPE)); } + /** + * Only share groups will be returned by listGroups(). + * This operation sets a filter on group type which select share groups. + */ + public static ListGroupsOptions forShareGroups() { + return new ListGroupsOptions() + .withTypes(Set.of(GroupType.SHARE)); + } + + /** + * Only streams groups will be returned by listGroups(). + * This operation sets a filter on group type which select streams groups. + */ + public static ListGroupsOptions forStreamsGroups() { + return new ListGroupsOptions() + .withTypes(Set.of(GroupType.STREAMS)); + } + /** * If groupStates is set, only groups in these states will be returned by listGroups(). * Otherwise, all groups are returned. @@ -56,6 +74,10 @@ public ListGroupsOptions inGroupStates(Set groupStates) { return this; } + /** + * If protocol types is set, only groups of these protocol types will be returned by listGroups(). + * Otherwise, all groups are returned. + */ public ListGroupsOptions withProtocolTypes(Set protocolTypes) { this.protocolTypes = (protocolTypes == null || protocolTypes.isEmpty()) ? Set.of() : Set.copyOf(protocolTypes); return this; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java index d39f3711f4c65..1a2c8869c6caf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java @@ -18,6 +18,7 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.clients.admin.internals.CoordinatorKey; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.annotation.InterfaceStability; @@ -35,9 +36,9 @@ @InterfaceStability.Evolving public class ListShareGroupOffsetsResult { - private final Map>> futures; + private final Map>> futures; - ListShareGroupOffsetsResult(final Map>> futures) { + ListShareGroupOffsetsResult(final Map>> futures) { this.futures = futures.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey().idValue, Map.Entry::getValue)); } @@ -45,12 +46,12 @@ public class ListShareGroupOffsetsResult { /** * Return the future when the requests for all groups succeed. 
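The forShareGroups() and forStreamsGroups() factories added to ListGroupsOptions above mirror the existing forConsumerGroups() helper. A brief sketch of listing only share groups, assuming the usual Admin#listGroups(ListGroupsOptions) entry point and ListGroupsResult#all():

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.GroupListing;
import org.apache.kafka.clients.admin.ListGroupsOptions;

import java.util.Collection;

public class ListShareGroupsExample {
    // The new type filter restricts the listing to groups of type SHARE;
    // forStreamsGroups() works the same way for STREAMS groups.
    static void printShareGroups(Admin admin) throws Exception {
        Collection<GroupListing> groups = admin.listGroups(ListGroupsOptions.forShareGroups()).all().get();
        for (GroupListing group : groups) {
            System.out.println(group.groupId());
        }
    }
}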
* - * @return Future which yields all {@code Map>} objects, if requests for all the groups succeed. + * @return Future which yields all {@code Map>} objects, if requests for all the groups succeed. */ - public KafkaFuture>> all() { + public KafkaFuture>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( nil -> { - Map> offsets = new HashMap<>(futures.size()); + Map> offsets = new HashMap<>(futures.size()); futures.forEach((groupId, future) -> { try { offsets.put(groupId, future.get()); @@ -65,12 +66,10 @@ public KafkaFuture>> all() { } /** - * Return a future which yields a map of topic partitions to offsets for the specified group. - * - * @param groupId The group ID. - * @return Future which yields a map of topic partitions to offsets for the specified group. + * Return a future which yields a map of topic partitions to offsets for the specified group. If the group doesn't + * have a committed offset for a specific partition, the corresponding value in the returned map will be null. */ - public KafkaFuture> partitionsToOffset(String groupId) { + public KafkaFuture> partitionsToOffsetAndMetadata(String groupId) { if (!futures.containsKey(groupId)) { throw new IllegalArgumentException("Group ID not found: " + groupId); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.java index 05caf1dee1120..08835d817e63e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsOptions.java @@ -21,7 +21,7 @@ /** - * Options for {@link Admin#listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)}. + * Options for {@link Admin#listStreamsGroupOffsets(java.util.Map, ListStreamsGroupOffsetsOptions)}. *
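ListShareGroupOffsetsResult above now carries OffsetAndMetadata values, and partitionsToOffset has become partitionsToOffsetAndMetadata, with null entries for partitions that have no committed offset. A small sketch of reading share-group offsets with the updated result; a spec with no explicit partitions is assumed to cover all of the group's partitions, as with consumer groups:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions;
import org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

public class ShareGroupOffsetsExample {
    static void printShareGroupOffsets(Admin admin, String groupId) throws Exception {
        Map<TopicPartition, OffsetAndMetadata> offsets = admin
            .listShareGroupOffsets(Map.of(groupId, new ListShareGroupOffsetsSpec()), new ListShareGroupOffsetsOptions())
            .partitionsToOffsetAndMetadata(groupId)
            .get();
        // A null value means the group has no committed offset for that partition.
        offsets.forEach((tp, offset) ->
            System.out.println(tp + " -> " + (offset == null ? "none" : offset.offset())));
    }
}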
<p>
* The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.java index 3f2ea17e92ea1..3abf578f71a4e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsResult.java @@ -50,7 +50,8 @@ public KafkaFuture>> all() { } /** - * Return a future which yields a map of topic partitions to offsets for the specified group. + * Return a future which yields a map of topic partitions to offsets for the specified group. If the group doesn't + * have a committed offset for a specific partition, the corresponding value in the returned map will be null. */ public KafkaFuture> partitionsToOffsetAndMetadata(String groupId) { return delegate.partitionsToOffsetAndMetadata(groupId); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java index c3fb9babb9a97..4f5380f749101 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListStreamsGroupOffsetsSpec.java @@ -21,9 +21,10 @@ import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Collection; +import java.util.Map; /** - * Specification of Streams group offsets to list using {@link Admin#listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)}. + * Specification of streams group offsets to list using {@link Admin#listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)}. *
<p>
* The API of this class is evolving, see {@link Admin} for details. */ @@ -33,17 +34,17 @@ public class ListStreamsGroupOffsetsSpec { private Collection topicPartitions; /** - * Set the topic partitions whose offsets are to be listed for a Streams group. + * Set the topic partitions whose offsets are to be listed for a streams group. */ - ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPartitions) { + public ListStreamsGroupOffsetsSpec topicPartitions(Collection topicPartitions) { this.topicPartitions = topicPartitions; return this; } /** - * Returns the topic partitions whose offsets are to be listed for a Streams group. + * Returns the topic partitions whose offsets are to be listed for a streams group. */ - Collection topicPartitions() { + public Collection topicPartitions() { return topicPartitions; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java index 0768b1a75a71c..72a796308d45b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java @@ -31,6 +31,7 @@ public class ListTransactionsOptions extends AbstractOptions filteredProducerIds = Collections.emptySet(); private long filteredDuration = -1L; + private String filteredTransactionalIdPattern; /** * Filter only the transactions that are in a specific set of states. If no filter * is specified or if the passed set of states is empty, then transactions in all @@ -70,6 +71,19 @@ public ListTransactionsOptions filterOnDuration(long durationMs) { return this; } + /** + * Filter only the transactions that match with the given transactional ID pattern. + * If the filter is null or if the passed string is empty, + * then all the transactions will be returned. + * + * @param pattern the transactional ID regular expression pattern to filter by + * @return this object + */ + public ListTransactionsOptions filterOnTransactionalIdPattern(String pattern) { + this.filteredTransactionalIdPattern = pattern; + return this; + } + /** * Returns the set of states to be filtered or empty if no states have been specified. * @@ -99,12 +113,23 @@ public long filteredDuration() { return filteredDuration; } + /** + * Returns transactional ID being filtered. 
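A minimal sketch (not part of this patch; the broker address and the regular expression are illustrative) of the new transactional ID pattern filter on listTransactions:

import java.util.Collection;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListTransactionsOptions;
import org.apache.kafka.clients.admin.TransactionListing;

public class ListTransactionsByPatternSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        try (Admin admin = Admin.create(props)) {
            // Only transactions whose transactional ID matches the regex are returned;
            // a null or empty pattern leaves the listing unfiltered.
            ListTransactionsOptions options = new ListTransactionsOptions()
                .filterOnTransactionalIdPattern("payments-.*");
            Collection<TransactionListing> listings = admin.listTransactions(options).all().get();
            listings.forEach(l -> System.out.println(l.transactionalId() + " -> " + l.state()));
        }
    }
}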
+ * + * @return the current transactional ID pattern filter (empty means no transactional IDs are filtered and all + * transactions will be returned) + */ + public String filteredTransactionalIdPattern() { + return filteredTransactionalIdPattern; + } + @Override public String toString() { return "ListTransactionsOptions(" + "filteredStates=" + filteredStates + ", filteredProducerIds=" + filteredProducerIds + ", filteredDuration=" + filteredDuration + + ", filteredTransactionalIdPattern=" + filteredTransactionalIdPattern + ", timeoutMs=" + timeoutMs + ')'; } @@ -116,11 +141,12 @@ public boolean equals(Object o) { ListTransactionsOptions that = (ListTransactionsOptions) o; return Objects.equals(filteredStates, that.filteredStates) && Objects.equals(filteredProducerIds, that.filteredProducerIds) && - Objects.equals(filteredDuration, that.filteredDuration); + Objects.equals(filteredDuration, that.filteredDuration) && + Objects.equals(filteredTransactionalIdPattern, that.filteredTransactionalIdPattern); } @Override public int hashCode() { - return Objects.hash(filteredStates, filteredProducerIds, filteredDuration); + return Objects.hash(filteredStates, filteredProducerIds, filteredDuration, filteredTransactionalIdPattern); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java index 665c86649ba37..340e88db16010 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java @@ -67,6 +67,7 @@ public Map replicaInfos() { /** * The total size of the volume this log directory is on or empty if the broker did not return a value. * For volumes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned. + * This value does not include the size of data stored in remote storage. */ public OptionalLong totalBytes() { return totalBytes; @@ -75,6 +76,7 @@ public OptionalLong totalBytes() { /** * The usable size on the volume this log directory is on or empty if the broker did not return a value. * For usable sizes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned. + * This value does not include the size of data stored in remote storage. */ public OptionalLong usableBytes() { return usableBytes; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java index 68f94cc493e5a..ad73c8d51f086 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java @@ -28,6 +28,7 @@ public static class LatestSpec extends OffsetSpec { } public static class MaxTimestampSpec extends OffsetSpec { } public static class EarliestLocalSpec extends OffsetSpec { } public static class LatestTieredSpec extends OffsetSpec { } + public static class EarliestPendingUploadSpec extends OffsetSpec { } public static class TimestampSpec extends OffsetSpec { private final long timestamp; @@ -91,4 +92,13 @@ public static OffsetSpec earliestLocal() { public static OffsetSpec latestTiered() { return new LatestTieredSpec(); } + + /** + * Used to retrieve the earliest offset of records that are pending upload to remote storage. + *
+ * Note: When tiered storage is not enabled, we will return unknown offset. + */ + public static OffsetSpec earliestPendingUpload() { + return new EarliestPendingUploadSpec(); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java b/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java index 984ac9993933d..ba5b39284ebea 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java @@ -26,7 +26,7 @@ */ @InterfaceStability.Stable public class RaftVoterEndpoint { - private final String name; + private final String listener; private final String host; private final int port; @@ -49,22 +49,33 @@ static String requireNonNullAllCapsNonEmpty(String input) { /** * Create an endpoint for a metadata quorum voter. * - * @param name The human-readable name for this endpoint. For example, CONTROLLER. + * @param listener The human-readable name for this endpoint. For example, CONTROLLER. * @param host The DNS hostname for this endpoint. * @param port The network port for this endpoint. */ public RaftVoterEndpoint( - String name, + String listener, String host, int port ) { - this.name = requireNonNullAllCapsNonEmpty(name); + this.listener = requireNonNullAllCapsNonEmpty(listener); this.host = Objects.requireNonNull(host); this.port = port; } + /** + * The listener name for this endpoint. + */ + public String listener() { + return listener; + } + + /** + * @deprecated Since 4.1. Use {@link #listener()} instead. This function will be removed in 5.0. + */ + @Deprecated(since = "4.1", forRemoval = true) public String name() { - return name; + return listener; } public String host() { @@ -79,20 +90,20 @@ public int port() { public boolean equals(Object o) { if (o == null || (!o.getClass().equals(getClass()))) return false; RaftVoterEndpoint other = (RaftVoterEndpoint) o; - return name.equals(other.name) && + return listener.equals(other.listener) && host.equals(other.host) && port == other.port; } @Override public int hashCode() { - return Objects.hash(name, host, port); + return Objects.hash(listener, host, port); } @Override public String toString() { // enclose IPv6 hosts in square brackets for readability String hostString = host.contains(":") ? "[" + host + "]" : host; - return name + "://" + hostString + ":" + port; + return listener + "://" + hostString + ":" + port; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java b/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java index d3da26b03bbb0..57421e3568b4f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java @@ -33,14 +33,16 @@ private RecordsToDelete(long offset) { /** * Delete all the records before the given {@code offset} * - * @param offset the offset before which all records will be deleted + * @param offset The offset before which all records will be deleted. + * Use {@code -1} to truncate to the high watermark. */ public static RecordsToDelete beforeOffset(long offset) { return new RecordsToDelete(offset); } /** - * The offset before which all records will be deleted + * The offset before which all records will be deleted. + * Use {@code -1} to truncate to the high watermark. 
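A minimal sketch (not part of this patch; topic, partition and bootstrap address are assumed) of driving the new OffsetSpec.earliestPendingUpload() spec and the clarified RecordsToDelete.beforeOffset(-1) behaviour from the Admin client:

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class TieredOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        TopicPartition tp = new TopicPartition("orders", 0);                     // assumed topic

        try (Admin admin = Admin.create(props)) {
            // Earliest offset still pending upload to remote storage; on clusters without
            // tiered storage the broker reports an unknown offset for this spec.
            ListOffsetsResult.ListOffsetsResultInfo info =
                admin.listOffsets(Map.of(tp, OffsetSpec.earliestPendingUpload()))
                     .partitionResult(tp)
                     .get();
            System.out.println("earliest pending upload offset: " + info.offset());

            // beforeOffset(-1) truncates the partition to its high watermark.
            admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(-1L))).all().get();
        }
    }
}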
*/ public long beforeOffset() { return offset; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java index cb5fe563c199d..da6e965ebe0fe 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/RemoveRaftVoterOptions.java @@ -17,11 +17,20 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.protocol.Errors; import java.util.Optional; /** * Options for {@link Admin#removeRaftVoter}. + * + *
<p>
+ * The clusterId is optional. + *
<p>
+ * If provided, the request will only succeed if the cluster id matches the id of the current cluster. + * If the cluster id does not match, the request will fail with {@link Errors#INCONSISTENT_CLUSTER_ID}. + *
<p>
+ * If not provided, the cluster id check is skipped. */ @InterfaceStability.Stable public class RemoveRaftVoterOptions extends AbstractOptions { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java b/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java index b77375d59605d..efe645b704d16 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java @@ -33,6 +33,7 @@ public ReplicaInfo(long size, long offsetLag, boolean isFuture) { /** * The total size of the log segments in this replica in bytes. + * This value does not include the size of data stored in remote storage. */ public long size() { return size; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java index bb814d51e74fd..63e0a06ea2486 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java @@ -20,5 +20,5 @@ /** * Options for {@link Admin#unregisterBroker(int, UnregisterBrokerOptions)}. */ -public class UnregisterBrokerOptions extends AbstractOptions { +public class UnregisterBrokerOptions extends AbstractOptions { } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java index 8e115c2944ded..77a491a320180 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java @@ -95,7 +95,7 @@ public boolean equals(Object o) { public String toString() { StringBuilder bld = new StringBuilder(); bld.append("AdminBootstrapAddresses"); - bld.append("(usingBoostrapControllers=").append(usingBootstrapControllers); + bld.append("(usingBootstrapControllers=").append(usingBootstrapControllers); bld.append(", addresses=["); String prefix = ""; for (InetSocketAddress address : addresses) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java index 5ef72f327d637..99111a70d4bae 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java @@ -108,7 +108,7 @@ public OffsetCommitRequest.Builder buildBatchedRequest( .setGroupId(groupId.idValue) .setTopics(new ArrayList<>(offsetData.values())); - return new OffsetCommitRequest.Builder(data); + return OffsetCommitRequest.Builder.forTopicNames(data); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterShareGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterShareGroupOffsetsHandler.java index f66f597283630..9a8149d479180 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterShareGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterShareGroupOffsetsHandler.java @@ -21,8 +21,8 @@ import org.apache.kafka.clients.admin.KafkaAdminClient; import org.apache.kafka.common.Node; 
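For reference, a small sketch (not part of this patch) of moving from the deprecated RaftVoterEndpoint#name() accessor to the new listener() accessor; the listener name, host and port are illustrative values:

import org.apache.kafka.clients.admin.RaftVoterEndpoint;

public class RaftVoterEndpointSketch {
    public static void main(String[] args) {
        // The first argument is the listener name (all caps, non-empty), typically CONTROLLER.
        RaftVoterEndpoint endpoint = new RaftVoterEndpoint("CONTROLLER", "controller-1.example.com", 9093);

        // listener() is the new accessor; name() still compiles but is deprecated for removal in 5.0.
        System.out.println(endpoint.listener() + " -> " + endpoint);
    }
}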
import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.message.AlterShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.AlterShareGroupOffsetsRequest; @@ -33,7 +33,6 @@ import org.slf4j.Logger; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -42,7 +41,7 @@ /** * This class is the handler for {@link KafkaAdminClient#alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)} call */ -public class AlterShareGroupOffsetsHandler extends AdminApiHandler.Batched> { +public class AlterShareGroupOffsetsHandler extends AdminApiHandler.Batched> { private final CoordinatorKey groupId; @@ -52,7 +51,6 @@ public class AlterShareGroupOffsetsHandler extends AdminApiHandler.Batched offsets, LogContext logContext) { this.groupId = CoordinatorKey.byGroupId(groupId); this.offsets = offsets; @@ -60,8 +58,15 @@ public AlterShareGroupOffsetsHandler(String groupId, Map o this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.GROUP, logContext); } - public static AdminApiFuture.SimpleAdminApiFuture> newFuture(String groupId) { - return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId))); + public static AdminApiFuture.SimpleAdminApiFuture> newFuture(String groupId) { + return AdminApiFuture.forKeys(Set.of(CoordinatorKey.byGroupId(groupId))); + } + + private void validateKeys(Set groupIds) { + if (!groupIds.equals(Set.of(groupId))) { + throw new IllegalArgumentException("Received unexpected group ids " + groupIds + + " (expected only " + Set.of(groupId) + ")"); + } } @Override @@ -87,30 +92,38 @@ public String apiName() { } @Override - public ApiResult> handleResponse(Node broker, Set keys, AbstractResponse abstractResponse) { + public ApiResult> handleResponse(Node broker, Set keys, AbstractResponse abstractResponse) { + validateKeys(keys); + AlterShareGroupOffsetsResponse response = (AlterShareGroupOffsetsResponse) abstractResponse; - final Map partitionResults = new HashMap<>(); final Set groupsToUnmap = new HashSet<>(); final Set groupsToRetry = new HashSet<>(); - - for (AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic topic : response.data().responses()) { - for (AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition partition : topic.partitions()) { - TopicPartition topicPartition = new TopicPartition(topic.topicName(), partition.partitionIndex()); - Errors error = Errors.forCode(partition.errorCode()); - - if (error != Errors.NONE) { - handleError( - groupId, - topicPartition, - error, - partitionResults, - groupsToUnmap, - groupsToRetry - ); - } else { - partitionResults.put(topicPartition, error); + final Map partitionResults = new HashMap<>(); + + if (response.data().errorCode() != Errors.NONE.code()) { + final Errors topLevelError = Errors.forCode(response.data().errorCode()); + final String topLevelErrorMessage = response.data().errorMessage(); + + offsets.forEach((topicPartition, offset) -> + handleError( + groupId, + topicPartition, + topLevelError, + topLevelErrorMessage, + partitionResults, + groupsToUnmap, + groupsToRetry + )); + } else { + response.data().responses().forEach(topic -> topic.partitions().forEach(partition -> { + final 
Errors partitionError = Errors.forCode(partition.errorCode()); + if (partitionError != Errors.NONE) { + String errorMessageToLog = partition.errorMessage() == null ? "" : partition.errorMessage(); + log.debug("AlterShareGroupOffsets request for group id {} and topic-partition {}-{} failed and returned error {}. {}", + groupId.idValue, topic.topicName(), partition.partitionIndex(), partitionError.name(), errorMessageToLog); } - } + partitionResults.put(new TopicPartition(topic.topicName(), partition.partitionIndex()), partitionError.exception(partition.errorMessage())); + })); } if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) { @@ -121,24 +134,26 @@ public ApiResult> handleResponse(Nod } private void handleError( - CoordinatorKey groupId, - TopicPartition topicPartition, - Errors error, - Map partitionResults, - Set groupsToUnmap, - Set groupsToRetry + CoordinatorKey groupId, + TopicPartition topicPartition, + Errors error, + String errorMessage, + Map partitionResults, + Set groupsToUnmap, + Set groupsToRetry ) { + String errorMessageToLog = errorMessage == null ? "" : errorMessage; switch (error) { case COORDINATOR_LOAD_IN_PROGRESS: case REBALANCE_IN_PROGRESS: - log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will retry.", - groupId.idValue, error); + log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will retry. {}", + groupId.idValue, error, errorMessageToLog); groupsToRetry.add(groupId); break; case COORDINATOR_NOT_AVAILABLE: case NOT_COORDINATOR: - log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry.", - groupId.idValue, error); + log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry. {}", + groupId.idValue, error, errorMessageToLog); groupsToUnmap.add(groupId); break; case GROUP_ID_NOT_FOUND: @@ -147,14 +162,14 @@ private void handleError( case UNKNOWN_SERVER_ERROR: case KAFKA_STORAGE_ERROR: case GROUP_AUTHORIZATION_FAILED: - log.debug("AlterShareGroupOffsets request for group id {} and partition {} failed due" + - " to error {}.", groupId.idValue, topicPartition, error); - partitionResults.put(topicPartition, error); + log.debug("AlterShareGroupOffsets request for group id {} failed due to error {}. {}", + groupId.idValue, error, errorMessageToLog); + partitionResults.put(topicPartition, error.exception(errorMessage)); break; default: - log.error("AlterShareGroupOffsets request for group id {} and partition {} failed due" + - " to unexpected error {}.", groupId.idValue, topicPartition, error); - partitionResults.put(topicPartition, error); + log.error("AlterShareGroupOffsets request for group id {} failed due to unexpected error {}. 
{}", + groupId.idValue, error, errorMessageToLog); + partitionResults.put(topicPartition, error.exception(errorMessage)); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupOffsetsHandler.java index c5911e4303e72..9f0ab59e32cb6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupOffsetsHandler.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.admin.DeleteShareGroupOffsetsOptions; import org.apache.kafka.clients.admin.KafkaAdminClient; import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData; import org.apache.kafka.common.protocol.Errors; @@ -38,24 +37,23 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; /** * This class is the handler for {@link KafkaAdminClient#deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions)} call */ -public class DeleteShareGroupOffsetsHandler extends AdminApiHandler.Batched> { +public class DeleteShareGroupOffsetsHandler extends AdminApiHandler.Batched> { private final CoordinatorKey groupId; private final Logger log; - private final Set partitions; + private final Set topics; private final CoordinatorStrategy lookupStrategy; - public DeleteShareGroupOffsetsHandler(String groupId, Set partitions, LogContext logContext) { + public DeleteShareGroupOffsetsHandler(String groupId, Set topics, LogContext logContext) { this.groupId = CoordinatorKey.byGroupId(groupId); - this.partitions = partitions; + this.topics = topics; this.log = logContext.logger(DeleteShareGroupOffsetsHandler.class); this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.GROUP, logContext); } @@ -70,7 +68,7 @@ public AdminApiLookupStrategy lookupStrategy() { return lookupStrategy; } - public static AdminApiFuture.SimpleAdminApiFuture> newFuture(String groupId) { + public static AdminApiFuture.SimpleAdminApiFuture> newFuture(String groupId) { return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId))); } @@ -85,27 +83,22 @@ private void validateKeys(Set groupIds) { DeleteShareGroupOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set groupIds) { validateKeys(groupIds); - final List topics = + final List requestTopics = new ArrayList<>(); - partitions.stream().collect(Collectors.groupingBy(TopicPartition::topic)).forEach((topic, topicPartitions) -> topics.add( + topics.forEach(topic -> requestTopics.add( new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic() .setTopicName(topic) - .setPartitions(topicPartitions.stream() - .map(TopicPartition::partition) - .collect(Collectors.toList()) - ) )); return new DeleteShareGroupOffsetsRequest.Builder( new DeleteShareGroupOffsetsRequestData() .setGroupId(groupId.idValue) - .setTopics(topics), - true + .setTopics(requestTopics) ); } @Override - public ApiResult> handleResponse( + public ApiResult> handleResponse( Node coordinator, Set groupIds, AbstractResponse abstractResponse @@ -124,23 +117,21 @@ public ApiResult> handleRespon return new ApiResult<>(Collections.emptyMap(), groupsFailed, new ArrayList<>(groupsToUnmap)); } else { - final Map partitionResults = 
new HashMap<>(); - response.data().responses().forEach(topic -> - topic.partitions().forEach(partition -> { - if (partition.errorCode() != Errors.NONE.code()) { - final Errors partitionError = Errors.forCode(partition.errorCode()); - final String partitionErrorMessage = partition.errorMessage(); - log.debug("DeleteShareGroupOffsets request for group id {}, topic {} and partition {} failed and returned error {}." + partitionErrorMessage, - groupId.idValue, topic.topicName(), partition.partitionIndex(), partitionError); - } - partitionResults.put( - new TopicPartition(topic.topicName(), partition.partitionIndex()), - Errors.forCode(partition.errorCode()).exception(partition.errorMessage()) - ); - }) - ); - - return ApiResult.completed(groupId, partitionResults); + final Map topicResults = new HashMap<>(); + response.data().responses().forEach(topic -> { + if (topic.errorCode() != Errors.NONE.code()) { + final Errors topicError = Errors.forCode(topic.errorCode()); + final String topicErrorMessage = topic.errorMessage(); + log.debug("DeleteShareGroupOffsets request for group id {} and topic {} failed and returned error {}. {}", + groupId.idValue, topic.topicName(), topicError, topicErrorMessage); + } + topicResults.put( + topic.topicName(), + Errors.forCode(topic.errorCode()).exception(topic.errorMessage()) + ); + }); + + return ApiResult.completed(groupId, topicResults); } } @@ -156,14 +147,14 @@ private void handleGroupError( case REBALANCE_IN_PROGRESS: // If the coordinator is in the middle of loading, then we just need to retry log.debug("DeleteShareGroupOffsets request for group id {} failed because the coordinator" + - " is still in the process of loading state. Will retry. " + errorMessage, groupId.idValue); + " is still in the process of loading state. Will retry. {}", groupId.idValue, errorMessage); break; case COORDINATOR_NOT_AVAILABLE: case NOT_COORDINATOR: // If the coordinator is unavailable or there was a coordinator change, then we unmap // the key so that we retry the `FindCoordinator` request - log.debug("DeleteShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry. " + errorMessage, - groupId.idValue, error); + log.debug("DeleteShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry. {}", + groupId.idValue, error, errorMessage); groupsToUnmap.add(groupId); break; case INVALID_GROUP_ID: @@ -173,11 +164,11 @@ private void handleGroupError( case UNKNOWN_SERVER_ERROR: case KAFKA_STORAGE_ERROR: case GROUP_AUTHORIZATION_FAILED: - log.debug("DeleteShareGroupOffsets request for group id {} failed due to error {}. " + errorMessage, groupId.idValue, error); + log.debug("DeleteShareGroupOffsets request for group id {} failed due to error {}. {}", groupId.idValue, error, errorMessage); failed.put(groupId, error.exception(errorMessage)); break; default: - log.error("DeleteShareGroupOffsets request for group id {} failed due to unexpected error {}. " + errorMessage, groupId.idValue, error); + log.error("DeleteShareGroupOffsets request for group id {} failed due to unexpected error {}. 
{}", groupId.idValue, error, errorMessage); failed.put(groupId, error.exception(errorMessage)); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java index 1aa37bbf77647..1f49a0d60580d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java @@ -92,7 +92,7 @@ public ShareGroupDescribeRequest.Builder buildBatchedRequest(int coordinatorId, ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() .setGroupIds(groupIds) .setIncludeAuthorizedOperations(includeAuthorizedOperations); - return new ShareGroupDescribeRequest.Builder(data, true); + return new ShareGroupDescribeRequest.Builder(data); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeStreamsGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeStreamsGroupsHandler.java index 8355a78b9d458..9c037d7dd4606 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeStreamsGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeStreamsGroupsHandler.java @@ -36,7 +36,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -259,22 +258,9 @@ private void handleError( break; case GROUP_ID_NOT_FOUND: - // In order to maintain compatibility with describeConsumerGroups, an unknown group ID is - // reported as a DEAD streams group, and the admin client operation did not fail log.debug("`DescribeStreamsGroups` request for group id {} failed because the group does not exist. {}", groupId.idValue, errorMsg != null ? 
errorMsg : ""); - final StreamsGroupDescription streamsGroupDescription = - new StreamsGroupDescription( - groupId.idValue, - -1, - -1, - -1, - Collections.emptySet(), - Collections.emptySet(), - GroupState.DEAD, - coordinator, - validAclOperations(describedGroup.authorizedOperations())); - completed.put(groupId, streamsGroupDescription); + failed.put(groupId, error.exception(errorMsg)); break; default: diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java index 4c0e3db925404..febc4033223b5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java @@ -20,11 +20,13 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; @@ -35,7 +37,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -86,15 +87,32 @@ private static Set coordinatorKeys(Collection groupIds) } public OffsetFetchRequest.Builder buildBatchedRequest(Set groupIds) { - // Create a map that only contains the consumer groups owned by the coordinator. - Map> coordinatorGroupIdToTopicPartitions = new HashMap<>(groupIds.size()); - groupIds.forEach(g -> { - ListConsumerGroupOffsetsSpec spec = groupSpecs.get(g.idValue); - List partitions = spec.topicPartitions() != null ? new ArrayList<>(spec.topicPartitions()) : null; - coordinatorGroupIdToTopicPartitions.put(g.idValue, partitions); - }); - - return new OffsetFetchRequest.Builder(coordinatorGroupIdToTopicPartitions, requireStable, false); + // Create a request that only contains the consumer groups owned by the coordinator. 
+ return OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(groupIds.stream().map(groupId -> { + ListConsumerGroupOffsetsSpec spec = groupSpecs.get(groupId.idValue); + + List topics = null; + if (spec.topicPartitions() != null) { + topics = spec.topicPartitions().stream() + .collect(Collectors.groupingBy(TopicPartition::topic)) + .entrySet() + .stream() + .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(entry.getKey()) + .setPartitionIndexes(entry.getValue().stream() + .map(TopicPartition::partition) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); + } + return new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId.idValue) + .setTopics(topics); + }).collect(Collectors.toList())), + false + ); } @Override @@ -121,40 +139,52 @@ public ApiResult> handleR ) { validateKeys(groupIds); - final OffsetFetchResponse response = (OffsetFetchResponse) abstractResponse; + var response = (OffsetFetchResponse) abstractResponse; + var completed = new HashMap>(); + var failed = new HashMap(); + var unmapped = new ArrayList(); - Map> completed = new HashMap<>(); - Map failed = new HashMap<>(); - List unmapped = new ArrayList<>(); for (CoordinatorKey coordinatorKey : groupIds) { - String group = coordinatorKey.idValue; - if (response.groupHasError(group)) { - handleGroupError(CoordinatorKey.byGroupId(group), response.groupLevelError(group), failed, unmapped); + var groupId = coordinatorKey.idValue; + var group = response.group(groupId); + var error = Errors.forCode(group.errorCode()); + + if (error != Errors.NONE) { + handleGroupError( + coordinatorKey, + error, + failed, + unmapped + ); } else { - final Map groupOffsetsListing = new HashMap<>(); - Map responseData = response.partitionDataMap(group); - for (Map.Entry partitionEntry : responseData.entrySet()) { - final TopicPartition topicPartition = partitionEntry.getKey(); - OffsetFetchResponse.PartitionData partitionData = partitionEntry.getValue(); - final Errors error = partitionData.error; - - if (error == Errors.NONE) { - final long offset = partitionData.offset; - final String metadata = partitionData.metadata; - final Optional leaderEpoch = partitionData.leaderEpoch; - // Negative offset indicates that the group has no committed offset for this partition - if (offset < 0) { - groupOffsetsListing.put(topicPartition, null); + var offsets = new HashMap(); + + group.topics().forEach(topic -> + topic.partitions().forEach(partition -> { + var tp = new TopicPartition(topic.name(), partition.partitionIndex()); + var partitionError = Errors.forCode(partition.errorCode()); + + if (partitionError == Errors.NONE) { + // Negative offset indicates that the group has no committed offset for this partition. 
+ if (partition.committedOffset() < 0) { + offsets.put(tp, null); + } else { + offsets.put(tp, new OffsetAndMetadata( + partition.committedOffset(), + RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), + partition.metadata() + )); + } } else { - groupOffsetsListing.put(topicPartition, new OffsetAndMetadata(offset, leaderEpoch, metadata)); + log.warn("Skipping return offset for {} due to error {}.", tp, partitionError); } - } else { - log.warn("Skipping return offset for {} due to error {}.", topicPartition, error); - } - } - completed.put(CoordinatorKey.byGroupId(group), groupOffsetsListing); + }) + ); + + completed.put(coordinatorKey, offsets); } } + return new ApiResult<>(completed, failed, unmapped); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java index f7c495d7fd8aa..330a9efaf9b6c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java @@ -103,12 +103,17 @@ ListOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set .stream() .anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.LATEST_TIERED_TIMESTAMP); + boolean requireEarliestPendingUploadTimestamp = keys + .stream() + .anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP); + int timeoutMs = options.timeoutMs() != null ? options.timeoutMs() : defaultApiTimeoutMs; return ListOffsetsRequest.Builder.forConsumer(true, options.isolationLevel(), supportsMaxTimestamp, requireEarliestLocalTimestamp, - requireTieredStorageTimestamp) + requireTieredStorageTimestamp, + requireEarliestPendingUploadTimestamp) .setTargetTimes(new ArrayList<>(topicsByName.values())) .setTimeoutMs(timeoutMs); } @@ -197,7 +202,7 @@ private void handlePartitionError( public Map handleUnsupportedVersionException( int brokerId, UnsupportedVersionException exception, Set keys ) { - log.warn("Broker " + brokerId + " does not support MAX_TIMESTAMP offset specs"); + log.warn("Broker {} does not support MAX_TIMESTAMP offset specs", brokerId); Map maxTimestampPartitions = new HashMap<>(); for (TopicPartition topicPartition : keys) { Long offsetTimestamp = offsetTimestampsByPartition.get(topicPartition); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListShareGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListShareGroupOffsetsHandler.java index 3f523b93833d4..f9b9e987930bd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListShareGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListShareGroupOffsetsHandler.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.admin.KafkaAdminClient; import org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions; import org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData; @@ -39,13 +40,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; /** * This class is the handler for {@link 
KafkaAdminClient#listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)} call */ -public class ListShareGroupOffsetsHandler extends AdminApiHandler.Batched> { +public class ListShareGroupOffsetsHandler extends AdminApiHandler.Batched> { private final Map groupSpecs; private final Logger log; @@ -58,7 +60,7 @@ public ListShareGroupOffsetsHandler(Map group this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext); } - public static AdminApiFuture.SimpleAdminApiFuture> newFuture(Collection groupIds) { + public static AdminApiFuture.SimpleAdminApiFuture> newFuture(Collection groupIds) { return AdminApiFuture.forKeys(coordinatorKeys(groupIds)); } @@ -104,17 +106,17 @@ public DescribeShareGroupOffsetsRequest.Builder buildBatchedRequest(int coordina }); DescribeShareGroupOffsetsRequestData data = new DescribeShareGroupOffsetsRequestData() .setGroups(groups); - return new DescribeShareGroupOffsetsRequest.Builder(data, true); + return new DescribeShareGroupOffsetsRequest.Builder(data); } @Override - public ApiResult> handleResponse(Node coordinator, - Set groupIds, - AbstractResponse abstractResponse) { + public ApiResult> handleResponse(Node coordinator, + Set groupIds, + AbstractResponse abstractResponse) { validateKeys(groupIds); final DescribeShareGroupOffsetsResponse response = (DescribeShareGroupOffsetsResponse) abstractResponse; - final Map> completed = new HashMap<>(); + final Map> completed = new HashMap<>(); final Map failed = new HashMap<>(); final List unmapped = new ArrayList<>(); @@ -123,17 +125,19 @@ public ApiResult> handleResponse(Node if (response.hasGroupError(groupId)) { handleGroupError(coordinatorKey, response.groupError(groupId), failed, unmapped); } else { - Map groupOffsetsListing = new HashMap<>(); + Map groupOffsetsListing = new HashMap<>(); response.data().groups().stream().filter(g -> g.groupId().equals(groupId)).forEach(groupResponse -> { for (DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic topicResponse : groupResponse.topics()) { for (DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition partitionResponse : topicResponse.partitions()) { TopicPartition tp = new TopicPartition(topicResponse.topicName(), partitionResponse.partitionIndex()); if (partitionResponse.errorCode() == Errors.NONE.code()) { + final long startOffset = partitionResponse.startOffset(); + final Optional leaderEpoch = partitionResponse.leaderEpoch() < 0 ? 
Optional.empty() : Optional.of(partitionResponse.leaderEpoch()); // Negative offset indicates there is no start offset for this partition if (partitionResponse.startOffset() < 0) { groupOffsetsListing.put(tp, null); } else { - groupOffsetsListing.put(tp, partitionResponse.startOffset()); + groupOffsetsListing.put(tp, new OffsetAndMetadata(startOffset, leaderEpoch, "")); } } else { log.warn("Skipping return offset for {} due to error {}: {}.", tp, partitionResponse.errorCode(), partitionResponse.errorMessage()); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java index 71b8e1a7c5607..f47d9f90189a5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java @@ -75,6 +75,9 @@ public ListTransactionsRequest.Builder buildBatchedRequest( .map(TransactionState::toString) .collect(Collectors.toList())); request.setDurationFilter(options.filteredDuration()); + if (options.filteredTransactionalIdPattern() != null && !options.filteredTransactionalIdPattern().isEmpty()) { + request.setTransactionalIdPattern(options.filteredTransactionalIdPattern()); + } return new ListTransactionsRequest.Builder(request); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java b/clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java index 0cbbcbca54d81..0aae57d39cfc0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java @@ -47,23 +47,18 @@ public enum GroupMembershipOperation { * Specifies the group membership operation upon shutdown. * By default, {@code GroupMembershipOperation.DEFAULT} will be applied, which follows the consumer's default behavior. */ - protected GroupMembershipOperation operation = GroupMembershipOperation.DEFAULT; + private GroupMembershipOperation operation = GroupMembershipOperation.DEFAULT; /** * Specifies the maximum amount of time to wait for the close process to complete. * This allows users to define a custom timeout for gracefully stopping the consumer. * If no value is set, the default timeout {@link ConsumerUtils#DEFAULT_CLOSE_TIMEOUT_MS} will be applied. */ - protected Optional timeout = Optional.empty(); + private Optional timeout = Optional.empty(); private CloseOptions() { } - protected CloseOptions(final CloseOptions option) { - this.operation = option.operation; - this.timeout = option.timeout; - } - /** * Static method to create a {@code CloseOptions} with a custom timeout. * @@ -71,9 +66,7 @@ protected CloseOptions(final CloseOptions option) { * @return a new {@code CloseOptions} instance with the specified timeout. */ public static CloseOptions timeout(final Duration timeout) { - CloseOptions option = new CloseOptions(); - option.timeout = Optional.ofNullable(timeout); - return option; + return new CloseOptions().withTimeout(timeout); } /** @@ -84,10 +77,7 @@ public static CloseOptions timeout(final Duration timeout) { * @return a new {@code CloseOptions} instance with the specified group membership operation. 
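A hedged sketch of the CloseOptions builder after this cleanup; the bootstrap address and group id are assumptions, and LEAVE_GROUP is assumed to be the KIP-1092 enum constant for leaving the group immediately on close:

import java.time.Duration;
import java.util.Properties;

import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CloseOptionsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Start from one of the static factories and chain the "with" setters.
        consumer.close(CloseOptions.timeout(Duration.ofSeconds(30))
            .withGroupMembershipOperation(CloseOptions.GroupMembershipOperation.LEAVE_GROUP));
    }
}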
*/ public static CloseOptions groupMembershipOperation(final GroupMembershipOperation operation) { - Objects.requireNonNull(operation, "operation should not be null"); - CloseOptions option = new CloseOptions(); - option.operation = operation; - return option; + return new CloseOptions().withGroupMembershipOperation(operation); } /** @@ -108,8 +98,7 @@ public CloseOptions withTimeout(final Duration timeout) { * @return this {@code CloseOptions} instance. */ public CloseOptions withGroupMembershipOperation(final GroupMembershipOperation operation) { - Objects.requireNonNull(operation, "operation should not be null"); - this.operation = operation; + this.operation = Objects.requireNonNull(operation, "operation should not be null"); return this; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java index 6c1c42c9d8c99..365d19d41349a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java @@ -277,20 +277,18 @@ public interface Consumer extends Closeable { void close(); /** - * This method has been deprecated since Kafka 4.0 and should use {@link Consumer#close(CloseOptions)} instead. - * * @see KafkaConsumer#close(Duration) */ @Deprecated void close(Duration timeout); /** - * @see KafkaConsumer#wakeup() + * @see KafkaConsumer#close(CloseOptions) */ - void wakeup(); + void close(final CloseOptions option); /** - * @see KafkaConsumer#close(CloseOptions) + * @see KafkaConsumer#wakeup() */ - void close(final CloseOptions option); + void wakeup(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 9f7dd92d2917b..43e793f4af123 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -37,7 +37,6 @@ import org.apache.kafka.common.utils.Utils; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -63,9 +62,9 @@ public class ConsumerConfig extends AbstractConfig { // a list contains all the assignor names that only assign subscribed topics to consumer. Should be updated when new assignor added. // This is to help optimize ConsumerCoordinator#performAssignment method public static final List ASSIGN_FROM_SUBSCRIBED_ASSIGNORS = List.of( - RANGE_ASSIGNOR_NAME, - ROUNDROBIN_ASSIGNOR_NAME, - STICKY_ASSIGNOR_NAME, + RANGE_ASSIGNOR_NAME, + ROUNDROBIN_ASSIGNOR_NAME, + STICKY_ASSIGNOR_NAME, COOPERATIVE_STICKY_ASSIGNOR_NAME ); @@ -186,7 +185,7 @@ public class ConsumerConfig extends AbstractConfig { */ public static final String FETCH_MIN_BYTES_CONFIG = "fetch.min.bytes"; public static final int DEFAULT_FETCH_MIN_BYTES = 1; - private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of " + DEFAULT_FETCH_MIN_BYTES + " byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. 
Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency."; + private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of " + DEFAULT_FETCH_MIN_BYTES + " byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency. Even if the total data available in the broker exceeds fetch.min.bytes, the actual returned size may still be less than this value due to per-partition limits max.partition.fetch.bytes and max returned limits fetch.max.bytes."; /** * fetch.max.bytes @@ -196,7 +195,8 @@ public class ConsumerConfig extends AbstractConfig { "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than " + "this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. " + "The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or " + - "max.message.bytes (topic config). Note that the consumer performs multiple fetches in parallel."; + "max.message.bytes (topic config). A fetch request consists of many partitions, and there is another setting that controls how much " + + "data is returned for each partition in a fetch request - see max.partition.fetch.bytes. Note that the consumer performs multiple fetches in parallel."; public static final int DEFAULT_FETCH_MAX_BYTES = 50 * 1024 * 1024; /** @@ -330,17 +330,6 @@ public class ConsumerConfig extends AbstractConfig { "be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic."; public static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS = true; - /** - * internal.leave.group.on.close - * Whether or not the consumer should leave the group on close. If set to false then a rebalance - * won't occur until session.timeout.ms expires. - * - *
<p>
- * Note: this is an internal configuration and could be changed in the future in a backward incompatible way - * - */ - static final String LEAVE_GROUP_ON_CLOSE_CONFIG = "internal.leave.group.on.close"; - /** * internal.throw.on.fetch.stable.offset.unsupported * Whether or not the consumer should throw when the new stable offset feature is supported. @@ -371,7 +360,7 @@ public class ConsumerConfig extends AbstractConfig { public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG = "allow.auto.create.topics"; private static final String ALLOW_AUTO_CREATE_TOPICS_DOC = "Allow automatic topic creation on the broker when" + " subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the" + - " broker allows for it using `auto.create.topics.enable` broker configuration."; + " broker allows for it using auto.create.topics.enable broker configuration."; public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS = true; /** @@ -405,17 +394,17 @@ public class ConsumerConfig extends AbstractConfig { * A list of configuration keys not supported for CONSUMER protocol. */ private static final List CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS = List.of( - PARTITION_ASSIGNMENT_STRATEGY_CONFIG, - HEARTBEAT_INTERVAL_MS_CONFIG, + PARTITION_ASSIGNMENT_STRATEGY_CONFIG, + HEARTBEAT_INTERVAL_MS_CONFIG, SESSION_TIMEOUT_MS_CONFIG, SHARE_ACKNOWLEDGEMENT_MODE_CONFIG ); - + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - Collections.emptyList(), - new ConfigDef.NonNullValidator(), + ConfigDef.NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, @@ -445,7 +434,7 @@ public class ConsumerConfig extends AbstractConfig { .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, Type.LIST, List.of(RangeAssignor.class, CooperativeStickyAssignor.class), - new ConfigDef.NonNullValidator(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.MEDIUM, PARTITION_ASSIGNMENT_STRATEGY_DOC) .define(METADATA_MAX_AGE_CONFIG, @@ -572,7 +561,7 @@ public class ConsumerConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), - new ConfigDef.NonNullValidator(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(KEY_DESERIALIZER_CLASS_CONFIG, @@ -613,8 +602,8 @@ public class ConsumerConfig extends AbstractConfig { CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) .define(INTERCEPTOR_CLASSES_CONFIG, Type.LIST, - Collections.emptyList(), - new ConfigDef.NonNullValidator(), + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, INTERCEPTOR_CLASSES_DOC) .define(MAX_POLL_RECORDS_CONFIG, @@ -634,10 +623,6 @@ public class ConsumerConfig extends AbstractConfig { DEFAULT_EXCLUDE_INTERNAL_TOPICS, Importance.MEDIUM, EXCLUDE_INTERNAL_TOPICS_DOC) - .defineInternal(LEAVE_GROUP_ON_CLOSE_CONFIG, - Type.BOOLEAN, - true, - Importance.LOW) .defineInternal(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, Type.BOOLEAN, false, @@ -697,7 +682,13 @@ public class ConsumerConfig extends AbstractConfig { ShareAcknowledgementMode.IMPLICIT.name(), new ShareAcknowledgementMode.Validator(), Importance.MEDIUM, - ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_DOC); + ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_DOC) + .define(CONFIG_PROVIDERS_CONFIG, + ConfigDef.Type.LIST, + List.of(), + 
ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.LOW, + CONFIG_PROVIDERS_DOC); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java index 20f2551ba6bc2..45cb505c744e5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java @@ -291,9 +291,6 @@ static List getAssignorInstances(List assigno // a map to store assignor name -> assignor class name Map assignorNameMap = new HashMap<>(); - if (assignorClasses == null) - return assignors; - for (Object klass : assignorClasses) { // first try to get the class if passed in as a string if (klass instanceof String) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java index 914c0ab979f54..23e045b760005 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.TopicPartition; -import java.time.Duration; import java.util.Collection; /** @@ -51,7 +50,7 @@ * Under normal conditions, if a partition is reassigned from one consumer to another, then the old consumer will * always invoke {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} for that partition prior to the new consumer * invoking {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} for the same partition. So if offsets or other state is saved in the - * {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} call by one consumer member, it will be always accessible by the time the + * {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} call by one consumer member, it will always be accessible by the time the * other consumer member taking over that partition and triggering its {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} callback to load the state. *
<p>
* You can think of revocation as a graceful way to give up ownership of a partition. In some cases, the consumer may not have an opportunity to do so. @@ -121,13 +120,31 @@ public interface ConsumerRebalanceListener { /** * A callback method the user can implement to provide handling of offset commits to a customized store. * This method will be called during a rebalance operation when the consumer has to give up some partitions. - * It can also be called when consumer is being closed ({@link KafkaConsumer#close(Duration)}) - * or is unsubscribing ({@link KafkaConsumer#unsubscribe()}). + * The consumer may need to give up some partitions (thus this callback executed) under the following scenarios: + *

+ * <ul>
+ * <li>If the consumer assignment changes</li>
+ * <li>If the consumer is being closed ({@link KafkaConsumer#close(CloseOptions option)})</li>
+ * <li>If the consumer is unsubscribing ({@link KafkaConsumer#unsubscribe()})</li>
+ * </ul>
* It is recommended that offsets should be committed in this callback to either Kafka or a * custom offset store to prevent duplicate data. *

- * In eager rebalancing, it will always be called at the start of a rebalance and after the consumer stops fetching data. - * In cooperative rebalancing, it will be called at the end of a rebalance on the set of partitions being revoked iff the set is non-empty. + * This callback is always called before re-assigning the partitions. + * If the consumer is using the {@link GroupProtocol#CLASSIC} rebalance protocol: + *

+ * <ul>
+ * <li>
+ * In eager rebalancing, onPartitionsRevoked will be called with the full set of assigned partitions as a parameter (all partitions are revoked).
+ * It will be called even if there are no partitions to revoke.
+ * </li>
+ * <li>
+ * In cooperative rebalancing, onPartitionsRevoked will be called with the set of partitions to revoke,
+ * iff the set is non-empty.
+ * </li>
+ * </ul>
+ * If the consumer is using the {@link GroupProtocol#CONSUMER} rebalance protocol, this callback will be called
+ * with the set of partitions to revoke iff the set is non-empty
+ * (same behavior as the {@link GroupProtocol#CLASSIC} rebalance protocol with Cooperative mode).
+ *

* For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}. *
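To make the callback ordering described above concrete, here is a minimal sketch (not part of this patch) of a rebalance listener that commits the offsets it has tracked for the partitions being revoked. The topic name and the offset bookkeeping are illustrative assumptions; only standard KafkaConsumer APIs (subscribe with a listener, commitSync with an offset map) are used.

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class CommitOnRevokeListener implements ConsumerRebalanceListener {

    private final KafkaConsumer<String, String> consumer;
    // Offsets tracked by the application as records are processed (illustrative bookkeeping).
    private final Map<TopicPartition, OffsetAndMetadata> processedOffsets = new HashMap<>();

    public CommitOnRevokeListener(KafkaConsumer<String, String> consumer) {
        this.consumer = consumer;
    }

    public void trackProcessed(TopicPartition partition, long nextOffsetToConsume) {
        processedOffsets.put(partition, new OffsetAndMetadata(nextOffsetToConsume));
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Commit before ownership moves to another member. Under the CONSUMER protocol and under
        // Classic/Cooperative, this receives only the partitions that are actually being revoked.
        Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>(processedOffsets);
        toCommit.keySet().retainAll(partitions);
        if (!toCommit.isEmpty())
            consumer.commitSync(toCommit);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Invoked after the rebalance completes, even if no new partitions were added;
        // a custom offset store could seek to its saved positions here.
    }
}

The listener would be registered with something like consumer.subscribe(List.of("orders"), listener), where "orders" is a made-up topic; setting group.protocol=consumer selects the CONSUMER protocol behaviour described above.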

* It is common for the revocation callback to use the consumer instance in order to commit offsets. It is possible @@ -136,8 +153,9 @@ public interface ConsumerRebalanceListener { * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * - * @param partitions The list of partitions that were assigned to the consumer and now need to be revoked (may not - * include all currently assigned partitions, i.e. there may still be some partitions left) + * @param partitions The list of partitions that were assigned to the consumer and now need to be revoked. This will + * include the full assignment under the Classic/Eager protocol, given that it revokes all partitions. + * It will only include the subset to revoke under the Classic/Cooperative and Consumer protocols. * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ @@ -145,12 +163,13 @@ public interface ConsumerRebalanceListener { /** * A callback method the user can implement to provide handling of customized offsets on completion of a successful - * partition re-assignment. This method will be called after the partition re-assignment completes and before the - * consumer starts fetching data, and only as the result of a {@link Consumer#poll(java.time.Duration) poll(long)} call. + * partition re-assignment. This method will be called after the partition re-assignment completes (even if no new + * partitions were assigned to the consumer), and before the consumer starts fetching data, + * and only as the result of a {@link Consumer#poll(java.time.Duration) poll(long)} call. *

* It is guaranteed that under normal conditions all the processes in a consumer group will execute their - * {@link #onPartitionsRevoked(Collection)} callback before any instance executes its - * {@link #onPartitionsAssigned(Collection)} callback. During exceptional scenarios, partitions may be migrated + * {@link #onPartitionsRevoked(Collection)} callback before any instance executes this onPartitionsAssigned callback. + * During exceptional scenarios, partitions may be migrated * without the old owner being notified (i.e. their {@link #onPartitionsRevoked(Collection)} callback not triggered), * and later when the old owner consumer realized this event, the {@link #onPartitionsLost(Collection)} callback * will be triggered by the consumer then. @@ -161,9 +180,11 @@ public interface ConsumerRebalanceListener { * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * - * @param partitions The list of partitions that are now assigned to the consumer (previously owned partitions will - * NOT be included, i.e. this list will only include newly added partitions) - * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} + * @param partitions Partitions that have been added to the assignment as a result of the rebalance. + * Note that partitions that were already owned by this consumer and remain assigned are not + * included in this list under the Classic/Cooperative or Consumer protocols. The full assignment + * will be received under the Classic/Eager protocol. + * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} + * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ void onPartitionsAssigned(Collection partitions); @@ -188,10 +209,9 @@ public interface ConsumerRebalanceListener { * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * * @param partitions The list of partitions that were assigned to the consumer and now have been reassigned - * to other consumers. With the current protocol this will always include all of the consumer's - * previously assigned partitions, but this may change in future protocols (ie there would still - * be some partitions left) - * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} + * to other consumers. With both the Classic and Consumer protocols, this will always include + * all partitions that were previously assigned to the consumer.
+ * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ default void onPartitionsLost(Collection partitions) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 2364864a3c1ef..9f1992d65688a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -30,6 +30,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidRegularExpression; +import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; @@ -660,7 +661,7 @@ public Set subscription() { * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}. * *

- * As part of group management, the consumer will keep track of the list of consumers that belong to a particular + * As part of group management, the group coordinator will keep track of the list of consumers that belong to a particular * group and will trigger a rebalance operation if any one of the following events are triggered: *

    *
* <li>Number of partitions change for any of the subscribed topics
@@ -669,8 +670,11 @@ public Set subscription() {
* <li>A new member is added to the consumer group *
*

- * When any of these events are triggered, the provided listener will be invoked first to indicate that - * the consumer's assignment has been revoked, and then again when the new assignment has been received. + * When any of these events are triggered, the provided listener will be invoked in this way: + *

+ * <ul>
+ * <li>{@link ConsumerRebalanceListener#onPartitionsRevoked(Collection)} will be invoked with the partitions to revoke, before re-assigning those partitions to another consumer.</li>
+ * <li>{@link ConsumerRebalanceListener#onPartitionsAssigned(Collection)} will be invoked when the rebalance completes (even if no new partitions are assigned to the consumer).</li>
+ * </ul>
* Note that rebalances will only occur during an active call to {@link #poll(Duration)}, so callbacks will * also only be invoked during that time. * @@ -908,7 +912,8 @@ public ConsumerRecords poll(final Duration timeout) { * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). *

* Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + * but only when the consumer is using the consumer group protocol. * * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This fatal error can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -952,7 +957,8 @@ public void commitSync() { * encountered (in which case it is thrown to the caller), or the passed timeout expires. *

* Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + * but only when the consumer is using the consumer group protocol. * * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -1001,9 +1007,11 @@ public void commitSync(Duration timeout) { * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). *

* Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + * but only when the consumer is using the consumer group protocol. * - * @param offsets A map of offsets by partition with associated metadata + * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it + * is safe to mutate the map after returning. * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, * or if there is an active group with the same group.id which is using group management. In such cases, @@ -1052,9 +1060,11 @@ public void commitSync(final Map offsets) { * encountered (in which case it is thrown to the caller), or the timeout expires. *
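A short sketch of the explicit-offset commit described above (topic, partition, offset, and metadata values are invented); the point is that the offset map can be safely reused after the call because the consumer copies it internally.

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

public final class ManualCommitSketch {

    // 'consumer' is assumed to be an already configured KafkaConsumer<String, String>.
    static void commitNextPosition(KafkaConsumer<String, String> consumer) {
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        // Commit the offset of the next record to be consumed, with optional metadata.
        offsets.put(new TopicPartition("orders", 0), new OffsetAndMetadata(43L, "worker-1"));
        consumer.commitSync(offsets, Duration.ofSeconds(10));
        // Safe: the map was copied internally, so clearing it here does not affect the commit.
        offsets.clear();
    }
}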

* Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, + * but only when the consumer is using the consumer group protocol. * - * @param offsets A map of offsets by partition with associated metadata + * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it + * is safe to mutate the map after returning. * @param timeout The maximum amount of time to await completion of the offset commit * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -1143,7 +1153,7 @@ public void commitAsync(OffsetCommitCallback callback) { * offsets committed through this API are guaranteed to complete before a subsequent call to {@link #commitSync()} * (and variants) returns. * - * @param offsets A map of offsets by partition with associate metadata. This map will be copied internally, so it + * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it * is safe to mutate the map after returning. * @param callback Callback to invoke when the commit completes * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer is using the classic group protocol @@ -1563,8 +1573,8 @@ public Set paused() { * @param timestampsToSearch the mapping from partition to the timestamp to look up. * * @return a mapping from partition to the timestamp and offset of the first message with timestamp greater - * than or equal to the target timestamp. {@code null} will be returned for the partition if there is no - * such message. + * than or equal to the target timestamp. If the timestamp and offset for a specific partition cannot be found within + * the default timeout, and no corresponding message exists, the entry in the returned map will be {@code null} * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws IllegalArgumentException if the target timestamp is negative @@ -1590,8 +1600,8 @@ public Map offsetsForTimes(Map beginningOffsets(Collection par * @param partitions the partitions to get the earliest offsets * @param timeout The maximum amount of time to await retrieval of the beginning offsets * - * @return The earliest available offsets for the given partitions + * @return The earliest available offsets for the given partitions, and it will return empty map if zero timeout is provided * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before @@ -1684,7 +1694,7 @@ public Map endOffsets(Collection partition * @param partitions the partitions to get the end offsets. 
* @param timeout The maximum amount of time to await retrieval of the end offsets * - * @return The end offsets for the given partitions. + * @return The end offsets for the given partitions, and it will return empty map if zero timeout is provided * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws org.apache.kafka.common.errors.TimeoutException if the offsets could not be fetched before @@ -1761,14 +1771,19 @@ public void enforceRebalance() { } /** - * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. + * Close the consumer with {@link CloseOptions.GroupMembershipOperation#DEFAULT default leave group behavior}, + * waiting for up to the default timeout of 30 seconds for any needed cleanup. * If auto-commit is enabled, this will commit the current offsets if possible within the default - * timeout. See {@link #close(Duration)} for details. Note that {@link #wakeup()} + * timeout. See {@link #close(CloseOptions)} for details. Note that {@link #wakeup()} * cannot be used to interrupt close. + *

+ * This close operation will attempt all shutdown steps even if one of them fails. + * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * - * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted - * before or while this function is called - * @throws org.apache.kafka.common.KafkaException for any other error during close + * @throws WakeupException if {@link #wakeup()} is called before or while this function is called + * @throws InterruptException if the calling thread is interrupted before or while this function is called + * @throws KafkaException for any other error during close + * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) */ @Override public void close() { @@ -1776,10 +1791,13 @@ public void close() { } /** - * Tries to close the consumer cleanly within the specified timeout. This method waits up to - * {@code timeout} for the consumer to complete pending commits and leave the group. + * This method has been deprecated since Kafka 4.1; use {@link KafkaConsumer#close(CloseOptions)} instead. + *

+ * Close the consumer with {@link CloseOptions.GroupMembershipOperation#DEFAULT default leave group behavior} + * cleanly within the specified timeout. This method waits up to + * {@code timeout} for the consumer to complete pending commits and maybe leave the group (if the member is dynamic). * If auto-commit is enabled, this will commit the current offsets if possible within the - * timeout. If the consumer is unable to complete offset commits and gracefully leave the group + * timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be * used to interrupt close. *

@@ -1789,39 +1807,68 @@ public void close() { * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. * Note that the execution time of callbacks (such as {@link OffsetCommitCallback} and * {@link ConsumerRebalanceListener}) does not consume time from the close timeout. + *

+ * This close operation will attempt all shutdown steps even if one of them fails. + * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * * @param timeout The maximum time to wait for consumer to close gracefully. The value must be * non-negative. Specifying a timeout of zero means do not wait for pending requests to complete. - * * @throws IllegalArgumentException If the {@code timeout} is negative. - * @throws InterruptException If the thread is interrupted before or while this function is called - * @throws org.apache.kafka.common.KafkaException for any other error during close + * @throws WakeupException if {@link #wakeup()} is called before or while this function is called + * @throws InterruptException if the calling thread is interrupted before or while this function is called + * @throws KafkaException for any other error during close + * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) */ + @Deprecated(since = "4.1") @Override - @SuppressWarnings("deprecation") public void close(Duration timeout) { delegate.close(timeout); } /** - * Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll. - * The thread which is blocking in an operation will throw {@link org.apache.kafka.common.errors.WakeupException}. - * If no thread is blocking in a method which can throw {@link org.apache.kafka.common.errors.WakeupException}, the next call to such a method will raise it instead. + * Close the consumer cleanly. {@link CloseOptions} allows to specify a timeout and a + * {@link CloseOptions.GroupMembershipOperation leave group behavior}. + * If no timeout is specified, the default timeout of 30 seconds is used. + * If no leave group behavior is specified, the {@link CloseOptions.GroupMembershipOperation#DEFAULT default + * leave group behavior} is used. + *

+ * This method waits up to the timeout for the consumer to complete pending commits and maybe leave the group, + * depending on the specified leave group behavior. + * If auto-commit is enabled, this will commit the current offsets if possible within the + * timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) + * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be + * used to interrupt close. + *

+ * The actual maximum wait time is bounded by the {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} setting, which + * only applies to operations performed with the broker (coordinator-related requests and + * fetch sessions). Even if a larger timeout is specified, the consumer will not wait longer than + * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. + * Note that the execution time of callbacks (such as {@link OffsetCommitCallback} and + * {@link ConsumerRebalanceListener}) does not consume time from the close timeout. + *
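As an illustration of the close path described here, a sketch using CloseOptions; the static factory and with-style builder names below are assumptions based on KIP-1092 and should be checked against the CloseOptions Javadoc, while CloseOptions.GroupMembershipOperation and close(CloseOptions) themselves are confirmed by this change.

import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;

public final class CloseWithOptionsSketch {

    static void closeButRemainInGroup(KafkaConsumer<String, String> consumer) {
        // Assumed builder-style API: bound the close to 10 seconds and keep the member in the
        // group so its partitions are not immediately reassigned (useful for static members).
        CloseOptions options = CloseOptions
                .groupMembershipOperation(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP)
                .withTimeout(Duration.ofSeconds(10));
        consumer.close(options);
    }
}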

+ * This close operation will attempt all shutdown steps even if one of them fails. + * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. + * + * @param option see {@link CloseOptions}; cannot be {@code null} + * @throws IllegalArgumentException If the {@code option} timeout is negative + * @throws WakeupException if {@link #wakeup()} is called before or while this function is called + * @throws InterruptException if the calling thread is interrupted before or while this function is called + * @throws KafkaException for any other error during close + * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) */ @Override - public void wakeup() { - delegate.wakeup(); + public void close(CloseOptions option) { + delegate.close(option); } /** - * This method allows the caller to specify shutdown behavior using the {@link CloseOptions} class. - * If {@code null} is provided, the default behavior will be applied, equivalent to providing a new {@link CloseOptions} instance. - * - * @param option see {@link CloseOptions} + * Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll. + * The thread which is blocking in an operation will throw {@link org.apache.kafka.common.errors.WakeupException}. + * If no thread is blocking in a method which can throw {@link org.apache.kafka.common.errors.WakeupException}, the next call to such a method will raise it instead. */ @Override - public void close(CloseOptions option) { - delegate.close(option); + public void wakeup() { + delegate.wakeup(); } // Functions below are for testing only diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java index b5a862f239d0d..7f3bad2e318f2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java @@ -51,8 +51,7 @@ /** * A client that consumes records from a Kafka cluster using a share group. *

- * This is an early access feature under development which is introduced by KIP-932. - * It is not suitable for production use until it is fully implemented and released. + * This is a preview feature introduced by KIP-932. It is not yet recommended for production use. * *

Cross-Version Compatibility

* This client can communicate with brokers that are a version that supports share groups. You will receive an @@ -100,7 +99,7 @@ * of the topic-partitions that match its subscriptions. Records are acquired for delivery to this consumer with a * time-limited acquisition lock. While a record is acquired, it is not available for another consumer. By default, * the lock duration is 30 seconds, but it can also be controlled using the group {@code group.share.record.lock.duration.ms} - * configuration parameter. The idea is that the lock is automatically released once the lock duration has elapsed, and + * configuration property. The idea is that the lock is automatically released once the lock duration has elapsed, and * then the record is available to be given to another consumer. The consumer which holds the lock can deal with it in * the following ways: *
    @@ -116,8 +115,8 @@ * {@code group.share.record.lock.partition.limit}. By limiting the duration of the acquisition lock and automatically * releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. *

    - * The consumer can choose to use implicit or explicit acknowledgement of the records it processes by configuring the - * consumer {@code share.acknowledgement.mode} property. + * The consumer can choose to use implicit or explicit acknowledgement of the records it processes by using the + * consumer {@code share.acknowledgement.mode} configuration property. *

    * If the application sets the property to "implicit" or does not set it at all, then the consumer is using * implicit acknowledgement. In this mode, the application acknowledges delivery by: @@ -129,7 +128,7 @@ * the delivered records as processed successfully and commits the acknowledgements to Kafka. *

* <li>Calling {@link #close()} which releases any acquired records without acknowledgement.</li>
* </ul>
- * If the application sets the property to "explicit", then the consumer is using explicit acknowledgment. + * If the application sets the property to "explicit", then the consumer is using explicit acknowledgement. * The application must acknowledge all records returned from {@link #poll(Duration)} using * {@link #acknowledge(ConsumerRecord, AcknowledgeType)} before its next call to {@link #poll(Duration)}. * If the application calls {@link #poll(Duration)} without having acknowledged all records, an @@ -162,6 +161,7 @@ * props.setProperty("group.id", "test"); * props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + * * KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props); * consumer.subscribe(Arrays.asList("foo")); * while (true) { @@ -181,6 +181,7 @@ * props.setProperty("group.id", "test"); * props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + * * KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props); * consumer.subscribe(Arrays.asList("foo")); * while (true) { @@ -194,7 +195,7 @@ * * *

Per-record acknowledgement (explicit acknowledgement)

- * This example demonstrates using different acknowledgement types depending on the outcome of processing the records. + * This example demonstrates using different acknowledge types depending on the outcome of processing the records. * Here the {@code share.acknowledgement.mode} property is set to "explicit" so the consumer must explicitly acknowledge each record. *
  *     Properties props = new Properties();
@@ -203,6 +204,7 @@
  *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  *     props.setProperty("share.acknowledgement.mode", "explicit");
+ *
  *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
  *     consumer.subscribe(Arrays.asList("foo"));
  *     while (true) {
@@ -237,8 +239,8 @@
  * In read_uncommitted isolation level, the share group consumes all non-transactional and transactional
  * records. The consumption is bounded by the high-water mark.
  * 

- * In read_committed isolation level (not yet supported), the share group only consumes non-transactional - * records and committed transactional records. The set of records which are eligible to become in-flight records are + * In read_committed isolation level, the share group only consumes non-transactional records and + * committed transactional records. The set of records which are eligible to become in-flight records are * non-transactional records and committed transactional records only. The consumption is bounded by the last stable * offset, so an open transaction blocks the progress of the share group with read_committed isolation level. * @@ -443,7 +445,7 @@ public void unsubscribe() { } /** - * Fetch data for the topics specified using {@link #subscribe(Collection)}. It is an error to not have + * Deliver records for the topics specified using {@link #subscribe(Collection)}. It is an error to not have * subscribed to any topics before polling for data. * *

@@ -452,13 +454,14 @@ public void unsubscribe() { * * @param timeout The maximum time to block (must not be greater than {@link Long#MAX_VALUE} milliseconds) * - * @return map of topic to records since the last fetch for the subscribed list of topics + * @return map of topic to records * * @throws AuthenticationException if authentication fails. See the exception for more details * @throws AuthorizationException if caller lacks Read access to any of the subscribed * topics or to the share group. See the exception for more details * @throws IllegalArgumentException if the timeout value is negative - * @throws IllegalStateException if the consumer is not subscribed to any topics + * @throws IllegalStateException if the consumer is not subscribed to any topics, or it is using + * explicit acknowledgement and has not acknowledged all records previously delivered * @throws ArithmeticException if the timeout is greater than {@link Long#MAX_VALUE} milliseconds. * @throws InvalidTopicException if the current subscription contains any invalid * topic (per {@link org.apache.kafka.common.internals.Topic#validate(String)}) @@ -475,11 +478,12 @@ public ConsumerRecords poll(Duration timeout) { * Acknowledge successful delivery of a record returned on the last {@link #poll(Duration)} call. * The acknowledgement is committed on the next {@link #commitSync()}, {@link #commitAsync()} or * {@link #poll(Duration)} call. + *

This method can only be used if the consumer is using explicit acknowledgement. * * @param record The record to acknowledge * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already - * used implicit acknowledgement + * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using + * explicit acknowledgement */ @Override public void acknowledge(ConsumerRecord record) { @@ -489,20 +493,42 @@ public void acknowledge(ConsumerRecord record) { /** * Acknowledge delivery of a record returned on the last {@link #poll(Duration)} call indicating whether * it was processed successfully. The acknowledgement is committed on the next {@link #commitSync()}, - * {@link #commitAsync()} or {@link #poll(Duration)} call. By using this method, the consumer is using - * explicit acknowledgement. + * {@link #commitAsync()} or {@link #poll(Duration)} call. + *

This method can only be used if the consumer is using explicit acknowledgement. * * @param record The record to acknowledge - * @param type The acknowledgement type which indicates whether it was processed successfully + * @param type The acknowledge type which indicates whether it was processed successfully * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already - * used implicit acknowledgement + * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using + * explicit acknowledgement */ @Override public void acknowledge(ConsumerRecord record, AcknowledgeType type) { delegate.acknowledge(record, type); } + /** + * Acknowledge delivery of a record returned on the last {@link #poll(Duration)} call indicating whether + * it was processed successfully. The acknowledgement is committed on the next {@link #commitSync()}, + * {@link #commitAsync()} or {@link #poll(Duration)} call. + *

This method can only be used if the consumer is using explicit acknowledgement. + *

It provides an alternative to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for + * situations where the {@link ConsumerRecord} is not available, such as when the record could not be deserialized. + * + * @param topic The topic of the record to acknowledge + * @param partition The partition of the record to acknowledge + * @param offset The offset of the record to acknowledge + * @param type The acknowledge type which indicates whether it was processed successfully + * + * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using + * explicit acknowledgement + */ + + @Override + public void acknowledge(String topic, int partition, long offset, AcknowledgeType type) { + delegate.acknowledge(topic, partition, offset, type); + } + /** * Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, * the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or @@ -585,7 +611,7 @@ public void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callb * client to complete the request. *
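A sketch of how the coordinate-based acknowledge overload might be combined with explicit acknowledgement. It assumes the deserialization failure surfaces as a RecordDeserializationException carrying the topic partition and offset; adapt the error handling to however the failure is actually reported.

import org.apache.kafka.clients.consumer.AcknowledgeType;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordDeserializationException;

import java.time.Duration;

public final class ExplicitAckSketch {

    static void pollAndAcknowledge(KafkaShareConsumer<String, String> consumer) {
        try {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                // Process the record, then explicitly mark it as successfully handled.
                consumer.acknowledge(record, AcknowledgeType.ACCEPT);
            }
        } catch (RecordDeserializationException e) {
            // No ConsumerRecord is available here, so acknowledge by coordinates and reject the bad record.
            TopicPartition tp = e.topicPartition();
            consumer.acknowledge(tp.topic(), tp.partition(), e.offset(), AcknowledgeType.REJECT);
        }
        // Commit the acknowledgements recorded so far.
        consumer.commitSync();
    }
}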

* Client telemetry is controlled by the {@link ConsumerConfig#ENABLE_METRICS_PUSH_CONFIG} - * configuration option. + * configuration property. * * @param timeout The maximum time to wait for consumer client to determine its client instance ID. * The value must be non-negative. Specifying a timeout of zero means do not @@ -653,10 +679,13 @@ public void unregisterMetricFromSubscription(KafkaMetric metric) { * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. * This will commit acknowledgements if possible within the default timeout. * See {@link #close(Duration)} for details. Note that {@link #wakeup()} cannot be used to interrupt close. + *

+ * This close operation will attempt all shutdown steps even if one of them fails. + * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called + * @throws WakeupException if {@link #wakeup()} is called before or while this method is called * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close + * @throws KafkaException for any other error during close */ @Override public void close() { @@ -675,14 +704,16 @@ public void close() { * Even if a larger timeout is specified, the consumer will not wait longer than * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. * Note that the execution time of callbacks (such as {@link AcknowledgementCommitCallback}) do not consume time from the close timeout. + *

+ * This close operation will attempt all shutdown steps even if one of them fails. + * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * * @param timeout The maximum time to wait for consumer to close gracefully. The value must be * non-negative. Specifying a timeout of zero means do not wait for pending requests to complete. - * * @throws IllegalArgumentException if the {@code timeout} is negative - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close + * @throws WakeupException if {@link #wakeup()} is called before or while this method is called + * @throws InterruptException if the thread is interrupted before or while this method is called + * @throws KafkaException for any other error during close */ @Override public void close(Duration timeout) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java index 9022d79c0cc02..303f8e5f1ddc0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java @@ -335,7 +335,7 @@ public synchronized void addRecord(ConsumerRecord record) { * @param maxPollRecords the max.poll.records. */ public synchronized void setMaxPollRecords(long maxPollRecords) { - if (this.maxPollRecords < 1) { + if (maxPollRecords < 1) { throw new IllegalArgumentException("MaxPollRecords must be strictly superior to 0"); } this.maxPollRecords = maxPollRecords; @@ -575,8 +575,8 @@ public void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); } + @Deprecated @Override - @SuppressWarnings("deprecation") public synchronized void close(Duration timeout) { this.closed = true; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java index 81cb2eeec0046..f1dad522d5ab0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java @@ -104,6 +104,10 @@ public synchronized void acknowledge(ConsumerRecord record) { public synchronized void acknowledge(ConsumerRecord record, AcknowledgeType type) { } + @Override + public synchronized void acknowledge(String topic, int partition, long offset, AcknowledgeType type) { + } + @Override public synchronized Map> commitSync() { return new HashMap<>(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java index d6b3b947c209d..f459dd5ba5507 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java @@ -54,10 +54,7 @@ public OffsetAndMetadata(long offset, Optional leaderEpoch, String meta // The server converts null metadata to an empty string. So we store it as an empty string as well on the client // to be consistent. 
- if (metadata == null) - this.metadata = OffsetFetchResponse.NO_METADATA; - else - this.metadata = metadata; + this.metadata = Objects.requireNonNullElse(metadata, OffsetFetchResponse.NO_METADATA); } /** @@ -82,6 +79,11 @@ public long offset() { return offset; } + /** + * Get the metadata of the previously consumed record. + * + * @return the metadata or empty string if no metadata + */ public String metadata() { return metadata; } @@ -106,21 +108,20 @@ public boolean equals(Object o) { OffsetAndMetadata that = (OffsetAndMetadata) o; return offset == that.offset && Objects.equals(metadata, that.metadata) && - Objects.equals(leaderEpoch, that.leaderEpoch); + Objects.equals(leaderEpoch(), that.leaderEpoch()); } @Override public int hashCode() { - return Objects.hash(offset, metadata, leaderEpoch); + return Objects.hash(offset, metadata, leaderEpoch()); } @Override public String toString() { return "OffsetAndMetadata{" + "offset=" + offset + - ", leaderEpoch=" + leaderEpoch + + ", leaderEpoch=" + leaderEpoch().orElse(null) + ", metadata='" + metadata + '\'' + '}'; } - } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java index 900c249d85266..58f5fc4d38ea9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java @@ -70,6 +70,11 @@ public interface ShareConsumer extends Closeable { */ void acknowledge(ConsumerRecord record, AcknowledgeType type); + /** + * @see KafkaShareConsumer#acknowledge(String, int, long, AcknowledgeType) + */ + void acknowledge(String topic, int partition, long offset, AcknowledgeType type); + /** * @see KafkaShareConsumer#commitSync() */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java index a07e12a518abb..4098a1fea8828 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java @@ -1170,7 +1170,7 @@ protected void handlePollTimeoutExpiry() { public synchronized RequestFuture maybeLeaveGroup(CloseOptions.GroupMembershipOperation membershipOperation, String leaveReason) { RequestFuture future = null; - if (rebalanceConfig.leaveGroupOnClose && shouldSendLeaveGroupRequest(membershipOperation)) { + if (shouldSendLeaveGroupRequest(membershipOperation)) { log.info("Member {} sending LeaveGroup request to coordinator {} due to {}", generation.memberId, coordinator, leaveReason); LeaveGroupRequest.Builder request = new LeaveGroupRequest.Builder( diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java index 533cbbbaa9851..e3e52f7525dd9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java @@ -45,6 +45,7 @@ import java.io.Closeable; import java.time.Duration; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -54,7 +55,6 @@ import java.util.Optional; import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; import 
static org.apache.kafka.clients.consumer.internals.FetchUtils.requestMetadataUpdate; @@ -147,6 +147,7 @@ public boolean hasAvailableFetches() { * @param data {@link FetchSessionHandler.FetchRequestData} that represents the session data * @param resp {@link ClientResponse} from which the {@link FetchResponse} will be retrieved */ + @SuppressWarnings("NPathComplexity") protected void handleFetchSuccess(final Node fetchTarget, final FetchSessionHandler.FetchRequestData data, final ClientResponse resp) { @@ -174,6 +175,8 @@ protected void handleFetchSuccess(final Node fetchTarget, final Set partitions = new HashSet<>(responseData.keySet()); final FetchMetricsAggregator metricAggregator = new FetchMetricsAggregator(metricsManager, partitions); + boolean needsWakeup = true; + Map partitionsWithUpdatedLeaderInfo = new HashMap<>(); for (Map.Entry entry : responseData.entrySet()) { TopicPartition partition = entry.getKey(); @@ -220,13 +223,24 @@ protected void handleFetchSuccess(final Node fetchTarget, metricAggregator, fetchOffset); fetchBuffer.add(completedFetch); + needsWakeup = false; } + // "Wake" the fetch buffer on any response, even if it's empty, to allow the consumer to not block + // indefinitely waiting on the fetch buffer to get data. + if (needsWakeup) + fetchBuffer.wakeup(); + if (!partitionsWithUpdatedLeaderInfo.isEmpty()) { - List leaderNodes = response.data().nodeEndpoints().stream() - .map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack())) - .filter(e -> !e.equals(Node.noNode())) - .collect(Collectors.toList()); + List leaderNodes = new ArrayList<>(); + + for (FetchResponseData.NodeEndpoint e : response.data().nodeEndpoints()) { + Node node = new Node(e.nodeId(), e.host(), e.port(), e.rack()); + + if (!node.equals(Node.noNode())) + leaderNodes.add(node); + } + Set updatedPartitions = metadata.updatePartitionLeadership(partitionsWithUpdatedLeaderInfo, leaderNodes); updatedPartitions.forEach( tp -> { @@ -397,7 +411,7 @@ protected Map prepareCloseFetchSessi fetchable.put(fetchTarget, sessionHandler.newBuilder()); }); - return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build())); + return convert(fetchable); } /** @@ -470,7 +484,21 @@ protected Map prepareFetchRequests() } } - return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build())); + return convert(fetchable); + } + + /** + * This method converts {@link FetchSessionHandler.Builder} instances to + * {@link FetchSessionHandler.FetchRequestData} instances. It intentionally forgoes use of the Java Collections + * Streams API to reduce overhead in the critical network path. 
+ */ + private Map convert(Map fetchable) { + Map map = new HashMap<>(fetchable.size()); + + for (Map.Entry entry : fetchable.entrySet()) + map.put(entry.getKey(), entry.getValue().build()); + + return map; } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java index 9d219907926d4..3998d672006a3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java @@ -63,8 +63,9 @@ public abstract class AbstractHeartbeatRequestManager impl */ static final Utils.TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new Utils.TopicPartitionComparator(); - /** - * TopicIdPartition comparator based on topic name and partition (ignoring topic ID while sorting, - * as this is sorted mainly for logging purposes). - */ - static final Utils.TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new Utils.TopicIdPartitionComparator(); - /** * Group ID of the consumer group the member will be part of, provided when creating the current * membership manager. @@ -376,9 +368,12 @@ protected void processAssignmentReceived(Map> assignmen */ private void replaceTargetAssignmentWithNewAssignment(Map> assignment) { currentTargetAssignment.updateWith(assignment).ifPresent(updatedAssignment -> { - log.debug("Target assignment updated from {} to {}. Member will reconcile it on the next poll.", - currentTargetAssignment, updatedAssignment); + log.debug("Member {} updated its target assignment from {} to {}. Member will reconcile it on the next poll.", + memberId, currentTargetAssignment, updatedAssignment); currentTargetAssignment = updatedAssignment; + // Register the assigned topic IDs on the subscription state. + // This will be used to ensure they are included in metadata requests (even though they may not be reconciled yet). 
+ subscriptions.setAssignedTopicIds(currentTargetAssignment.partitions.keySet()); }); } @@ -449,7 +444,7 @@ public void transitionToFatal() { log.error("Member {} with epoch {} transitioned to fatal state", memberId, memberEpoch); notifyEpochChange(Optional.empty()); - if (previousState == MemberState.UNSUBSCRIBED) { + if (previousState == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} got fatal error from the broker but it already " + "left the group, so onPartitionsLost callback won't be triggered.", memberId, memberEpoch); return; @@ -517,11 +512,10 @@ private void clearAssignment() { * @param assignedPartitions Full assignment, to update in the subscription state * @param addedPartitions Newly added partitions */ - private void updateSubscriptionAwaitingCallback(SortedSet assignedPartitions, + private void updateSubscriptionAwaitingCallback(TopicIdPartitionSet assignedPartitions, SortedSet addedPartitions) { - Set assignedTopicPartitions = toTopicPartitionSet(assignedPartitions); - subscriptions.assignFromSubscribedAwaitingCallback(assignedTopicPartitions, addedPartitions); - notifyAssignmentChange(assignedTopicPartitions); + subscriptions.assignFromSubscribedAwaitingCallback(assignedPartitions.topicPartitions(), addedPartitions); + notifyAssignmentChange(assignedPartitions.topicPartitions()); } /** @@ -541,6 +535,7 @@ public void transitionToJoining() { } resetEpoch(); transitionTo(MemberState.JOINING); + log.debug("Member {} will join the group on the next call to poll.", memberId); clearPendingAssignmentsAndLocalNamesCache(); } @@ -618,6 +613,8 @@ protected CompletableFuture leaveGroup(boolean runCallbacks) { clearAssignmentAndLeaveGroup(); }); } else { + log.debug("Member {} attempting to leave has no rebalance callbacks, " + + "so it will clear assignments and transition to send heartbeat to leave group.", memberId); clearAssignmentAndLeaveGroup(); } @@ -708,8 +705,10 @@ public void onHeartbeatRequestGenerated() { transitionTo(MemberState.STABLE); } else { log.debug("Member {} with epoch {} transitioned to {} after a heartbeat was sent " + - "to ack a previous reconciliation. New assignments are ready to " + - "be reconciled.", memberId, memberEpoch, MemberState.RECONCILING); + "to ack a previous reconciliation. \n" + + "\t\tCurrent assignment: {} \n" + + "\t\tTarget assignment: {}\n", + memberId, memberEpoch, MemberState.RECONCILING, currentAssignment, currentTargetAssignment); transitionTo(MemberState.RECONCILING); } } else if (state == MemberState.LEAVING) { @@ -832,14 +831,14 @@ public void maybeReconcile(boolean canCommit) { return; } if (reconciliationInProgress) { - log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. Assignment " + - currentTargetAssignment + " will be handled in the next reconciliation loop."); + log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. " + + "Assignment {} will be handled in the next reconciliation loop.", currentTargetAssignment); return; } // Find the subset of the target assignment that can be resolved to topic names, and trigger a metadata update // if some topic IDs are not resolvable. 
- SortedSet assignedTopicIdPartitions = findResolvableAssignmentAndTriggerMetadataUpdate(); + TopicIdPartitionSet assignedTopicIdPartitions = findResolvableAssignmentAndTriggerMetadataUpdate(); final LocalAssignment resolvedAssignment = new LocalAssignment(currentTargetAssignment.localEpoch, assignedTopicIdPartitions); if (!currentAssignment.isNone() && resolvedAssignment.partitions.equals(currentAssignment.partitions)) { @@ -857,7 +856,7 @@ public void maybeReconcile(boolean canCommit) { // Keep copy of assigned TopicPartitions created from the TopicIdPartitions that are // being reconciled. Needed for interactions with the centralized subscription state that // does not support topic IDs yet, and for the callbacks. - SortedSet assignedTopicPartitions = toTopicPartitionSet(assignedTopicIdPartitions); + SortedSet assignedTopicPartitions = assignedTopicIdPartitions.toTopicNamePartitionSet(); SortedSet ownedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); ownedPartitions.addAll(subscriptions.assignedPartitions()); @@ -934,7 +933,7 @@ long getDeadlineMsForTimeout(final long timeoutMs) { * transition. Note that if any of the 2 callbacks fails, the reconciliation should fail. */ private void revokeAndAssign(LocalAssignment resolvedAssignment, - SortedSet assignedTopicIdPartitions, + TopicIdPartitionSet assignedTopicIdPartitions, SortedSet revokedPartitions, SortedSet addedPartitions) { CompletableFuture revocationResult; @@ -989,7 +988,7 @@ boolean maybeAbortReconciliation() { String reason = rejoinedWhileReconciliationInProgress ? "the member has re-joined the group" : "the member already transitioned out of the reconciling state into " + state; - log.info("Interrupting reconciliation that is not relevant anymore because " + reason); + log.info("Interrupting reconciliation that is not relevant anymore because {}", reason); markReconciliationCompleted(); } return shouldAbort; @@ -1031,15 +1030,6 @@ protected CompletableFuture signalPartitionsLost(Set parti return CompletableFuture.completedFuture(null); } - /** - * Build set of {@link TopicPartition} from the given set of {@link TopicIdPartition}. - */ - protected SortedSet toTopicPartitionSet(SortedSet topicIdPartitions) { - SortedSet result = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); - topicIdPartitions.forEach(topicIdPartition -> result.add(topicIdPartition.topicPartition())); - return result; - } - /** * Visible for testing. */ @@ -1073,8 +1063,8 @@ void markReconciliationCompleted() { * * */ - private SortedSet findResolvableAssignmentAndTriggerMetadataUpdate() { - final SortedSet assignmentReadyToReconcile = new TreeSet<>(TOPIC_ID_PARTITION_COMPARATOR); + private TopicIdPartitionSet findResolvableAssignmentAndTriggerMetadataUpdate() { + final TopicIdPartitionSet assignmentReadyToReconcile = new TopicIdPartitionSet(); final HashMap> unresolved = new HashMap<>(currentTargetAssignment.partitions); // Try to resolve topic names from metadata cache or subscription cache, and move @@ -1088,9 +1078,7 @@ private SortedSet findResolvableAssignmentAndTriggerMetadataUp Optional nameFromMetadata = findTopicNameInGlobalOrLocalCache(topicId); nameFromMetadata.ifPresent(resolvedTopicName -> { // Name resolved, so assignment is ready for reconciliation. 
- topicPartitions.forEach(tp -> - assignmentReadyToReconcile.add(new TopicIdPartition(topicId, tp, resolvedTopicName)) - ); + assignmentReadyToReconcile.addAll(topicId, resolvedTopicName, topicPartitions); it.remove(); }); } @@ -1146,7 +1134,7 @@ CompletableFuture revokePartitions(Set partitionsToRevoke) // Ensure the set of partitions to revoke are still assigned Set revokedPartitions = new HashSet<>(partitionsToRevoke); revokedPartitions.retainAll(subscriptions.assignedPartitions()); - log.info("Revoking previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("Revoking previously assigned partitions {}", revokedPartitions); signalPartitionsBeingRevoked(revokedPartitions); @@ -1200,7 +1188,7 @@ CompletableFuture revokePartitions(Set partitionsToRevoke) * @return Future that will complete when the callback execution completes. */ private CompletableFuture assignPartitions( - SortedSet assignedPartitions, + TopicIdPartitionSet assignedPartitions, SortedSet addedPartitions) { // Update assignment in the subscription state, and ensure that no fetching or positions @@ -1218,7 +1206,7 @@ private CompletableFuture assignPartitions( // returning no records, as no topic partitions are marked as fetchable. In contrast, with the classic consumer, // if the first callback fails but the next one succeeds, polling can still retrieve data. To align with // this behavior, we rely on assignedPartitions to avoid such scenarios. - subscriptions.enablePartitionsAwaitingCallback(toTopicPartitionSet(assignedPartitions)); + subscriptions.enablePartitionsAwaitingCallback(assignedPartitions.topicPartitions()); } else { // Keeping newly added partitions as non-fetchable after the callback failure. // They will be retried on the next reconciliation loop, until it succeeds or the @@ -1232,7 +1220,7 @@ private CompletableFuture assignPartitions( }); // Clear topic names cache, removing topics that are not assigned to the member anymore. 
- Set assignedTopics = assignedPartitions.stream().map(TopicIdPartition::topic).collect(Collectors.toSet()); + Set assignedTopics = assignedPartitions.topicNames(); assignedTopicNamesCache.values().retainAll(assignedTopics); return result; @@ -1450,16 +1438,13 @@ public LocalAssignment(long localEpoch, Map> partitions } } - public LocalAssignment(long localEpoch, SortedSet topicIdPartitions) { + public LocalAssignment(long localEpoch, TopicIdPartitionSet topicIdPartitions) { + Objects.requireNonNull(topicIdPartitions); this.localEpoch = localEpoch; - this.partitions = new HashMap<>(); if (localEpoch == NONE_EPOCH && !topicIdPartitions.isEmpty()) { throw new IllegalArgumentException("Local epoch must be set if there are partitions"); } - topicIdPartitions.forEach(topicIdPartition -> { - Uuid topicId = topicIdPartition.topicId(); - partitions.computeIfAbsent(topicId, k -> new TreeSet<>()).add(topicIdPartition.partition()); - }); + this.partitions = topicIdPartitions.toTopicIdPartitionMap(); } public String toString() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java index 4ac1513ede52d..c38b5859f5f59 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java @@ -965,8 +965,8 @@ private class GeneralAssignmentBuilder extends AbstractAssignmentBuilder { super(partitionsPerTopic, rackInfo, currentAssignment); this.subscriptions = subscriptions; - topic2AllPotentialConsumers = new HashMap<>(partitionsPerTopic.keySet().size()); - consumer2AllPotentialTopics = new HashMap<>(subscriptions.keySet().size()); + topic2AllPotentialConsumers = new HashMap<>(partitionsPerTopic.size()); + consumer2AllPotentialTopics = new HashMap<>(subscriptions.size()); // initialize topic2AllPotentialConsumers and consumer2AllPotentialTopics partitionsPerTopic.keySet().forEach( diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java index 8d3fab2358779..5bce77651b9c9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java @@ -185,7 +185,7 @@ public List getAcknowledgementBatches() { currentBatch.acknowledgeTypes().add(ACKNOWLEDGE_TYPE_GAP); } } - List optimalBatches = maybeOptimiseAcknowledgementTypes(currentBatch); + List optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch); optimalBatches.forEach(batch -> { if (canOptimiseForSingleAcknowledgeType(batch)) { @@ -204,7 +204,7 @@ public List getAcknowledgementBatches() { */ private AcknowledgementBatch maybeCreateNewBatch(AcknowledgementBatch currentBatch, Long nextOffset, List batches) { if (nextOffset != currentBatch.lastOffset() + 1) { - List optimalBatches = maybeOptimiseAcknowledgementTypes(currentBatch); + List optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch); optimalBatches.forEach(batch -> { if (canOptimiseForSingleAcknowledgeType(batch)) { @@ -228,7 +228,7 @@ private AcknowledgementBatch maybeCreateNewBatch(AcknowledgementBatch currentBat * whose count exceeds the default value. 
In this case, the batch is split into 2 such that the * batch with the continuous records has only 1 acknowledge type in its array. */ - private List maybeOptimiseAcknowledgementTypes(AcknowledgementBatch currentAcknowledgeBatch) { + private List maybeOptimiseAcknowledgeTypes(AcknowledgementBatch currentAcknowledgeBatch) { List batches = new ArrayList<>(); if (currentAcknowledgeBatch == null) return batches; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index 249904f87b0ae..938ae909027d0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -78,6 +78,7 @@ import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent; import org.apache.kafka.clients.consumer.internals.events.UpdatePatternSubscriptionEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceCallbackMetricsManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; @@ -143,7 +144,7 @@ import static java.util.Objects.requireNonNull; import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.configuredConsumerInterceptors; @@ -186,25 +187,6 @@ public class AsyncKafkaConsumer implements ConsumerDelegate { */ private class BackgroundEventProcessor implements EventProcessor { - private Optional streamsGroupRebalanceCallbacks = Optional.empty(); - private final Optional streamsRebalanceData; - - public BackgroundEventProcessor() { - this.streamsRebalanceData = Optional.empty(); - } - - public BackgroundEventProcessor(final Optional streamsRebalanceData) { - this.streamsRebalanceData = streamsRebalanceData; - } - - private void setStreamsRebalanceListener(final StreamsRebalanceListener streamsRebalanceListener) { - if (streamsRebalanceData.isEmpty()) { - throw new IllegalStateException("Background event processor was not created to be used with Streams " + - "rebalance protocol events"); - } - this.streamsGroupRebalanceCallbacks = Optional.of(streamsRebalanceListener); - } - @Override public void process(final BackgroundEvent event) { switch (event.type()) { @@ -277,49 +259,26 @@ private void processStreamsOnAllTasksLostCallbackNeededEvent(final StreamsOnAllT private StreamsOnTasksRevokedCallbackCompletedEvent invokeOnTasksRevokedCallback(final Set activeTasksToRevoke, final CompletableFuture future) { - final Optional error; - final Optional exceptionFromCallback = streamsGroupRebalanceCallbacks().onTasksRevoked(activeTasksToRevoke); - if (exceptionFromCallback.isPresent()) { - error = 
Optional.of(ConsumerUtils.maybeWrapAsKafkaException(exceptionFromCallback.get(), "Task revocation callback throws an error")); - } else { - error = Optional.empty(); - } + final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksRevoked(activeTasksToRevoke)); + final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task revocation callback throws an error")); return new StreamsOnTasksRevokedCallbackCompletedEvent(future, error); } private StreamsOnTasksAssignedCallbackCompletedEvent invokeOnTasksAssignedCallback(final StreamsRebalanceData.Assignment assignment, final CompletableFuture future) { - final Optional error; - final Optional exceptionFromCallback = streamsGroupRebalanceCallbacks().onTasksAssigned(assignment); - if (exceptionFromCallback.isPresent()) { - error = Optional.of(ConsumerUtils.maybeWrapAsKafkaException(exceptionFromCallback.get(), "Task assignment callback throws an error")); - } else { - error = Optional.empty(); - streamsRebalanceData().setReconciledAssignment(assignment); - } + final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksAssigned(assignment)); + final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task assignment callback throws an error")); return new StreamsOnTasksAssignedCallbackCompletedEvent(future, error); } private StreamsOnAllTasksLostCallbackCompletedEvent invokeOnAllTasksLostCallback(final CompletableFuture future) { - final Optional error; - final Optional exceptionFromCallback = streamsGroupRebalanceCallbacks().onAllTasksLost(); - if (exceptionFromCallback.isPresent()) { - error = Optional.of(ConsumerUtils.maybeWrapAsKafkaException(exceptionFromCallback.get(), "All tasks lost callback throws an error")); - } else { - error = Optional.empty(); - streamsRebalanceData().setReconciledAssignment(StreamsRebalanceData.Assignment.EMPTY); - } + final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeAllTasksLost()); + final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "All tasks lost callback throws an error")); return new StreamsOnAllTasksLostCallbackCompletedEvent(future, error); } - private StreamsRebalanceData streamsRebalanceData() { - return streamsRebalanceData.orElseThrow( - () -> new IllegalStateException("Background event processor was not created to be used with Streams " + - "rebalance protocol events")); - } - - private StreamsRebalanceListener streamsGroupRebalanceCallbacks() { - return streamsGroupRebalanceCallbacks.orElseThrow( + private StreamsRebalanceListenerInvoker streamsRebalanceListenerInvoker() { + return streamsRebalanceListenerInvoker.orElseThrow( () -> new IllegalStateException("Background event processor was not created to be used with Streams " + "rebalance protocol events")); } @@ -328,7 +287,8 @@ private StreamsRebalanceListener streamsGroupRebalanceCallbacks() { private final ApplicationEventHandler applicationEventHandler; private final Time time; private final AtomicReference> groupMetadata = new AtomicReference<>(Optional.empty()); - private final AsyncConsumerMetrics kafkaConsumerMetrics; + private final AsyncConsumerMetrics asyncConsumerMetrics; + private final KafkaConsumerMetrics kafkaConsumerMetrics; private Logger log; private final String clientId; private final BlockingQueue backgroundEventQueue; @@ -370,6 +330,7 @@ private 
StreamsRebalanceListener streamsGroupRebalanceCallbacks() { private final WakeupTrigger wakeupTrigger = new WakeupTrigger(); private final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker; + private final Optional streamsRebalanceListenerInvoker; // Last triggered async commit future. Used to wait until all previous async commits are completed. // We only need to keep track of the last one, since they are guaranteed to complete in order. private CompletableFuture> lastPendingAsyncCommit = null; @@ -390,7 +351,7 @@ public void onGroupAssignmentUpdated(Set partitions) { setGroupAssignmentSnapshot(partitions); } }; - + public AsyncKafkaConsumer(final ConsumerConfig config, final Deserializer keyDeserializer, final Deserializer valueDeserializer, @@ -439,7 +400,8 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); - this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); @@ -463,7 +425,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - kafkaConsumerMetrics + asyncConsumerMetrics ); // This FetchBuffer is shared between the application and network threads. @@ -478,7 +440,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), backgroundEventHandler, false, - kafkaConsumerMetrics + asyncConsumerMetrics ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); @@ -511,7 +473,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, - kafkaConsumerMetrics + asyncConsumerMetrics ); this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, @@ -519,7 +481,9 @@ public AsyncKafkaConsumer(final ConsumerConfig config, time, new RebalanceCallbackMetricsManager(metrics) ); - this.backgroundEventProcessor = new BackgroundEventProcessor(streamsRebalanceData); + this.streamsRebalanceListenerInvoker = streamsRebalanceData.map(s -> + new StreamsRebalanceListenerInvoker(logContext, s)); + this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); // The FetchCollector is only used on the application thread. 
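For orientation on the StreamsRebalanceListenerInvoker wired in above: it centralizes the pattern the removed BackgroundEventProcessor code had inline, namely invoke the registered listener and hand any exception back to the caller instead of throwing. A simplified, self-contained sketch of that pattern, using placeholder types rather than the real Streams listener and data classes:

    import java.util.Optional;
    import java.util.Set;

    // Placeholder listener type standing in for the real Streams rebalance listener interface.
    interface TasksListenerSketch {
        Optional<Exception> onTasksRevoked(Set<String> activeTasks);
        Optional<Exception> onAllTasksLost();
    }

    // Sketch of the invoker pattern: hold an optional listener, run its callback, and return
    // any error so the caller can wrap it (e.g. as a KafkaException) rather than throwing here.
    final class ListenerInvokerSketch {
        private Optional<TasksListenerSketch> listener = Optional.empty();

        void setRebalanceListener(TasksListenerSketch l) {
            this.listener = Optional.of(l);
        }

        // Returns null when there is no listener or the callback completed without error,
        // mirroring the Optional.ofNullable(...) handling in the hunk above.
        Exception invokeTasksRevoked(Set<String> activeTasks) {
            return listener.map(l -> l.onTasksRevoked(activeTasks).orElse(null)).orElse(null);
        }

        Exception invokeAllTasksLost() {
            return listener.map(l -> l.onAllTasksLost().orElse(null)).orElse(null);
        }
    }

In the patch itself, the background-event processor then wraps a non-null result via ConsumerUtils.maybeWrapAsKafkaException before completing the corresponding callback-completed event.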
@@ -579,6 +543,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.time = time; this.backgroundEventQueue = backgroundEventQueue; this.rebalanceListenerInvoker = rebalanceListenerInvoker; + this.streamsRebalanceListenerInvoker = Optional.empty(); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaper; this.metrics = metrics; @@ -589,14 +554,15 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.defaultApiTimeoutMs = Duration.ofMillis(defaultApiTimeoutMs); this.deserializers = deserializers; this.applicationEventHandler = applicationEventHandler; - this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); this.clientTelemetryReporter = Optional.empty(); this.autoCommitEnabled = autoCommitEnabled; this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - kafkaConsumerMetrics + asyncConsumerMetrics ); } @@ -624,7 +590,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); this.clientTelemetryReporter = Optional.empty(); - ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); + ConsumerMetrics metricsRegistry = new ConsumerMetrics(); FetchMetricsManager fetchMetricsManager = new FetchMetricsManager(metrics, metricsRegistry.fetcherMetrics); this.fetchCollector = new FetchCollector<>(logContext, metadata, @@ -633,7 +599,8 @@ public AsyncKafkaConsumer(final ConsumerConfig config, deserializers, fetchMetricsManager, time); - this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, @@ -647,7 +614,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - kafkaConsumerMetrics + asyncConsumerMetrics ); this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, @@ -664,7 +631,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, metadata, backgroundEventHandler, false, - kafkaConsumerMetrics + asyncConsumerMetrics ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); Supplier requestManagersSupplier = RequestManagers.supplier( @@ -698,7 +665,8 @@ public AsyncKafkaConsumer(final ConsumerConfig config, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, - kafkaConsumerMetrics); + asyncConsumerMetrics); + this.streamsRebalanceListenerInvoker = Optional.empty(); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = new CompletableEventReaper(logContext); } @@ -932,7 +900,7 @@ public void commitAsync(OffsetCommitCallback callback) { @Override public void commitAsync(Map offsets, OffsetCommitCallback callback) { - commitAsync(Optional.of(offsets), callback); + commitAsync(Optional.of(new HashMap<>(offsets)), callback); } private void commitAsync(Optional> offsets, OffsetCommitCallback callback) { @@ -959,7 +927,7 @@ private void commitAsync(Optional> offset } private 
CompletableFuture> commit(final CommitEvent commitEvent) { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); offsetCommitCallbackInvoker.executeCallbacks(); if (commitEvent.offsets().isPresent() && commitEvent.offsets().get().isEmpty()) { @@ -1088,7 +1056,7 @@ public Map committed(final Set committed(final Set beginningOrEndOffset(Collection(); } Map offsetAndTimestampMap; @@ -1348,7 +1319,7 @@ public OptionalLong currentLag(TopicPartition topicPartition) { public ConsumerGroupMetadata groupMetadata() { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); return groupMetadata.get().get(); } finally { release(); @@ -1370,8 +1341,8 @@ public void close() { close(CloseOptions.timeout(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS))); } + @Deprecated @Override - @SuppressWarnings("deprecation") public void close(Duration timeout) { close(CloseOptions.timeout(timeout)); } @@ -1474,7 +1445,7 @@ private void close(Duration timeout, CloseOptions.GroupMembershipOperation membe () -> autoCommitOnClose(closeTimer), firstException); swallow(log, Level.ERROR, "Failed to stop finding coordinator", this::stopFindCoordinatorOnClose, firstException); - swallow(log, Level.ERROR, "Failed to release group assignment", + swallow(log, Level.ERROR, "Failed to run rebalance callbacks", this::runRebalanceCallbacksOnClose, firstException); swallow(log, Level.ERROR, "Failed to leave group while closing consumer", () -> leaveGroupOnClose(closeTimer, membershipOperation), firstException); @@ -1491,6 +1462,7 @@ private void close(Duration timeout, CloseOptions.GroupMembershipOperation membe closeQuietly(interceptors, "consumer interceptors", firstException); closeQuietly(kafkaConsumerMetrics, "kafka consumer metrics", firstException); + closeQuietly(asyncConsumerMetrics, "async consumer metrics", firstException); closeQuietly(metrics, "consumer metrics", firstException); closeQuietly(deserializers, "consumer deserializers", firstException); clientTelemetryReporter.ifPresent(reporter -> closeQuietly(reporter, "async consumer telemetry reporter", firstException)); @@ -1513,7 +1485,7 @@ private Timer createTimerForCloseRequests(Duration timeout) { } private void autoCommitOnClose(final Timer timer) { - if (groupMetadata.get().isEmpty()) + if (groupMetadata.get().isEmpty() || applicationEventHandler == null) return; if (autoCommitEnabled) @@ -1528,28 +1500,41 @@ private void runRebalanceCallbacksOnClose() { int memberEpoch = groupMetadata.get().get().generationId(); - Set assignedPartitions = groupAssignmentSnapshot.get(); + Exception error = null; - if (assignedPartitions.isEmpty()) - // Nothing to revoke. - return; + if (streamsRebalanceListenerInvoker != null && streamsRebalanceListenerInvoker.isPresent()) { - SortedSet droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); - droppedPartitions.addAll(assignedPartitions); + if (memberEpoch > 0) { + error = streamsRebalanceListenerInvoker.get().invokeAllTasksRevoked(); + } else { + error = streamsRebalanceListenerInvoker.get().invokeAllTasksLost(); + } - final Exception error; + } else if (rebalanceListenerInvoker != null) { - if (memberEpoch > 0) - error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions); - else - error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions); + Set assignedPartitions = groupAssignmentSnapshot.get(); + + if (assignedPartitions.isEmpty()) + // Nothing to revoke. 
+ return; + + SortedSet droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + droppedPartitions.addAll(assignedPartitions); + + if (memberEpoch > 0) { + error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions); + } else { + error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions); + } + + } if (error != null) throw ConsumerUtils.maybeWrapAsKafkaException(error); } private void leaveGroupOnClose(final Timer timer, final CloseOptions.GroupMembershipOperation membershipOperation) { - if (groupMetadata.get().isEmpty()) + if (groupMetadata.get().isEmpty() || applicationEventHandler == null) return; log.debug("Leaving the consumer group during consumer close"); @@ -1565,7 +1550,7 @@ private void leaveGroupOnClose(final Timer timer, final CloseOptions.GroupMember } private void stopFindCoordinatorOnClose() { - if (groupMetadata.get().isEmpty()) + if (groupMetadata.get().isEmpty() || applicationEventHandler == null) return; log.debug("Stop finding coordinator during consumer close"); applicationEventHandler.add(new StopFindCoordinatorOnCloseEvent()); @@ -1601,12 +1586,12 @@ public void commitSync(final Duration timeout) { @Override public void commitSync(Map offsets) { - commitSync(Optional.of(offsets), defaultApiTimeoutMs); + commitSync(Optional.of(new HashMap<>(offsets)), defaultApiTimeoutMs); } @Override public void commitSync(Map offsets, Duration timeout) { - commitSync(Optional.of(offsets), timeout); + commitSync(Optional.of(new HashMap<>(offsets)), timeout); } private void commitSync(Optional> offsets, Duration timeout) { @@ -1630,7 +1615,7 @@ private void commitSync(Optional> offsets } private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, boolean enableWakeup) { - if (lastPendingAsyncCommit == null) { + if (lastPendingAsyncCommit == null || offsetCommitCallbackInvoker == null) { return; } @@ -1807,7 +1792,7 @@ private Fetch pollForFetches(Timer timer) { // use of a shorter, dedicated "pollTimer" here which updates "timer" so that calling method (poll) will // correctly handle the overall timeout. try { - fetchBuffer.awaitNotEmpty(pollTimer); + fetchBuffer.awaitWakeup(pollTimer); } catch (InterruptException e) { log.trace("Interrupt during fetch", e); throw e; @@ -1901,7 +1886,7 @@ private boolean isCommittedOffsetsManagementEnabled() { private void sendFetches(Timer timer) { try { applicationEventHandler.addAndGet(new CreateFetchRequestsEvent(calculateDeadlineMs(timer))); - } catch (TimeoutException e) { + } catch (TimeoutException swallow) { // Can be ignored, per above comments. } } @@ -1959,8 +1944,12 @@ public void subscribe(Collection topics, ConsumerRebalanceListener liste } public void subscribe(Collection topics, StreamsRebalanceListener streamsRebalanceListener) { + + streamsRebalanceListenerInvoker + .orElseThrow(() -> new IllegalStateException("Consumer was not created to be used with Streams rebalance protocol events")) + .setRebalanceListener(streamsRebalanceListener); + subscribeInternal(topics, Optional.empty()); - backgroundEventProcessor.setStreamsRebalanceListener(streamsRebalanceListener); } @Override @@ -2030,7 +2019,7 @@ private void release() { private void subscribeInternal(Pattern pattern, Optional listener) { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? 
"null" : "empty")); @@ -2054,7 +2043,7 @@ private void subscribeToRegex(SubscriptionPattern pattern, Optional listener) { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); throwIfSubscriptionPatternIsInvalid(pattern); log.info("Subscribing to regular expression {}", pattern); applicationEventHandler.addAndGet(new TopicRe2JPatternSubscriptionChangeEvent( @@ -2078,7 +2067,7 @@ private void throwIfSubscriptionPatternIsInvalid(SubscriptionPattern subscriptio private void subscribeInternal(Collection topics, Optional listener) { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { @@ -2126,7 +2115,7 @@ boolean processBackgroundEvents() { if (!events.isEmpty()) { long startMs = time.milliseconds(); for (BackgroundEvent event : events) { - kafkaConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); + asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); try { if (event instanceof CompletableEvent) backgroundEventReaper.add((CompletableEvent) event); @@ -2139,7 +2128,7 @@ boolean processBackgroundEvents() { log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } } - kafkaConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); + asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); @@ -2214,7 +2203,7 @@ T processBackgroundEvents(Future future, Timer timer, Predicate implements ConsumerDelegate { config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG), this.interceptors, config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED), - config.getString(ConsumerConfig.CLIENT_RACK_CONFIG), clientTelemetryReporter); } this.fetcher = new Fetcher<>( @@ -257,7 +256,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { retryBackoffMs, retryBackoffMaxMs); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); config.logUnused(); AppInfoParser.registerAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -297,7 +296,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { this.isolationLevel = ConsumerUtils.configuredIsolationLevel(config); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.assignors = assignors; - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); @@ -330,9 +329,9 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { heartbeatIntervalMs, groupId.get(), groupInstanceId, + rackId, retryBackoffMs, - retryBackoffMaxMs, - true + retryBackoffMaxMs ); this.coordinator = new ConsumerCoordinator( rebalanceConfig, @@ -348,7 +347,6 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { autoCommitIntervalMs, interceptors, throwOnStableOffsetNotSupported, - rackId, clientTelemetryReporter 
); } else { @@ -362,7 +360,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { int maxPollRecords = config.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG); boolean checkCrcs = config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG); - ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); + ConsumerMetrics metricsRegistry = new ConsumerMetrics(); FetchMetricsManager metricsManager = new FetchMetricsManager(metrics, metricsRegistry.fetcherMetrics); ApiVersions apiVersions = new ApiVersions(); FetchConfig fetchConfig = new FetchConfig( @@ -478,7 +476,7 @@ public void subscribe(Collection topics) { private void subscribeInternal(Collection topics, Optional listener) { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { @@ -559,7 +557,7 @@ public void subscribe(SubscriptionPattern pattern) { * configured at-least one partition assignment strategy */ private void subscribeInternal(Pattern pattern, Optional listener) { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? "null" : "empty")); @@ -743,7 +741,7 @@ public void commitSync(final Map offsets, fin acquireAndEnsureOpen(); long commitStart = time.nanoseconds(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); offsets.forEach(this::updateLastSeenEpochIfNewer); if (!coordinator.commitOffsetsSync(new HashMap<>(offsets), time.timer(timeout))) { throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before successfully " + @@ -769,7 +767,7 @@ public void commitAsync(OffsetCommitCallback callback) { public void commitAsync(final Map offsets, OffsetCommitCallback callback) { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); log.debug("Committing offsets: {}", offsets); offsets.forEach(this::updateLastSeenEpochIfNewer); coordinator.commitOffsetsAsync(new HashMap<>(offsets), callback); @@ -890,7 +888,7 @@ public Map committed(final Set offsets; offsets = coordinator.fetchCommittedOffsets(partitions, time.timer(timeout)); if (offsets == null) { @@ -1079,7 +1077,7 @@ public OptionalLong currentLag(TopicPartition topicPartition) { public ConsumerGroupMetadata groupMetadata() { acquireAndEnsureOpen(); try { - maybeThrowInvalidGroupIdException(); + throwIfGroupIdNotDefined(); return coordinator.groupMetadata(); } finally { release(); @@ -1109,8 +1107,8 @@ public void close() { close(CloseOptions.timeout(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS))); } + @Deprecated @Override - @SuppressWarnings("deprecation") public void close(Duration timeout) { close(CloseOptions.timeout(timeout)); } @@ -1273,7 +1271,7 @@ private void throwIfNoAssignorsConfigured() { ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " configuration property"); } - private void maybeThrowInvalidGroupIdException() { + private void throwIfGroupIdNotDefined() { if (groupId.isEmpty()) throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " + "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration."); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 284707a812b53..6aae084fd47e2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -34,6 +34,8 @@ import org.apache.kafka.common.errors.UnstableOffsetCommitException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; +import org.apache.kafka.common.message.OffsetFetchRequestData; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; @@ -42,6 +44,7 @@ import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; @@ -178,6 +181,14 @@ public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { // poll when the coordinator node is known and fatal error is not present if (coordinatorRequestManager.coordinator().isEmpty()) { pendingRequests.maybeFailOnCoordinatorFatalError(); + + if (closing && pendingRequests.hasUnsentRequests()) { + CommitFailedException exception = new CommitFailedException( + "Failed to commit offsets: Coordinator unknown and consumer is closing"); + pendingRequests.drainPendingCommits() + .forEach(request -> request.future().completeExceptionally(exception)); + } + return EMPTY; } @@ -538,7 +549,7 @@ private void fetchOffsetsWithRetries(final OffsetFetchRequestState fetchRequest, boolean inflightRemoved = pendingRequests.inflightOffsetFetches.remove(fetchRequest); if (!inflightRemoved) { log.warn("A duplicated, inflight, request was identified, but unable to find it in the " + - "outbound buffer:" + fetchRequest); + "outbound buffer: {}", fetchRequest); } if (error == null) { maybeUpdateLastSeenEpochIfNewer(res); @@ -586,6 +597,8 @@ public void onMemberEpochUpdated(Optional memberEpoch, String memberId) if (memberEpoch.isEmpty() && memberInfo.memberEpoch.isPresent()) { log.info("Member {} won't include epoch in following offset " + "commit/fetch requests because it has left the group.", memberInfo.memberId); + } else if (memberEpoch.isPresent()) { + log.debug("Member {} will include new member epoch {} in following offset commit/fetch requests.", memberId, memberEpoch); } memberInfo.memberId = memberId; memberInfo.memberEpoch = memberEpoch; @@ -727,7 +740,7 @@ public NetworkClientDelegate.UnsentRequest toUnsentRequest() { lastEpochSentOnCommit = Optional.empty(); } - OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(data); + OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames(data); return buildRequestWithResponseHandling(builder); } @@ -970,21 +983,37 @@ public boolean sameRequest(final OffsetFetchRequestState request) { } public NetworkClientDelegate.UnsentRequest toUnsentRequest() { + List topics = requestedPartitions.stream() + .collect(Collectors.groupingBy(TopicPartition::topic)) + .entrySet() + .stream() + .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(entry.getKey()) + 
.setPartitionIndexes(entry.getValue().stream() + .map(TopicPartition::partition) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); - OffsetFetchRequest.Builder builder = memberInfo.memberEpoch. - map(epoch -> new OffsetFetchRequest.Builder( - groupId, - memberInfo.memberId, - epoch, - true, - new ArrayList<>(this.requestedPartitions), - throwOnFetchStableOffsetUnsupported)) + OffsetFetchRequest.Builder builder = memberInfo.memberEpoch + .map(epoch -> OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(true) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setMemberId(memberInfo.memberId) + .setMemberEpoch(epoch) + .setTopics(topics))), + throwOnFetchStableOffsetUnsupported)) // Building request without passing member ID/epoch to leave the logic to choose // default values when not present on the request builder. - .orElseGet(() -> new OffsetFetchRequest.Builder( - groupId, - true, - new ArrayList<>(this.requestedPartitions), + .orElseGet(() -> OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(true) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setTopics(topics))), throwOnFetchStableOffsetUnsupported)); return buildRequestWithResponseHandling(builder); } @@ -995,13 +1024,14 @@ public NetworkClientDelegate.UnsentRequest toUnsentRequest() { @Override void onResponse(final ClientResponse response) { long currentTimeMs = response.receivedTimeMs(); - OffsetFetchResponse fetchResponse = (OffsetFetchResponse) response.responseBody(); - Errors responseError = fetchResponse.groupLevelError(groupId); - if (responseError != Errors.NONE) { - onFailure(currentTimeMs, responseError); + var fetchResponse = (OffsetFetchResponse) response.responseBody(); + var groupResponse = fetchResponse.group(groupId); + var error = Errors.forCode(groupResponse.errorCode()); + if (error != Errors.NONE) { + onFailure(currentTimeMs, error); return; } - onSuccess(currentTimeMs, fetchResponse); + onSuccess(currentTimeMs, groupResponse); } /** @@ -1066,53 +1096,58 @@ void removeRequest() { * offsets contained in the response, and record a successful request attempt. 
*/ private void onSuccess(final long currentTimeMs, - final OffsetFetchResponse response) { - Set unauthorizedTopics = null; - Map responseData = - response.partitionDataMap(groupId); - Map offsets = new HashMap<>(responseData.size()); - Set unstableTxnOffsetTopicPartitions = new HashSet<>(); - boolean failedRequestRegistered = false; - for (Map.Entry entry : responseData.entrySet()) { - TopicPartition tp = entry.getKey(); - OffsetFetchResponse.PartitionData partitionData = entry.getValue(); - if (partitionData.hasError()) { - Errors error = partitionData.error; - log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); + final OffsetFetchResponseData.OffsetFetchResponseGroup response) { + var offsets = new HashMap(); + var unstableTxnOffsetTopicPartitions = new HashSet(); + var unauthorizedTopics = new HashSet(); + var failedRequestRegistered = false; + + for (var topic : response.topics()) { + for (var partition : topic.partitions()) { + var tp = new TopicPartition( + topic.name(), + partition.partitionIndex() + ); + var error = Errors.forCode(partition.errorCode()); + if (error != Errors.NONE) { + log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); - if (!failedRequestRegistered) { - onFailedAttempt(currentTimeMs); - failedRequestRegistered = true; - } + if (!failedRequestRegistered) { + onFailedAttempt(currentTimeMs); + failedRequestRegistered = true; + } - if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { - future.completeExceptionally(new KafkaException("Topic or Partition " + tp + " does not exist")); - return; - } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { - if (unauthorizedTopics == null) { - unauthorizedTopics = new HashSet<>(); + if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { + future.completeExceptionally(new KafkaException("Topic or Partition " + tp + " does not exist")); + return; + } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { + unauthorizedTopics.add(tp.topic()); + } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { + unstableTxnOffsetTopicPartitions.add(tp); + } else { + // Fail with a non-retriable KafkaException for all unexpected partition + // errors (even if they are retriable) + future.completeExceptionally(new KafkaException("Unexpected error in fetch offset " + + "response for partition " + tp + ": " + error.message())); + return; } - unauthorizedTopics.add(tp.topic()); - } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { - unstableTxnOffsetTopicPartitions.add(tp); + } else if (partition.committedOffset() >= 0) { + // record the position with the offset (-1 indicates no committed offset to fetch); + // if there's no committed offset, record as null + offsets.put(tp, new OffsetAndMetadata( + partition.committedOffset(), + RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), + partition.metadata() + )); } else { - // Fail with a non-retriable KafkaException for all unexpected partition - // errors (even if they are retriable) - future.completeExceptionally(new KafkaException("Unexpected error in fetch offset " + - "response for partition " + tp + ": " + error.message())); - return; + log.info("Found no committed offset for partition {}", tp); + offsets.put(tp, null); } - } else if (partitionData.offset >= 0) { - // record the position with the offset (-1 indicates no committed offset to fetch); - // if there's no committed offset, record as null - offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata)); - } else { - 
log.info("Found no committed offset for partition {}", tp); - offsets.put(tp, null); + } } - if (unauthorizedTopics != null) { + if (!unauthorizedTopics.isEmpty()) { future.completeExceptionally(new TopicAuthorizationException(unauthorizedTopics)); } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) { // TODO: Optimization question: Do we need to retry all partitions upon a single partition error? diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java index 01fc605ea7982..4956d64228dbb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java @@ -49,6 +49,7 @@ import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; +import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; @@ -62,6 +63,7 @@ import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; @@ -176,7 +178,6 @@ public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, int autoCommitIntervalMs, ConsumerInterceptors interceptors, boolean throwOnFetchStableOffsetsUnsupported, - String rackId, Optional clientTelemetryReporter) { this(rebalanceConfig, logContext, @@ -191,7 +192,6 @@ public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, autoCommitIntervalMs, interceptors, throwOnFetchStableOffsetsUnsupported, - rackId, clientTelemetryReporter, Optional.empty()); } @@ -212,7 +212,6 @@ public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, int autoCommitIntervalMs, ConsumerInterceptors interceptors, boolean throwOnFetchStableOffsetsUnsupported, - String rackId, Optional clientTelemetryReporter, Optional> heartbeatThreadSupplier) { super(rebalanceConfig, @@ -226,7 +225,7 @@ public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, this.rebalanceConfig = rebalanceConfig; this.log = logContext.logger(ConsumerCoordinator.class); this.metadata = metadata; - this.rackId = rackId == null || rackId.isEmpty() ? 
Optional.empty() : Optional.of(rackId); + this.rackId = rebalanceConfig.rackId; this.metadataSnapshot = new MetadataSnapshot(this.rackId, subscriptions, metadata.fetch(), metadata.updateVersion()); this.subscriptions = subscriptions; this.defaultOffsetCommitCallback = new DefaultOffsetCommitCallback(); @@ -1303,23 +1302,25 @@ RequestFuture sendOffsetCommitRequest(final Map sendOffsetCommitRequest(final Map> sendOffsetFetchReq return RequestFuture.coordinatorNotAvailable(); log.debug("Fetching committed offsets for partitions: {}", partitions); + // construct the request - OffsetFetchRequest.Builder requestBuilder = - new OffsetFetchRequest.Builder(this.rebalanceConfig.groupId, true, new ArrayList<>(partitions), throwOnFetchStableOffsetsUnsupported); + List topics = partitions.stream() + .collect(Collectors.groupingBy(TopicPartition::topic)) + .entrySet() + .stream() + .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(entry.getKey()) + .setPartitionIndexes(entry.getValue().stream() + .map(TopicPartition::partition) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); + + OffsetFetchRequest.Builder requestBuilder = OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(true) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(this.rebalanceConfig.groupId) + .setTopics(topics))), + throwOnFetchStableOffsetsUnsupported); // send the request with a callback return client.send(coordinator, requestBuilder) @@ -1493,64 +1512,71 @@ private OffsetFetchResponseHandler() { @Override public void handle(OffsetFetchResponse response, RequestFuture> future) { - Errors responseError = response.groupLevelError(rebalanceConfig.groupId); - if (responseError != Errors.NONE) { - log.debug("Offset fetch failed: {}", responseError.message()); + var group = response.group(rebalanceConfig.groupId); + var groupError = Errors.forCode(group.errorCode()); - if (responseError == Errors.COORDINATOR_NOT_AVAILABLE || - responseError == Errors.NOT_COORDINATOR) { + if (groupError != Errors.NONE) { + log.debug("Offset fetch failed: {}", groupError.message()); + + if (groupError == Errors.COORDINATOR_NOT_AVAILABLE || + groupError == Errors.NOT_COORDINATOR) { // re-discover the coordinator and retry - markCoordinatorUnknown(responseError); - future.raise(responseError); - } else if (responseError == Errors.GROUP_AUTHORIZATION_FAILED) { + markCoordinatorUnknown(groupError); + future.raise(groupError); + } else if (groupError == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); - } else if (responseError.exception() instanceof RetriableException) { + } else if (groupError.exception() instanceof RetriableException) { // retry - future.raise(responseError); + future.raise(groupError); } else { - future.raise(new KafkaException("Unexpected error in fetch offset response: " + responseError.message())); + future.raise(new KafkaException("Unexpected error in fetch offset response: " + groupError.message())); } return; } - Set unauthorizedTopics = null; - Map responseData = - response.partitionDataMap(rebalanceConfig.groupId); - Map offsets = new HashMap<>(responseData.size()); - Set unstableTxnOffsetTopicPartitions = new HashSet<>(); - for (Map.Entry entry : responseData.entrySet()) { - TopicPartition tp = entry.getKey(); - OffsetFetchResponse.PartitionData partitionData = entry.getValue(); - if (partitionData.hasError()) { - Errors error = 
partitionData.error; - log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); - - if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { - future.raise(new KafkaException("Topic or Partition " + tp + " does not exist")); - return; - } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { - if (unauthorizedTopics == null) { - unauthorizedTopics = new HashSet<>(); + var offsets = new HashMap(); + var unstableTxnOffsetTopicPartitions = new HashSet(); + var unauthorizedTopics = new HashSet(); + + for (var topic : group.topics()) { + for (var partition : topic.partitions()) { + var tp = new TopicPartition( + topic.name(), + partition.partitionIndex() + ); + var error = Errors.forCode(partition.errorCode()); + + if (error != Errors.NONE) { + log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); + + if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { + future.raise(new KafkaException("Topic or Partition " + tp + " does not exist")); + return; + } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { + unauthorizedTopics.add(tp.topic()); + } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { + unstableTxnOffsetTopicPartitions.add(tp); + } else { + future.raise(new KafkaException("Unexpected error in fetch offset response for partition " + + tp + ": " + error.message())); + return; } - unauthorizedTopics.add(tp.topic()); - } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { - unstableTxnOffsetTopicPartitions.add(tp); + } else if (partition.committedOffset() >= 0) { + // record the position with the offset (-1 indicates no committed offset to fetch); + // if there's no committed offset, record as null + offsets.put(tp, new OffsetAndMetadata( + partition.committedOffset(), + RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), + partition.metadata() + )); } else { - future.raise(new KafkaException("Unexpected error in fetch offset response for partition " + - tp + ": " + error.message())); - return; + log.info("Found no committed offset for partition {}", tp); + offsets.put(tp, null); } - } else if (partitionData.offset >= 0) { - // record the position with the offset (-1 indicates no committed offset to fetch); - // if there's no committed offset, record as null - offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata)); - } else { - log.info("Found no committed offset for partition {}", tp); - offsets.put(tp, null); } } - if (unauthorizedTopics != null) { + if (!unauthorizedTopics.isEmpty()) { future.raise(new TopicAuthorizationException(unauthorizedTopics)); } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) { // just retry diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java index 2845f4bc9ee51..c5f95305a4747 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java @@ -247,6 +247,7 @@ public void reset() { sentFields.reset(); } + @SuppressWarnings("NPathComplexity") public ConsumerGroupHeartbeatRequestData buildRequestData() { ConsumerGroupHeartbeatRequestData data = new ConsumerGroupHeartbeatRequestData(); @@ -306,6 +307,12 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { sentFields.localAssignment = local; } + // RackId - sent when 
joining + String rackId = membershipManager.rackId().orElse(null); + if (sendAllFields) { + data.setRackId(rackId); + } + return data; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java index 1003e39f9088b..82c209ac1283c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java @@ -44,7 +44,6 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.DEFAULT; import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.LEAVE_GROUP; @@ -112,6 +111,8 @@ public class ConsumerMembershipManager extends AbstractMembershipManager groupInstanceId; + private final Optional rackId; + /** * Rebalance timeout. To be used as time limit for the commit request issued * when a new assignment is received, that is retried until it succeeds, fails with a @@ -140,6 +141,7 @@ public class ConsumerMembershipManager extends AbstractMembershipManager groupInstanceId, + Optional rackId, int rebalanceTimeoutMs, Optional serverAssignor, SubscriptionState subscriptions, @@ -152,6 +154,7 @@ public ConsumerMembershipManager(String groupId, boolean autoCommitEnabled) { this(groupId, groupInstanceId, + rackId, rebalanceTimeoutMs, serverAssignor, subscriptions, @@ -160,13 +163,14 @@ public ConsumerMembershipManager(String groupId, logContext, backgroundEventHandler, time, - new ConsumerRebalanceMetricsManager(metrics), + new ConsumerRebalanceMetricsManager(metrics, subscriptions), autoCommitEnabled); } // Visible for testing ConsumerMembershipManager(String groupId, Optional groupInstanceId, + Optional rackId, int rebalanceTimeoutMs, Optional serverAssignor, SubscriptionState subscriptions, @@ -185,6 +189,7 @@ public ConsumerMembershipManager(String groupId, metricsManager, autoCommitEnabled); this.groupInstanceId = groupInstanceId; + this.rackId = rackId; this.rebalanceTimeoutMs = rebalanceTimeoutMs; this.serverAssignor = serverAssignor; this.commitRequestManager = commitRequestManager; @@ -199,6 +204,10 @@ public Optional groupInstanceId() { return groupInstanceId; } + public Optional rackId() { + return rackId; + } + /** * {@inheritDoc} */ @@ -218,7 +227,7 @@ public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponse response) { "already leaving the group.", memberId, memberEpoch); return; } - if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { + if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} received a successful response to the heartbeat " + "to leave the group and completed the leave operation. ", memberId, memberEpoch); return; @@ -228,6 +237,13 @@ public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponse response) { " so it's not a member of the group. ", memberId, state); return; } + if (responseData.memberEpoch() < 0) { + log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " + + "is in {} state and the member epoch is invalid: {}. 
", memberId, memberEpoch, state, + responseData.memberEpoch()); + maybeCompleteLeaveInProgress(); + return; + } updateMemberEpoch(responseData.memberEpoch()); @@ -398,14 +414,14 @@ private void logPausedPartitionsBeingRevoked(Set partitionsToRev Set revokePausedPartitions = subscriptions.pausedPartitions(); revokePausedPartitions.retainAll(partitionsToRevoke); if (!revokePausedPartitions.isEmpty()) { - log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("The pause flag in partitions {} will be removed due to revocation.", revokePausedPartitions); } } @Override public boolean isLeavingGroup() { CloseOptions.GroupMembershipOperation leaveGroupOperation = leaveGroupOperation(); - if (REMAIN_IN_GROUP == leaveGroupOperation) { + if (REMAIN_IN_GROUP == leaveGroupOperation && groupInstanceId.isEmpty()) { return false; } @@ -416,7 +432,8 @@ public boolean isLeavingGroup() { boolean hasLeaveOperation = DEFAULT == leaveGroupOperation || // Leave operation: both static and dynamic consumers will send a leave heartbeat LEAVE_GROUP == leaveGroupOperation || - // Remain in group: only static consumers will send a leave heartbeat, while dynamic members will not + // Remain in group: static consumers will send a leave heartbeat with -2 epoch to reflect that a member using the given + // instance id decided to leave the group and would be back within the session timeout. groupInstanceId().isPresent(); return isLeavingState && hasLeaveOperation; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java index 434e989f068e5..677beaa5fa1c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.utils.LogContext; @@ -66,14 +67,34 @@ public boolean allowAutoTopicCreation() { return allowAutoTopicCreation; } + /** + * Constructs a metadata request builder for fetching cluster metadata for the topics the consumer needs. + * This will include: + *

+ * <ul>
+ *   <li>topics the consumer is subscribed to using topic names (calls to subscribe with topic name list or client-side regex)</li>
+ *   <li>topics the consumer is subscribed to using topic IDs (calls to subscribe with broker-side regex RE2J)</li>
+ *   <li>topics involved in calls for fetching offsets (transient topics)</li>
+ * </ul>
+ * Note that this will generate a request for all topics in the cluster only when the consumer is subscribed to a client-side regex. + */ @Override public synchronized MetadataRequest.Builder newMetadataRequestBuilder() { - if (subscription.hasPatternSubscription() || subscription.hasRe2JPatternSubscription()) + if (subscription.hasPatternSubscription()) { + // Consumer subscribed to client-side regex => request all topics to compute regex return MetadataRequest.Builder.allTopics(); + } + if (subscription.hasRe2JPatternSubscription() && transientTopics.isEmpty()) { + // Consumer subscribed to broker-side regex and no need for transient topic names metadata => request topic IDs + return MetadataRequest.Builder.forTopicIds(subscription.assignedTopicIds()); + } + // Subscription to explicit topic names or transient topics present. + // Note that in the case of RE2J broker-side regex subscription, we may end up in this path + // if there are transient topics. They are just needed temporarily (lifetime of offsets-related API calls), + // so we'll request them to unblock their APIs, then go back to requesting assigned topic IDs as needed List topics = new ArrayList<>(); topics.addAll(subscription.metadataTopics()); topics.addAll(transientTopics); - return new MetadataRequest.Builder(topics, allowAutoTopicCreation); + return MetadataRequest.Builder.forTopicNames(topics, allowAutoTopicCreation); } synchronized void addTransientTopics(Set topics) { @@ -86,6 +107,15 @@ synchronized void clearTransientTopics() { this.transientTopics.clear(); } + /** + * Check if the metadata for the topic should be retained, based on the topic name. + * It will return true for: + *
+ * <ul>
+ *   <li>topic names the consumer subscribed to</li>
+ *   <li>topic names that match a client-side regex the consumer subscribed to</li>
+ *   <li>topics involved in fetching offsets</li>
+ * </ul>
+ */ @Override protected synchronized boolean retainTopic(String topic, boolean isInternal, long nowMs) { if (transientTopics.contains(topic) || subscription.needsMetadata(topic)) @@ -94,6 +124,21 @@ protected synchronized boolean retainTopic(String topic, boolean isInternal, lon if (isInternal && !includeInternalTopics) return false; - return subscription.matchesSubscribedPattern(topic) || subscription.isAssignedFromRe2j(topic); + return subscription.matchesSubscribedPattern(topic); + } + + /** + * Check if the metadata for the topic should be retained, based on topic name and topic ID. + * This will return true for: + *
+ * <ul>
+ *   <li>topic names the consumer subscribed to</li>
+ *   <li>topic names that match a client-side regex the consumer subscribed to</li>
+ *   <li>topic IDs that have been received in an assignment from the broker after the consumer subscribed to a broker-side regex</li>
+ *   <li>topics involved in fetching offsets</li>
+ * </ul>
+ */ + @Override + protected synchronized boolean retainTopic(String topicName, Uuid topicId, boolean isInternal, long nowMs) { + return retainTopic(topicName, isInternal, nowMs) || subscription.isAssignedFromRe2j(topicId); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java index 19e9a7e832094..3aa0bbcfbcf70 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Set; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; + public class ConsumerMetrics { public FetchMetricsRegistry fetcherMetrics; @@ -32,8 +34,8 @@ public ConsumerMetrics(Set metricsTags, String metricGrpPrefix) { this.fetcherMetrics = new FetchMetricsRegistry(metricsTags, metricGrpPrefix); } - public ConsumerMetrics(String metricGroupPrefix) { - this(new HashSet<>(), metricGroupPrefix); + public ConsumerMetrics() { + this(new HashSet<>(), CONSUMER_METRIC_GROUP_PREFIX); } private List getAllTemplates() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index a48289919b023..d2d178a88c38b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -35,14 +35,13 @@ import java.io.Closeable; import java.time.Duration; +import java.util.ArrayList; import java.util.Collection; import java.util.LinkedList; import java.util.List; import java.util.Objects; -import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.function.Supplier; -import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.common.utils.Utils.closeQuietly; @@ -145,6 +144,7 @@ void initializeResources() { * */ void runOnce() { + // The following code avoids use of the Java Collections Streams API to reduce overhead in this loop. 
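On the comment just above: the hunk that follows replaces stream-map-reduce pipelines in runOnce() with plain loops so this per-iteration hot path does not allocate stream and Optional objects. A generic, standalone illustration of the same shape, using stand-in types rather than the real RequestManager interfaces:

    import java.util.List;
    import java.util.function.ToLongFunction;

    // Generic sketch: compute a minimum timeout over a list of managers with a plain loop,
    // as the patch does for pollWaitTimeMs and cachedMaximumTimeToWait, instead of
    // managers.stream().map(...).reduce(upperBoundMs, Math::min).
    final class MinTimeoutSketch {
        static <M> long minTimeoutMs(List<M> managers, ToLongFunction<M> timeoutOf, long upperBoundMs) {
            long min = upperBoundMs;
            for (M manager : managers) {
                min = Math.min(min, timeoutOf.applyAsLong(manager));
            }
            return min;
        }
    }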
processApplicationEvents(); final long currentTimeMs = time.milliseconds(); @@ -153,19 +153,24 @@ void runOnce() { } lastPollTimeMs = currentTimeMs; - final long pollWaitTimeMs = requestManagers.entries().stream() - .filter(Optional::isPresent) - .map(Optional::get) - .map(rm -> rm.poll(currentTimeMs)) - .map(networkClientDelegate::addAll) - .reduce(MAX_POLL_TIMEOUT_MS, Math::min); + long pollWaitTimeMs = MAX_POLL_TIMEOUT_MS; + + for (RequestManager rm : requestManagers.entries()) { + NetworkClientDelegate.PollResult pollResult = rm.poll(currentTimeMs); + long timeoutMs = networkClientDelegate.addAll(pollResult); + pollWaitTimeMs = Math.min(pollWaitTimeMs, timeoutMs); + } + networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs); - cachedMaximumTimeToWait = requestManagers.entries().stream() - .filter(Optional::isPresent) - .map(Optional::get) - .map(rm -> rm.maximumTimeToWait(currentTimeMs)) - .reduce(Long.MAX_VALUE, Math::min); + long maxTimeToWaitMs = Long.MAX_VALUE; + + for (RequestManager rm : requestManagers.entries()) { + long waitMs = rm.maximumTimeToWait(currentTimeMs); + maxTimeToWaitMs = Math.min(maxTimeToWaitMs, waitMs); + } + + cachedMaximumTimeToWait = maxTimeToWaitMs; reapExpiredApplicationEvents(currentTimeMs); List> uncompletedEvents = applicationEventReaper.uncompletedEvents(); @@ -233,15 +238,14 @@ private void reapExpiredApplicationEvents(long currentTimeMs) { * */ // Visible for testing - static void runAtClose(final Collection> requestManagers, + static void runAtClose(final Collection requestManagers, final NetworkClientDelegate networkClientDelegate, final long currentTimeMs) { - // These are the optional outgoing requests at the - requestManagers.stream() - .filter(Optional::isPresent) - .map(Optional::get) - .map(rm -> rm.pollOnClose(currentTimeMs)) - .forEach(networkClientDelegate::addAll); + // These are the optional outgoing requests at the time of closing the consumer + for (RequestManager rm : requestManagers) { + NetworkClientDelegate.PollResult pollResult = rm.pollOnClose(currentTimeMs); + networkClientDelegate.addAll(pollResult); + } } public boolean isRunning() { @@ -339,11 +343,20 @@ void cleanup() { log.trace("Closing the consumer network thread"); Timer timer = time.timer(closeTimeout); try { - runAtClose(requestManagers.entries(), networkClientDelegate, time.milliseconds()); + // If an error was thrown from initializeResources(), it's possible that the list of request managers + // is null, so check before using. If the request manager list is null, there wasn't any real work + // performed, so not being able to close the request managers isn't so bad. + if (requestManagers != null && networkClientDelegate != null) + runAtClose(requestManagers.entries(), networkClientDelegate, time.milliseconds()); } catch (Exception e) { log.error("Unexpected error during shutdown. Proceed with closing.", e); } finally { - sendUnsentRequests(timer); + // Likewise, if an error was thrown from initializeResources(), it's possible for the network client + // to be null, so check before using. If the network client is null, things have failed catastrophically + // enough that there aren't any outstanding requests to be sent anyway. 
+ if (networkClientDelegate != null) + sendUnsentRequests(timer); + asyncConsumerMetrics.recordApplicationEventExpiredSize(applicationEventReaper.reap(applicationEventQueue)); closeQuietly(requestManagers, "request managers"); @@ -356,12 +369,13 @@ void cleanup() { * If there is a metadata error, complete all uncompleted events that require subscription metadata. */ private void maybeFailOnMetadataError(List> events) { - List> subscriptionMetadataEvent = events.stream() - .filter(e -> e instanceof CompletableApplicationEvent) - .map(e -> (CompletableApplicationEvent) e) - .filter(CompletableApplicationEvent::requireSubscriptionMetadata) - .collect(Collectors.toList()); - + List> subscriptionMetadataEvent = new ArrayList<>(); + + for (CompletableEvent ce : events) { + if (ce instanceof CompletableApplicationEvent && ((CompletableApplicationEvent) ce).requireSubscriptionMetadata()) + subscriptionMetadataEvent.add((CompletableApplicationEvent) ce); + } + if (subscriptionMetadataEvent.isEmpty()) return; networkClientDelegate.getAndClearMetadataError().ifPresent(metadataError -> diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java index b42cf85a8602a..3f66b6ce3c383 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java @@ -30,7 +30,6 @@ import java.util.Optional; import java.util.Set; import java.util.SortedSet; -import java.util.stream.Collectors; /** * This class encapsulates the invocation of the callback methods defined in the {@link ConsumerRebalanceListener} @@ -55,7 +54,7 @@ public class ConsumerRebalanceListenerInvoker { } public Exception invokePartitionsAssigned(final SortedSet assignedPartitions) { - log.info("Adding newly assigned partitions: {}", assignedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("Adding newly assigned partitions: {}", assignedPartitions); Optional listener = subscriptions.rebalanceListener(); @@ -67,8 +66,12 @@ public Exception invokePartitionsAssigned(final SortedSet assign } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error("User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}", - listener.get().getClass().getName(), assignedPartitions, e); + log.error( + "User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}", + listener.get().getClass().getName(), + assignedPartitions, + e + ); return e; } } @@ -77,11 +80,11 @@ public Exception invokePartitionsAssigned(final SortedSet assign } public Exception invokePartitionsRevoked(final SortedSet revokedPartitions) { - log.info("Revoke previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("Revoke previously assigned partitions {}", revokedPartitions); Set revokePausedPartitions = subscriptions.pausedPartitions(); revokePausedPartitions.retainAll(revokedPartitions); if (!revokePausedPartitions.isEmpty()) - log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("The pause flag in partitions {} will be 
removed due to revocation.", revokePausedPartitions); Optional listener = subscriptions.rebalanceListener(); @@ -93,8 +96,12 @@ public Exception invokePartitionsRevoked(final SortedSet revoked } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error("User provided listener {} failed on invocation of onPartitionsRevoked for partitions {}", - listener.get().getClass().getName(), revokedPartitions, e); + log.error( + "User provided listener {} failed on invocation of onPartitionsRevoked for partitions {}", + listener.get().getClass().getName(), + revokedPartitions, + e + ); return e; } } @@ -103,11 +110,11 @@ public Exception invokePartitionsRevoked(final SortedSet revoked } public Exception invokePartitionsLost(final SortedSet lostPartitions) { - log.info("Lost previously assigned partitions {}", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("Lost previously assigned partitions {}", lostPartitions); Set lostPausedPartitions = subscriptions.pausedPartitions(); lostPausedPartitions.retainAll(lostPartitions); if (!lostPausedPartitions.isEmpty()) - log.info("The pause flag in partitions [{}] will be removed due to partition lost.", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("The pause flag in partitions {} will be removed due to partition lost.", lostPartitions); Optional listener = subscriptions.rebalanceListener(); @@ -119,8 +126,12 @@ public Exception invokePartitionsLost(final SortedSet lostPartit } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error("User provided listener {} failed on invocation of onPartitionsLost for partitions {}", - listener.get().getClass().getName(), lostPartitions, e); + log.error( + "User provided listener {} failed on invocation of onPartitionsLost for partitions {}", + listener.get().getClass().getName(), + lostPartitions, + e + ); return e; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index e4b0fa924c0d2..c07a6747559c7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -69,6 +69,7 @@ public final class ConsumerUtils { public static final String COORDINATOR_METRICS_SUFFIX = "-coordinator-metrics"; public static final String CONSUMER_METRICS_SUFFIX = "-metrics"; public static final String CONSUMER_METRIC_GROUP = CONSUMER_METRIC_GROUP_PREFIX + CONSUMER_METRICS_SUFFIX; + public static final String CONSUMER_SHARE_METRIC_GROUP = CONSUMER_SHARE_METRIC_GROUP_PREFIX + CONSUMER_METRICS_SUFFIX; /** * A fixed, large enough value will suffice for max. 
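
As an illustration only (a sketch, not part of the patch): the ConsumerMetadata change above picks the shape of the metadata request from the subscription mode — a client-side java.util.regex pattern needs every topic name to evaluate the regex, a broker-side RE2J pattern only needs metadata for the assigned topic IDs, and explicit topic names (or transient topics) are requested by name. The minimal consumer-side sketch below shows the three subscription modes; the bootstrap server, group id and topic names are placeholders, and it assumes the SubscriptionPattern subscribe overload and the "consumer" group protocol from KIP-848.

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import java.util.regex.Pattern;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.SubscriptionPattern;

public class SubscriptionModesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");                // placeholder group
        props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "consumer");               // new group protocol (KIP-848)
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Explicit topic names: metadata is requested only for the named topics.
            consumer.subscribe(List.of("orders"));
            consumer.poll(Duration.ofMillis(100));
            consumer.unsubscribe();   // subscription types are mutually exclusive, so reset first

            // Client-side regex: the client must see all topic names to evaluate the pattern,
            // so an all-topics metadata request is used.
            consumer.subscribe(Pattern.compile("orders.*"));
            consumer.poll(Duration.ofMillis(100));
            consumer.unsubscribe();

            // Broker-side RE2J regex: the broker evaluates the pattern, so the client only
            // needs metadata for the topic IDs it has been assigned.
            consumer.subscribe(new SubscriptionPattern("orders.*"));
            consumer.poll(Duration.ofMillis(100));
        }
    }
}
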
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java index 23adf9c9afaaa..6cf5bc301b370 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java @@ -27,6 +27,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; @@ -51,7 +52,7 @@ public class FetchBuffer implements AutoCloseable { private final Logger log; private final ConcurrentLinkedQueue completedFetches; private final Lock lock; - private final Condition notEmptyCondition; + private final Condition blockingCondition; private final IdempotentCloser idempotentCloser = new IdempotentCloser(); private final AtomicBoolean wokenup = new AtomicBoolean(false); @@ -62,7 +63,7 @@ public FetchBuffer(final LogContext logContext) { this.log = logContext.logger(FetchBuffer.class); this.completedFetches = new ConcurrentLinkedQueue<>(); this.lock = new ReentrantLock(); - this.notEmptyCondition = lock.newCondition(); + this.blockingCondition = lock.newCondition(); } /** @@ -95,13 +96,7 @@ boolean hasCompletedFetches(Predicate predicate) { } void add(CompletedFetch completedFetch) { - try { - lock.lock(); - completedFetches.add(completedFetch); - notEmptyCondition.signalAll(); - } finally { - lock.unlock(); - } + addAll(List.of(completedFetch)); } void addAll(Collection completedFetches) { @@ -111,7 +106,8 @@ void addAll(Collection completedFetches) { try { lock.lock(); this.completedFetches.addAll(completedFetches); - notEmptyCondition.signalAll(); + wokenup.set(true); + blockingCondition.signalAll(); } finally { lock.unlock(); } @@ -154,23 +150,23 @@ CompletedFetch poll() { } /** - * Allows the caller to await presence of data in the buffer. The method will block, returning only + * Allows the caller to await a response from the broker for requested data. The method will block, returning only * under one of the following conditions: * *
     * <ol>
-     *     <li>The buffer was already non-empty on entry</li>
-     *     <li>The buffer was populated during the wait</li>
+     *     <li>The buffer was already woken</li>
+     *     <li>The buffer was woken during the wait</li>
     *     <li>The remaining time on the {@link Timer timer} elapsed</li>
     *     <li>The thread was interrupted</li>
     * </ol>
* * @param timer Timer that provides time to wait */ - void awaitNotEmpty(Timer timer) { + void awaitWakeup(Timer timer) { try { lock.lock(); - while (isEmpty() && !wokenup.compareAndSet(true, false)) { + while (!wokenup.compareAndSet(true, false)) { // Update the timer before we head into the loop in case it took a while to get the lock. timer.update(); @@ -185,7 +181,7 @@ void awaitNotEmpty(Timer timer) { break; } - if (!notEmptyCondition.await(timer.remainingMs(), TimeUnit.MILLISECONDS)) { + if (!blockingCondition.await(timer.remainingMs(), TimeUnit.MILLISECONDS)) { break; } } @@ -198,10 +194,10 @@ void awaitNotEmpty(Timer timer) { } void wakeup() { - wokenup.set(true); try { lock.lock(); - notEmptyCondition.signalAll(); + wokenup.set(true); + blockingCondition.signalAll(); } finally { lock.unlock(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java index 2aa8aeaaffbad..bbe216c2fc837 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java @@ -155,7 +155,10 @@ private Fetch fetchRecords(final CompletedFetch nextInLineFetch, int maxRe log.debug("Not returning fetched records for partition {} since it is no longer assigned", tp); } else if (!subscriptions.isFetchable(tp)) { // this can happen when a partition is paused before fetched records are returned to the consumer's - // poll call or if the offset is being reset + // poll call or if the offset is being reset. + // It can also happen under the Consumer rebalance protocol, when the consumer changes its subscription. + // Until the consumer receives an updated assignment from the coordinator, it can hold assigned partitions + // that are not in the subscription anymore, so we make them not fetchable. log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", tp); } else { SubscriptionState.FetchPosition position = subscriptions.position(tp); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java index ac86d1ebeaab0..35d735d56c77a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java @@ -138,7 +138,7 @@ protected void maybeCloseFetchSessions(final Timer timer) { // here. log.debug("All requests couldn't be sent in the specific timeout period {}ms. " + "This may result in unnecessary fetch sessions at the broker. 
Consider increasing the timeout passed for " + - "KafkaConsumer.close(Duration timeout)", timer.timeoutMs()); + "KafkaConsumer.close(...)", timer.timeoutMs()); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java index cab7d804cadd6..ae39753f3d8e8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java @@ -60,7 +60,7 @@ public class RequestManagers implements Closeable { public final FetchRequestManager fetchRequestManager; public final Optional shareConsumeRequestManager; public final Optional streamsGroupHeartbeatRequestManager; - private final List> entries; + private final List entries; private final IdempotentCloser closer = new IdempotentCloser(); public RequestManagers(LogContext logContext, @@ -87,16 +87,16 @@ public RequestManagers(LogContext logContext, this.streamsMembershipManager = streamsMembershipManager; this.shareMembershipManager = Optional.empty(); - List> list = new ArrayList<>(); - list.add(coordinatorRequestManager); - list.add(commitRequestManager); - list.add(heartbeatRequestManager); - list.add(membershipManager); - list.add(streamsGroupHeartbeatRequestManager); - list.add(streamsMembershipManager); - list.add(Optional.of(offsetsRequestManager)); - list.add(Optional.of(topicMetadataRequestManager)); - list.add(Optional.of(fetchRequestManager)); + List list = new ArrayList<>(); + coordinatorRequestManager.ifPresent(list::add); + commitRequestManager.ifPresent(list::add); + heartbeatRequestManager.ifPresent(list::add); + membershipManager.ifPresent(list::add); + streamsGroupHeartbeatRequestManager.ifPresent(list::add); + streamsMembershipManager.ifPresent(list::add); + list.add(offsetsRequestManager); + list.add(topicMetadataRequestManager); + list.add(fetchRequestManager); entries = Collections.unmodifiableList(list); } @@ -119,15 +119,15 @@ public RequestManagers(LogContext logContext, this.topicMetadataRequestManager = null; this.fetchRequestManager = null; - List> list = new ArrayList<>(); - list.add(coordinatorRequestManager); - list.add(shareHeartbeatRequestManager); - list.add(shareMembershipManager); - list.add(Optional.of(shareConsumeRequestManager)); + List list = new ArrayList<>(); + coordinatorRequestManager.ifPresent(list::add); + shareHeartbeatRequestManager.ifPresent(list::add); + shareMembershipManager.ifPresent(list::add); + list.add(shareConsumeRequestManager); entries = Collections.unmodifiableList(list); } - public List> entries() { + public List entries() { return entries; } @@ -138,8 +138,6 @@ public void close() { log.debug("Closing RequestManagers"); entries.stream() - .filter(Optional::isPresent) - .map(Optional::get) .filter(rm -> rm instanceof Closeable) .map(rm -> (Closeable) rm) .forEach(c -> closeQuietly(c, c.getClass().getSimpleName())); @@ -250,6 +248,7 @@ protected RequestManagers create() { membershipManager = new ConsumerMembershipManager( groupRebalanceConfig.groupId, groupRebalanceConfig.groupInstanceId, + groupRebalanceConfig.rackId, groupRebalanceConfig.rebalanceTimeoutMs, serverAssignor, subscriptions, @@ -343,7 +342,7 @@ protected RequestManagers create() { ShareMembershipManager shareMembershipManager = new ShareMembershipManager( logContext, groupRebalanceConfig.groupId, - null, + groupRebalanceConfig.rackId.orElse(null), subscriptions, metadata, time, 
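
The FetchBuffer refactor above replaces the notEmptyCondition with a generic wakeup flag plus condition, so awaitWakeup() returns when the buffer is woken rather than only when it becomes non-empty. A rough standalone sketch of that lock/condition/flag pattern (not the Kafka class itself, just the concurrency idiom it relies on) looks like this:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class WakeupLatchSketch {
    private final Lock lock = new ReentrantLock();
    private final Condition condition = lock.newCondition();
    private final AtomicBoolean wokenUp = new AtomicBoolean(false);

    // Blocks until wakeup() is called, the timeout elapses, or the thread is interrupted.
    public void awaitWakeup(long timeoutMs) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        lock.lock();
        try {
            // Consume the wakeup flag if already set; otherwise wait until signalled or timed out.
            while (!wokenUp.compareAndSet(true, false)) {
                long remainingNanos = deadline - System.nanoTime();
                if (remainingNanos <= 0)
                    break;
                condition.awaitNanos(remainingNanos);
            }
        } finally {
            lock.unlock();
        }
    }

    public void wakeup() {
        lock.lock();
        try {
            // Set the flag while holding the lock so a concurrent awaitWakeup() cannot miss the signal.
            wokenUp.set(true);
            condition.signalAll();
        } finally {
            lock.unlock();
        }
    }
}
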
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java index 83bae92d48a85..2c337782dd415 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java @@ -41,11 +41,13 @@ import java.io.Closeable; import java.nio.ByteBuffer; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Optional; +import java.util.Set; /** * {@link ShareCompletedFetch} represents a {@link RecordBatch batch} of {@link Record records} @@ -152,7 +154,7 @@ void recordAggregatedMetrics(int bytes, int records) { * @param maxRecords The number of records to return; the number returned may be {@code 0 <= maxRecords} * @param checkCrcs Whether to check the CRC of fetched records * - * @return {@link ShareInFlightBatch The ShareInFlightBatch containing records and their acknowledgments} + * @return {@link ShareInFlightBatch The ShareInFlightBatch containing records and their acknowledgements} */ ShareInFlightBatch fetchRecords(final Deserializers deserializers, final int maxRecords, @@ -162,15 +164,15 @@ ShareInFlightBatch fetchRecords(final Deserializers deseriali if (cachedBatchException != null) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. - rejectRecordBatch(inFlightBatch, currentBatch); - inFlightBatch.setException(cachedBatchException); + Set offsets = rejectRecordBatch(inFlightBatch, currentBatch); + inFlightBatch.setException(new ShareInFlightBatchException(cachedBatchException, offsets)); cachedBatchException = null; return inFlightBatch; } if (cachedRecordException != null) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); - inFlightBatch.setException(cachedRecordException); + inFlightBatch.setException(new ShareInFlightBatchException(cachedRecordException, Set.of(lastRecord.offset()))); cachedRecordException = null; return inFlightBatch; } @@ -224,7 +226,7 @@ ShareInFlightBatch fetchRecords(final Deserializers deseriali nextAcquired = nextAcquiredRecord(); if (inFlightBatch.isEmpty()) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); - inFlightBatch.setException(se); + inFlightBatch.setException(new ShareInFlightBatchException(se, Set.of(lastRecord.offset()))); } else { cachedRecordException = se; inFlightBatch.setHasCachedException(true); @@ -232,8 +234,8 @@ ShareInFlightBatch fetchRecords(final Deserializers deseriali } catch (CorruptRecordException e) { if (inFlightBatch.isEmpty()) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. 
- rejectRecordBatch(inFlightBatch, currentBatch); - inFlightBatch.setException(e); + Set offsets = rejectRecordBatch(inFlightBatch, currentBatch); + inFlightBatch.setException(new ShareInFlightBatchException(e, offsets)); } else { cachedBatchException = e; inFlightBatch.setHasCachedException(true); @@ -261,12 +263,13 @@ private OffsetAndDeliveryCount nextAcquiredRecord() { return null; } - private void rejectRecordBatch(final ShareInFlightBatch inFlightBatch, + private Set rejectRecordBatch(final ShareInFlightBatch inFlightBatch, final RecordBatch currentBatch) { // Rewind the acquiredRecordIterator to the start, so we are in a known state acquiredRecordIterator = acquiredRecordList.listIterator(); OffsetAndDeliveryCount nextAcquired = nextAcquiredRecord(); + Set offsets = new HashSet<>(); for (long offset = currentBatch.baseOffset(); offset <= currentBatch.lastOffset(); offset++) { if (nextAcquired == null) { // No more acquired records, so we are done @@ -274,6 +277,7 @@ private void rejectRecordBatch(final ShareInFlightBatch inFlightBat } else if (offset == nextAcquired.offset) { // It's acquired, so we reject it inFlightBatch.addAcknowledgement(offset, AcknowledgeType.REJECT); + offsets.add(offset); } else if (offset < nextAcquired.offset) { // It's not acquired, so we skip it continue; @@ -281,6 +285,7 @@ private void rejectRecordBatch(final ShareInFlightBatch inFlightBat nextAcquired = nextAcquiredRecord(); } + return offsets; } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java index b36b778546b86..51e3fb39dfb0e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java @@ -175,22 +175,25 @@ public PollResult poll(long currentTimeMs) { TopicIdPartition tip = new TopicIdPartition(topicId, partition); Acknowledgements acknowledgementsToSend = null; + boolean canSendAcknowledgements = true; + Map nodeAcksFromFetchMap = fetchAcknowledgementsToSend.get(node.id()); if (nodeAcksFromFetchMap != null) { acknowledgementsToSend = nodeAcksFromFetchMap.remove(tip); + if (acknowledgementsToSend != null) { - if (handler.isNewSession()) { - // Failing the acknowledgements as we cannot have piggybacked acknowledgements in the initial ShareFetchRequest. - acknowledgementsToSend.complete(Errors.INVALID_SHARE_SESSION_EPOCH.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, acknowledgementsToSend)); - } else { - metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); - fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acknowledgementsToSend); + // Check if the share session epoch is valid for sending acknowledgements. 
+ if (!maybeAddAcknowledgements(handler, node, tip, acknowledgementsToSend)) { + canSendAcknowledgements = false; } } } - handler.addPartitionToFetch(tip, acknowledgementsToSend); + if (canSendAcknowledgements) { + handler.addPartitionToFetch(tip, acknowledgementsToSend); + } else { + handler.addPartitionToFetch(tip, null); + } topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); log.debug("Added fetch request for partition {} to node {}", tip, node.id()); @@ -212,8 +215,10 @@ public PollResult poll(long currentTimeMs) { if (nodeAcksFromFetchMap != null) { nodeAcksFromFetchMap.forEach((tip, acks) -> { if (!isLeaderKnownToHaveChanged(nodeId, tip)) { - metricsManager.recordAcknowledgementSent(acks.size()); - fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acks); + // Check if the share session epoch is valid for sending acknowledgements. + if (!maybeAddAcknowledgements(sessionHandler, node, tip, acks)) { + return; + } sessionHandler.addPartitionToAcknowledgeOnly(tip, acks); handlerMap.put(node, sessionHandler); @@ -256,6 +261,28 @@ public PollResult poll(long currentTimeMs) { return new PollResult(requests); } + /** + * + * @return True if we can add acknowledgements to the share session. + * If we cannot add acknowledgements, they are completed with {@link Errors#INVALID_SHARE_SESSION_EPOCH} exception. + */ + private boolean maybeAddAcknowledgements(ShareSessionHandler handler, + Node node, + TopicIdPartition tip, + Acknowledgements acknowledgements) { + if (handler.isNewSession()) { + // Failing the acknowledgements as we cannot have piggybacked acknowledgements in the initial ShareFetchRequest. + log.debug("Cannot send acknowledgements on initial epoch for ShareSession for partition {}", tip); + acknowledgements.complete(Errors.INVALID_SHARE_SESSION_EPOCH.exception()); + maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, acknowledgements)); + return false; + } else { + metricsManager.recordAcknowledgementSent(acknowledgements.size()); + fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acknowledgements); + return true; + } + } + public void fetch(Map acknowledgementsMap, Map controlRecordAcknowledgements) { if (!fetchMoreRecords) { @@ -731,6 +758,16 @@ private void handleShareFetchSuccess(Node fetchTarget, if (response.error() == Errors.UNKNOWN_TOPIC_ID) { metadata.requestUpdate(false); } + // Complete any inFlight acknowledgements with the error code from the response. 
+ Map nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id()); + if (nodeAcknowledgementsInFlight != null) { + nodeAcknowledgementsInFlight.forEach((tip, acks) -> { + acks.complete(Errors.forCode(response.error().code()).exception()); + metricsManager.recordFailedAcknowledgements(acks.size()); + }); + maybeSendShareAcknowledgeCommitCallbackEvent(nodeAcknowledgementsInFlight); + nodeAcknowledgementsInFlight.clear(); + } return; } @@ -1049,6 +1086,7 @@ boolean hasCompletedFetches() { protected void closeInternal() { Utils.closeQuietly(shareFetchBuffer, "shareFetchBuffer"); + Utils.closeQuietly(metricsManager, "shareFetchMetricsManager"); } public void close() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java index 625f6abf0cd38..9eb5fd13699b0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java @@ -45,7 +45,7 @@ public ShareConsumerDelegate create(final ConsumerConfig config, try { LogContext logContext = new LogContext(); Logger log = logContext.logger(getClass()); - log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); + log.warn("Share groups and KafkaShareConsumer are part of a preview feature introduced by KIP-932, and are not recommended for use in production."); return new ShareConsumerImpl<>(config, keyDeserializer, valueDeserializer); } catch (KafkaException e) { throw e; @@ -66,7 +66,7 @@ public ShareConsumerDelegate create(final LogContext logContext, final ConsumerMetadata metadata) { try { Logger log = logContext.logger(getClass()); - log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); + log.warn("Share groups and KafkaShareConsumer are part of a preview feature introduced by KIP-932, and are not recommended for use in production."); return new ShareConsumerImpl<>( logContext, clientId, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index bb5193f8dc6b9..12b01b5482e32 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -100,7 +100,7 @@ import java.util.function.Supplier; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createShareFetchMetricsManager; @@ -170,6 +170,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { private final String clientId; private final String groupId; private final BlockingQueue backgroundEventQueue; + private final BackgroundEventHandler 
backgroundEventHandler; private final BackgroundEventProcessor backgroundEventProcessor; private final CompletableEventReaper backgroundEventReaper; private final Deserializers deserializers; @@ -247,7 +248,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); this.acknowledgementMode = initializeAcknowledgementMode(config, log); this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); @@ -263,7 +264,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { ShareFetchMetricsManager shareFetchMetricsManager = createShareFetchMetricsManager(metrics); ApiVersions apiVersions = new ApiVersions(); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( + this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, asyncConsumerMetrics); // This FetchBuffer is shared between the application and network threads. @@ -323,7 +324,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { new FetchConfig(config), deserializers); - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); config.logUnused(); AppInfoParser.registerAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -366,7 +367,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.fetchBuffer = new ShareFetchBuffer(logContext); this.completedAcknowledgements = new LinkedList<>(); - ShareConsumerMetrics metricsRegistry = new ShareConsumerMetrics(CONSUMER_SHARE_METRIC_GROUP_PREFIX); + ShareConsumerMetrics metricsRegistry = new ShareConsumerMetrics(); ShareFetchMetricsManager shareFetchMetricsManager = new ShareFetchMetricsManager(metrics, metricsRegistry.shareFetchMetrics); this.fetchCollector = new ShareFetchCollector<>( logContext, @@ -374,12 +375,12 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { subscriptions, new FetchConfig(config), deserializers); - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - final BlockingQueue backgroundEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( + this.backgroundEventQueue = new LinkedBlockingQueue<>(); + this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, asyncConsumerMetrics); final Supplier networkClientDelegateSupplier = @@ -419,7 +420,6 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { requestManagersSupplier, asyncConsumerMetrics); - this.backgroundEventQueue = new LinkedBlockingQueue<>(); 
this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = new CompletableEventReaper(logContext); @@ -464,10 +464,12 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); this.currentFetch = ShareFetch.empty(); this.applicationEventHandler = applicationEventHandler; - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); this.clientTelemetryReporter = Optional.empty(); this.completedAcknowledgements = Collections.emptyList(); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); + this.backgroundEventHandler = new BackgroundEventHandler( + backgroundEventQueue, time, asyncConsumerMetrics); } // auxiliary interface for testing @@ -561,13 +563,14 @@ public void unsubscribe() { * {@inheritDoc} */ @Override + @SuppressWarnings("unchecked") public synchronized ConsumerRecords poll(final Duration timeout) { Timer timer = time.timer(timeout); acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(); + handleCompletedAcknowledgements(false); // If using implicit acknowledgement, acknowledge the previously fetched records acknowledgeBatchIfImplicitAcknowledgement(); @@ -601,6 +604,9 @@ public synchronized ConsumerRecords poll(final Duration timeout) { } while (timer.notExpired()); return ConsumerRecords.empty(); + } catch (ShareFetchException e) { + currentFetch = (ShareFetch) e.shareFetch(); + throw e.cause(); } finally { kafkaShareConsumerMetrics.recordPollEnd(timer.currentTimeMs()); release(); @@ -692,6 +698,19 @@ public void acknowledge(final ConsumerRecord record, final AcknowledgeType } } + /** + * {@inheritDoc} + */ + public void acknowledge(final String topic, final int partition, final long offset, final AcknowledgeType type) { + acquireAndEnsureOpen(); + try { + ensureExplicitAcknowledgement(); + currentFetch.acknowledge(topic, partition, offset, type); + } finally { + release(); + } + } + /** * {@inheritDoc} */ @@ -708,7 +727,7 @@ public Map> commitSync(final Duration acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(); + handleCompletedAcknowledgements(false); // If using implicit acknowledgement, acknowledge the previously fetched records acknowledgeBatchIfImplicitAcknowledgement(); @@ -752,7 +771,7 @@ public void commitAsync() { acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(); + handleCompletedAcknowledgements(false); // If using implicit acknowledgement, acknowledge the previously fetched records acknowledgeBatchIfImplicitAcknowledgement(); @@ -883,7 +902,7 @@ private void close(final Duration timeout, final boolean swallowException) { swallow(log, Level.ERROR, "Failed to stop finding coordinator", this::stopFindCoordinatorOnClose, firstException); swallow(log, Level.ERROR, "Failed invoking acknowledgement commit callback", - this::handleCompletedAcknowledgements, firstException); + () -> handleCompletedAcknowledgements(true), firstException); if (applicationEventHandler != null) closeQuietly(() -> 
applicationEventHandler.close(Duration.ofMillis(closeTimer.remainingMs())), "Failed shutting down network thread", firstException); closeTimer.update(); @@ -911,6 +930,9 @@ private void close(final Duration timeout, final boolean swallowException) { } private void stopFindCoordinatorOnClose() { + if (applicationEventHandler == null) { + return; + } log.debug("Stop finding coordinator during consumer close"); applicationEventHandler.add(new StopFindCoordinatorOnCloseEvent()); } @@ -927,6 +949,10 @@ private Timer createTimerForCloseRequests(Duration timeout) { * 2. leave the group */ private void sendAcknowledgementsAndLeaveGroup(final Timer timer, final AtomicReference firstException) { + if (applicationEventHandler == null || backgroundEventProcessor == null || + backgroundEventReaper == null || backgroundEventQueue == null) { + return; + } completeQuietly( () -> applicationEventHandler.addAndGet(new ShareAcknowledgeOnCloseEvent(acknowledgementsToSend(), calculateDeadlineMs(timer))), "Failed to send pending acknowledgements with a timeout(ms)=" + timer.timeoutMs(), firstException); @@ -1017,8 +1043,15 @@ private void maybeThrowInvalidGroupIdException() { *

* If the acknowledgement commit callback throws an exception, this method will throw an exception. */ - private void handleCompletedAcknowledgements() { - processBackgroundEvents(); + private void handleCompletedAcknowledgements(boolean onClose) { + if (backgroundEventQueue == null || backgroundEventReaper == null || backgroundEventProcessor == null) { + return; + } + // If the user gets any fatal errors, they will get these exceptions in the background queue. + // While closing, we ignore these exceptions so that the consumers close successfully. + processBackgroundEvents(onClose ? e -> (e instanceof GroupAuthorizationException + || e instanceof TopicAuthorizationException + || e instanceof InvalidTopicException) : e -> false); if (!completedAcknowledgements.isEmpty()) { try { @@ -1065,30 +1098,44 @@ private static ShareAcknowledgementMode initializeAcknowledgementMode(ConsumerCo return ShareAcknowledgementMode.fromString(s); } + private void processBackgroundEvents(final Predicate ignoreErrorEventException) { + try { + processBackgroundEvents(); + } catch (Exception e) { + if (!ignoreErrorEventException.test(e)) + throw e; + } + } + /** * Process the events—if any—that were produced by the {@link ConsumerNetworkThread network thread}. * It is possible that {@link ErrorEvent an error} * could occur when processing the events. In such cases, the processor will take a reference to the first * error, continue to process the remaining events, and then throw the first error that occurred. + * + * Visible for testing. */ - private boolean processBackgroundEvents() { + boolean processBackgroundEvents() { AtomicReference firstError = new AtomicReference<>(); - LinkedList events = new LinkedList<>(); - backgroundEventQueue.drainTo(events); - - for (BackgroundEvent event : events) { - try { - if (event instanceof CompletableEvent) - backgroundEventReaper.add((CompletableEvent) event); + List events = backgroundEventHandler.drainEvents(); + if (!events.isEmpty()) { + long startMs = time.milliseconds(); + for (BackgroundEvent event : events) { + asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); + try { + if (event instanceof CompletableEvent) + backgroundEventReaper.add((CompletableEvent) event); - backgroundEventProcessor.process(event); - } catch (Throwable t) { - KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); + backgroundEventProcessor.process(event); + } catch (Throwable t) { + KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); - if (!firstError.compareAndSet(null, e)) - log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); + if (!firstError.compareAndSet(null, e)) + log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); + } } + asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); @@ -1190,6 +1237,10 @@ public Metrics metricsRegistry() { return metrics; } + AsyncConsumerMetrics asyncConsumerMetrics() { + return asyncConsumerMetrics; + } + @Override public KafkaShareConsumerMetrics kafkaShareConsumerMetrics() { return kafkaShareConsumerMetrics; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java index 41a41818deea4..ee02dfcc17b56 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java @@ -19,6 +19,8 @@ import java.util.HashSet; import java.util.Set; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; + public class ShareConsumerMetrics { public ShareFetchMetricsRegistry shareFetchMetrics; @@ -26,7 +28,7 @@ public ShareConsumerMetrics(Set metricsTags, String metricGrpPrefix) { this.shareFetchMetrics = new ShareFetchMetricsRegistry(metricsTags, metricGrpPrefix); } - public ShareConsumerMetrics(String metricGroupPrefix) { - this(new HashSet<>(), metricGroupPrefix); + public ShareConsumerMetrics() { + this(new HashSet<>(), CONSUMER_SHARE_METRIC_GROUP_PREFIX); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java index eb79fa79c40c1..406110fe5024f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java @@ -112,7 +112,7 @@ public boolean isEmpty() { * @param record The record to acknowledge * @param type The acknowledge type which indicates whether it was processed successfully */ - public void acknowledge(final ConsumerRecord record, AcknowledgeType type) { + public void acknowledge(final ConsumerRecord record, final AcknowledgeType type) { for (Map.Entry> tipBatch : batches.entrySet()) { TopicIdPartition tip = tipBatch.getKey(); if (tip.topic().equals(record.topic()) && (tip.partition() == record.partition())) { @@ -123,6 +123,29 @@ public void acknowledge(final ConsumerRecord record, AcknowledgeType type) throw new IllegalStateException("The record cannot be acknowledged."); } + /** + * Acknowledge a single record by its topic, partition and offset in the current batch. + * + * @param topic The topic of the record to acknowledge + * @param partition The partition of the record + * @param offset The offset of the record + * @param type The acknowledge type which indicates whether it was processed successfully + */ + public void acknowledge(final String topic, final int partition, final long offset, final AcknowledgeType type) { + for (Map.Entry> tipBatch : batches.entrySet()) { + TopicIdPartition tip = tipBatch.getKey(); + ShareInFlightBatchException exception = tipBatch.getValue().getException(); + if (tip.topic().equals(topic) && (tip.partition() == partition) && + exception != null && + exception.offsets().contains(offset)) { + + tipBatch.getValue().addAcknowledgement(offset, type); + return; + } + } + throw new IllegalStateException("The record cannot be acknowledged."); + } + /** * Acknowledge all records in the current batch. If any records in the batch already have * been acknowledged, those acknowledgements are not overwritten. 
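
The new acknowledge(topic, partition, offset, type) path above exists so that an application can acknowledge a record it never received as a ConsumerRecord, for example one that failed deserialization. The sketch below is a hedged usage example, not part of the patch: it assumes the overload is exposed on KafkaShareConsumer, that deserialization failures surface as RecordDeserializationException, and that explicit acknowledgement is enabled via the share.acknowledgement.mode config (broker address, group id and topic are placeholders).

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.AcknowledgeType;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.errors.RecordDeserializationException;

public class ShareConsumerAcknowledgeByOffsetSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");     // placeholder broker
        props.put("group.id", "example-share-group");         // placeholder share group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("share.acknowledgement.mode", "explicit");  // config name as assumed here

        try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props)) {
            consumer.subscribe(List.of("orders"));            // placeholder topic
            while (true) {
                try {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    for (ConsumerRecord<String, String> record : records) {
                        // Process and explicitly accept records that were delivered successfully.
                        consumer.acknowledge(record, AcknowledgeType.ACCEPT);
                    }
                } catch (RecordDeserializationException e) {
                    // There is no ConsumerRecord to pass to acknowledge(record, type), so the
                    // assumed per-offset overload rejects the bad record by its coordinates,
                    // letting the next poll() move past it.
                    consumer.acknowledge(e.topicPartition().topic(), e.topicPartition().partition(),
                            e.offset(), AcknowledgeType.REJECT);
                }
                consumer.commitSync();
            }
        }
    }
}
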
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java index 3d073fa92eb82..c2a17d051b17e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java @@ -112,7 +112,7 @@ public ShareFetch collect(final ShareFetchBuffer fetchBuffer) { fetch.add(tp, batch); if (batch.getException() != null) { - throw batch.getException(); + throw new ShareFetchException(fetch, batch.getException().cause()); } else if (batch.hasCachedException()) { break; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchException.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchException.java new file mode 100644 index 0000000000000..5e904e2506865 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchException.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.errors.SerializationException; + +public class ShareFetchException extends SerializationException { + + private final ShareFetch shareFetch; + + private final KafkaException cause; + + public ShareFetchException(ShareFetch shareFetch, KafkaException cause) { + this.shareFetch = shareFetch; + this.cause = cause; + } + + public ShareFetch shareFetch() { + return shareFetch; + } + + public KafkaException cause() { + return cause; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java index 249edc6aa2747..d3e60a3dfaaee 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java @@ -20,7 +20,10 @@ import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.WindowedCount; -public class ShareFetchMetricsManager { +import java.io.IOException; +import java.util.Arrays; + +public class ShareFetchMetricsManager implements AutoCloseable { private final Metrics metrics; private final Sensor throttleTime; private final Sensor bytesFetched; @@ -92,4 +95,16 @@ void recordAcknowledgementSent(int acknowledgements) { void recordFailedAcknowledgements(int acknowledgements) { failedAcknowledgements.record(acknowledgements); } + + @Override + public void close() throws IOException { + Arrays.asList( + throttleTime.name(), + bytesFetched.name(), + recordsFetched.name(), + fetchLatency.name(), + sentAcknowledgements.name(), + failedAcknowledgements.name() + ).forEach(metrics::removeSensor); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java index 21d598afb4cbe..f46b6f72c87e1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java @@ -54,6 +54,9 @@ public class ShareHeartbeatRequestManager extends AbstractHeartbeatRequestManage public static final String SHARE_PROTOCOL_NOT_SUPPORTED_MSG = "The cluster does not support the share group protocol. " + "To use share groups, the cluster must have the share group protocol enabled."; + public static final String SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG = "The cluster does not support the share group protocol " + + "using ShareGroupHeartbeat API version 1 or later. 
This version of the API was introduced in Apache Kafka v4.1."; + public ShareHeartbeatRequestManager( final LogContext logContext, final Time time, @@ -93,8 +96,8 @@ public ShareHeartbeatRequestManager( public boolean handleSpecificFailure(Throwable exception) { boolean errorHandled = false; if (exception instanceof UnsupportedVersionException) { - logger.error("{} failed due to {}: {}", heartbeatRequestName(), exception.getMessage(), SHARE_PROTOCOL_NOT_SUPPORTED_MSG); - handleFatalFailure(new UnsupportedVersionException(SHARE_PROTOCOL_NOT_SUPPORTED_MSG, exception)); + logger.error("{} failed due to {}: {}", heartbeatRequestName(), exception.getMessage(), SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG); + handleFatalFailure(new UnsupportedVersionException(SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG, exception)); errorHandled = true; } return errorHandled; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java index b2d6fad17fdcf..0fa0499aa1fba 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicIdPartition; import java.util.ArrayList; @@ -34,7 +33,7 @@ public class ShareInFlightBatch { private final Map> inFlightRecords; private final Set acknowledgedRecords; private Acknowledgements acknowledgements; - private KafkaException exception; + private ShareInFlightBatchException exception; private boolean hasCachedException = false; public ShareInFlightBatch(int nodeId, TopicIdPartition partition) { @@ -102,6 +101,7 @@ Acknowledgements takeAcknowledgedRecords() { acknowledgedRecords.forEach(inFlightRecords::remove); } acknowledgedRecords.clear(); + exception = null; Acknowledgements currentAcknowledgements = acknowledgements; acknowledgements = Acknowledgements.empty(); @@ -116,11 +116,11 @@ public boolean isEmpty() { return inFlightRecords.isEmpty() && acknowledgements.isEmpty(); } - public void setException(KafkaException exception) { + public void setException(ShareInFlightBatchException exception) { this.exception = exception; } - public KafkaException getException() { + public ShareInFlightBatchException getException() { return exception; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatchException.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatchException.java new file mode 100644 index 0000000000000..bd8fa2602e095 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatchException.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.errors.SerializationException; + +import java.util.Set; + +public class ShareInFlightBatchException extends SerializationException { + + private final KafkaException cause; + + private final Set offsets; + + public ShareInFlightBatchException(KafkaException cause, Set offsets) { + this.cause = cause; + this.offsets = offsets; + } + + public KafkaException cause() { + return cause; + } + + public Set offsets() { + return offsets; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java index d7944466130a4..47ab87edb358d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java @@ -137,7 +137,7 @@ public void onHeartbeatSuccess(ShareGroupHeartbeatResponse response) { "already leaving the group.", memberId, memberEpoch); return; } - if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { + if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} received a successful response to the heartbeat " + "to leave the group and completed the leave operation. ", memberId, memberEpoch); return; @@ -147,6 +147,13 @@ public void onHeartbeatSuccess(ShareGroupHeartbeatResponse response) { " so it's not a member of the group. ", memberId, state); return; } + if (responseData.memberEpoch() < 0) { + log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " + + "is in {} state and the member epoch is invalid: {}. 
", memberId, memberEpoch, state, + responseData.memberEpoch()); + maybeCompleteLeaveInProgress(); + return; + } updateMemberEpoch(responseData.memberEpoch()); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java index 34a109944bea5..634a9839c5d00 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java @@ -219,7 +219,8 @@ private String topicIdPartitionsToLogString(Collection partiti */ public boolean handleResponse(ShareFetchResponse response, short version) { if ((response.error() == Errors.SHARE_SESSION_NOT_FOUND) || - (response.error() == Errors.INVALID_SHARE_SESSION_EPOCH)) { + (response.error() == Errors.INVALID_SHARE_SESSION_EPOCH) || + (response.error() == Errors.SHARE_SESSION_LIMIT_REACHED)) { log.info("Node {} was unable to process the ShareFetch request with {}: {}.", node, nextMetadata, response.error()); nextMetadata = nextMetadata.nextCloseExistingAttemptNew(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java index 5012aba5a32b0..ceeeb6c191607 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java @@ -52,7 +52,7 @@ import static org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult.EMPTY; /** - *

Manages the request creation and response handling for the Streams group heartbeat. The class creates a + *

Manages the request creation and response handling for the streams group heartbeat. The class creates a * heartbeat request using the state stored in the membership manager. The requests can be retrieved * by calling {@link StreamsGroupHeartbeatRequestManager#poll(long)}. Once the response is received, it updates the * state in the membership manager and handles any errors. @@ -88,6 +88,8 @@ void reset() { private final int rebalanceTimeoutMs; private final StreamsRebalanceData streamsRebalanceData; private final LastSentFields lastSentFields = new LastSentFields(); + private int endpointInformationEpoch = -1; + public HeartbeatState(final StreamsRebalanceData streamsRebalanceData, final StreamsMembershipManager membershipManager, @@ -101,11 +103,20 @@ public void reset() { lastSentFields.reset(); } + public int endpointInformationEpoch() { + return endpointInformationEpoch; + } + + public void setEndpointInformationEpoch(int endpointInformationEpoch) { + this.endpointInformationEpoch = endpointInformationEpoch; + } + public StreamsGroupHeartbeatRequestData buildRequestData() { StreamsGroupHeartbeatRequestData data = new StreamsGroupHeartbeatRequestData(); data.setGroupId(membershipManager.groupId()); data.setMemberId(membershipManager.memberId()); data.setMemberEpoch(membershipManager.memberEpoch()); + data.setEndpointInformationEpoch(endpointInformationEpoch); membershipManager.groupInstanceId().ifPresent(data::setInstanceId); boolean joining = membershipManager.state() == MemberState.JOINING; @@ -276,7 +287,7 @@ private static List getChangelogTopi private final HeartbeatMetricsManager metricsManager; - private StreamsRebalanceData streamsRebalanceData; + private final StreamsRebalanceData streamsRebalanceData; /** * Timer for tracking the time since the last consumer poll. 
If the timer expires, the consumer will stop @@ -515,17 +526,21 @@ private void onSuccessResponse(final StreamsGroupHeartbeatResponse response, fin final StreamsGroupHeartbeatResponseData data = response.data(); heartbeatRequestState.updateHeartbeatIntervalMs(data.heartbeatIntervalMs()); heartbeatRequestState.onSuccessfulAttempt(currentTimeMs); + heartbeatState.setEndpointInformationEpoch(data.endpointInformationEpoch()); if (data.partitionsByUserEndpoint() != null) { streamsRebalanceData.setPartitionsByHost(convertHostInfoMap(data)); } List statuses = data.status(); - if (statuses != null && !statuses.isEmpty()) { - String statusDetails = statuses.stream() - .map(status -> "(" + status.statusCode() + ") " + status.statusDetail()) - .collect(Collectors.joining(", ")); - logger.warn("Membership is in the following statuses: {}", statusDetails); + if (statuses != null) { + streamsRebalanceData.setStatuses(statuses); + if (!statuses.isEmpty()) { + String statusDetails = statuses.stream() + .map(status -> "(" + status.statusCode() + ") " + status.statusDetail()) + .collect(Collectors.joining(", ")); + logger.warn("Membership is in the following statuses: {}", statusDetails); + } } membershipManager.onHeartbeatSuccess(response); @@ -671,16 +686,24 @@ private void handleFatalFailure(Throwable error) { membershipManager.transitionToFatal(); } - private static Map> convertHostInfoMap(final StreamsGroupHeartbeatResponseData data) { - Map> partitionsByHost = new HashMap<>(); + private static Map convertHostInfoMap( + final StreamsGroupHeartbeatResponseData data) { + Map partitionsByHost = new HashMap<>(); data.partitionsByUserEndpoint().forEach(endpoint -> { - List topicPartitions = endpoint.partitions().stream() - .flatMap(partition -> - partition.partitions().stream().map(partitionId -> new TopicPartition(partition.topic(), partitionId))) - .collect(Collectors.toList()); + List activeTopicPartitions = getTopicPartitionList(endpoint.activePartitions()); + List standbyTopicPartitions = getTopicPartitionList(endpoint.standbyPartitions()); StreamsGroupHeartbeatResponseData.Endpoint userEndpoint = endpoint.userEndpoint(); - partitionsByHost.put(new StreamsRebalanceData.HostInfo(userEndpoint.host(), userEndpoint.port()), topicPartitions); + StreamsRebalanceData.EndpointPartitions endpointPartitions = new StreamsRebalanceData.EndpointPartitions(activeTopicPartitions, standbyTopicPartitions); + partitionsByHost.put(new StreamsRebalanceData.HostInfo(userEndpoint.host(), userEndpoint.port()), endpointPartitions); }); return partitionsByHost; } + + static List getTopicPartitionList(List topicPartitions) { + return topicPartitions.stream() + .flatMap(partition -> + partition.partitions().stream().map(partitionId -> new TopicPartition(partition.topic(), partitionId))) + .collect(Collectors.toList()); + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java index eb006ac8dda5a..84ac83125bea7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java @@ -52,6 +52,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.unmodifiableList; + /** * Tracks the state of a single member in relationship to a group: *

@@ -171,7 +173,7 @@ public int hashCode() { private MemberState state; /** - * Group ID of the Streams group the member will be part of, provided when creating the current + * Group ID of the streams group the member will be part of, provided when creating the current * membership manager. */ private final String groupId; @@ -294,7 +296,7 @@ public StreamsMembershipManager(final String groupId, this.backgroundEventHandler = backgroundEventHandler; this.streamsRebalanceData = streamsRebalanceData; this.subscriptionState = subscriptionState; - metricsManager = new ConsumerRebalanceMetricsManager(metrics); + metricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState); this.time = time; } @@ -661,7 +663,7 @@ public void onHeartbeatSuccess(StreamsGroupHeartbeatResponse response) { "already leaving the group.", memberId, memberEpoch); return; } - if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { + if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} received a successful response to the heartbeat " + "to leave the group and completed the leave operation. ", memberId, memberEpoch); return; @@ -671,6 +673,13 @@ public void onHeartbeatSuccess(StreamsGroupHeartbeatResponse response) { " so it's not a member of the group. ", memberId, state); return; } + if (responseData.memberEpoch() < 0) { + log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " + + "is in {} state and the member epoch is invalid: {}. ", memberId, memberEpoch, state, + responseData.memberEpoch()); + maybeCompleteLeaveInProgress(); + return; + } updateMemberEpoch(responseData.memberEpoch()); @@ -1001,8 +1010,8 @@ private void maybeReconcile() { return; } if (reconciliationInProgress) { - log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. Assignment " + - targetAssignment + " will be handled in the next reconciliation loop."); + log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. Assignment {}" + + " will be handled in the next reconciliation loop.", targetAssignment); return; } @@ -1122,12 +1131,12 @@ private CompletableFuture assignTasks(final SortedSet partitionsToAssign = topicPartitionsForActiveTasks(activeTasksToAssign); - final SortedSet partitionsToAssigneNotPreviouslyOwned = + final SortedSet partitionsToAssignNotPreviouslyOwned = partitionsToAssignNotPreviouslyOwned(partitionsToAssign, topicPartitionsForActiveTasks(ownedActiveTasks)); subscriptionState.assignFromSubscribedAwaitingCallback( partitionsToAssign, - partitionsToAssigneNotPreviouslyOwned + partitionsToAssignNotPreviouslyOwned ); notifyAssignmentChange(partitionsToAssign); @@ -1143,10 +1152,10 @@ private CompletableFuture assignTasks(final SortedSet topicPartitionsForActiveTasks(final SortedSet< Stream.concat( streamsRebalanceData.subtopologies().get(task.subtopologyId()).sourceTopics().stream(), streamsRebalanceData.subtopologies().get(task.subtopologyId()).repartitionSourceTopics().keySet().stream() - ).forEach(topic -> { - topicPartitions.add(new TopicPartition(topic, task.partitionId())); - }) + ).forEach(topic -> + topicPartitions.add(new TopicPartition(topic, task.partitionId())) + ) ); return topicPartitions; } @@ -1214,7 +1223,7 @@ private boolean maybeAbortReconciliation() { String reason = rejoinedWhileReconciliationInProgress ? 
"the member has re-joined the group" : "the member already transitioned out of the reconciling state into " + state; - log.info("Interrupting reconciliation that is not relevant anymore because " + reason); + log.info("Interrupting reconciliation that is not relevant anymore because {}", reason); markReconciliationCompleted(); } return shouldAbort; @@ -1305,4 +1314,9 @@ public void onAllTasksLostCallbackCompleted(final StreamsOnAllTasksLostCallbackC future.complete(null); } } + + // visible for testing + List stateListeners() { + return unmodifiableList(stateUpdatesListeners); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceData.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceData.java index 6157b66cf16ed..2fe7ae8ad35d2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceData.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceData.java @@ -17,7 +17,9 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -118,6 +120,31 @@ public String toString() { } } + public static class EndpointPartitions { + private final List activePartitions; + private final List standbyPartitions; + + public EndpointPartitions(final List activePartitions, + final List standbyPartitions) { + this.activePartitions = activePartitions; + this.standbyPartitions = standbyPartitions; + } + + public List activePartitions() { + return new ArrayList<>(activePartitions); + } + + public List standbyPartitions() { + return new ArrayList<>(standbyPartitions); + } + @Override + public String toString() { + return "EndpointPartitions {" + + "activePartitions=" + activePartitions + + ", standbyPartitions=" + standbyPartitions + + '}'; + } + } public static class Assignment { @@ -296,10 +323,12 @@ public String toString() { private final AtomicReference reconciledAssignment = new AtomicReference<>(Assignment.EMPTY); - private final AtomicReference>> partitionsByHost = new AtomicReference<>(Collections.emptyMap()); + private final AtomicReference> partitionsByHost = new AtomicReference<>(Collections.emptyMap()); private final AtomicBoolean shutdownRequested = new AtomicBoolean(false); + private final AtomicReference> statuses = new AtomicReference<>(List.of()); + public StreamsRebalanceData(final UUID processId, final Optional endpoint, final Map subtopologies, @@ -338,19 +367,32 @@ public Assignment reconciledAssignment() { return reconciledAssignment.get(); } - public void setPartitionsByHost(final Map> partitionsByHost) { + public void setPartitionsByHost(final Map partitionsByHost) { this.partitionsByHost.set(partitionsByHost); } - public Map> partitionsByHost() { + public Map partitionsByHost() { return partitionsByHost.get(); } + /** For the current stream thread to request a shutdown of all Streams clients belonging to the same application. */ public void requestShutdown() { shutdownRequested.set(true); } + /** True if the current stream thread requested a shutdown of all Streams clients belonging to the same application. */ public boolean shutdownRequested() { return shutdownRequested.get(); } + + /** Updated whenever the status of the streams group is updated. 
*/ + public void setStatuses(final List s) { + statuses.set(s); + } + + /** For communicating the current status of the group to the stream thread */ + public List statuses() { + return statuses.get(); + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListener.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListener.java index b8a5db79445b4..2c8a7449a7252 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListener.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListener.java @@ -16,11 +16,10 @@ */ package org.apache.kafka.clients.consumer.internals; -import java.util.Optional; import java.util.Set; /** - * Listener for handling Streams group rebalance events in Kafka Streams. + * Listener for handling streams group rebalance events in Kafka Streams. */ public interface StreamsRebalanceListener { @@ -28,22 +27,18 @@ public interface StreamsRebalanceListener { * Called when tasks are revoked from a stream thread. * * @param tasks The tasks to be revoked. - * @return The exception thrown during the callback, if any. */ - Optional onTasksRevoked(final Set tasks); + void onTasksRevoked(final Set tasks); /** * Called when tasks are assigned from a stream thread. * * @param assignment The tasks assigned. - * @return The exception thrown during the callback, if any. */ - Optional onTasksAssigned(final StreamsRebalanceData.Assignment assignment); + void onTasksAssigned(final StreamsRebalanceData.Assignment assignment); /** - * Called when a stream thread loses all assigned tasks. - * - * @return The exception thrown during the callback, if any. + * Called when a stream thread loses all assigned tasks */ - Optional onAllTasksLost(); + void onAllTasksLost(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvoker.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvoker.java new file mode 100644 index 0000000000000..fc8c78c13dce6 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvoker.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.utils.LogContext; + +import org.slf4j.Logger; + +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +/** + * This class encapsulates the invocation of the callback methods defined in the {@link StreamsRebalanceListener} + * interface. When streams group task assignment changes, these methods are invoked. This class wraps those + * callback calls with some logging and error handling. + */ +public class StreamsRebalanceListenerInvoker { + + private final Logger log; + + private final StreamsRebalanceData streamsRebalanceData; + private Optional listener; + + StreamsRebalanceListenerInvoker(LogContext logContext, StreamsRebalanceData streamsRebalanceData) { + this.log = logContext.logger(getClass()); + this.listener = Optional.empty(); + this.streamsRebalanceData = streamsRebalanceData; + } + + public void setRebalanceListener(StreamsRebalanceListener streamsRebalanceListener) { + Objects.requireNonNull(streamsRebalanceListener, "StreamsRebalanceListener cannot be null"); + this.listener = Optional.of(streamsRebalanceListener); + } + + public Exception invokeAllTasksRevoked() { + if (listener.isEmpty()) { + return null; + } + return invokeTasksRevoked(streamsRebalanceData.reconciledAssignment().activeTasks()); + } + + public Exception invokeTasksAssigned(final StreamsRebalanceData.Assignment assignment) { + if (listener.isEmpty()) { + return null; + } + log.info("Invoking tasks assigned callback for new assignment: {}", assignment); + try { + listener.get().onTasksAssigned(assignment); + } catch (WakeupException | InterruptException e) { + throw e; + } catch (Exception e) { + log.error( + "Streams rebalance listener failed on invocation of onTasksAssigned for tasks {}", + assignment, + e + ); + return e; + } + return null; + } + + public Exception invokeTasksRevoked(final Set tasks) { + if (listener.isEmpty()) { + return null; + } + log.info("Invoking task revoked callback for revoked active tasks {}", tasks); + try { + listener.get().onTasksRevoked(tasks); + } catch (WakeupException | InterruptException e) { + throw e; + } catch (Exception e) { + log.error( + "Streams rebalance listener failed on invocation of onTasksRevoked for tasks {}", + tasks, + e + ); + return e; + } + return null; + } + + public Exception invokeAllTasksLost() { + if (listener.isEmpty()) { + return null; + } + log.info("Invoking tasks lost callback for all tasks"); + try { + listener.get().onAllTasksLost(); + } catch (WakeupException | InterruptException e) { + throw e; + } catch (Exception e) { + log.error( + "Streams rebalance listener failed on invocation of onTasksLost.", + e + ); + return e; + } + return null; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java index e237165f5b771..9d44e98ed3954 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java @@ -25,6 +25,7 @@ import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import 
org.apache.kafka.common.internals.PartitionStates; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.utils.LogContext; @@ -91,6 +92,13 @@ private enum SubscriptionType { /* the list of topics the user has requested */ private Set subscription; + /** + * Topic IDs received in an assignment from the coordinator when using the Consumer rebalance protocol. + * This will be used to include assigned topic IDs in metadata requests when the consumer + * does not know the topic names (ex. when the user subscribes to a RE2J regex computed on the broker) + */ + private Set assignedTopicIds; + /* The list of topics the group has subscribed to. This may include some topics which are not part * of `subscription` for the leader of a group since it is responsible for detecting metadata changes * which require a group rebalance. */ @@ -149,6 +157,7 @@ public SubscriptionState(LogContext logContext, AutoOffsetResetStrategy defaultR this.log = logContext.logger(this.getClass()); this.defaultResetStrategy = defaultResetStrategy; this.subscription = new TreeSet<>(); // use a sorted set for better logging + this.assignedTopicIds = new TreeSet<>(); this.assignment = new PartitionStates<>(); this.groupSubscription = new HashSet<>(); this.subscribedPattern = null; @@ -338,6 +347,7 @@ public synchronized void unsubscribe() { this.subscription = Collections.emptySet(); this.groupSubscription = Collections.emptySet(); this.assignment.clear(); + this.assignedTopicIds = Collections.emptySet(); this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; this.assignmentId++; @@ -467,7 +477,7 @@ public synchronized List assignedPartitionsList() { * Provides the number of assigned partitions in a thread safe manner. * @return the number of assigned partitions. */ - synchronized int numAssignedPartitions() { + public synchronized int numAssignedPartitions() { return this.assignment.size(); } @@ -477,7 +487,7 @@ public synchronized List fetchablePartitions(Predicate result = new ArrayList<>(); assignment.forEach((topicPartition, topicPartitionState) -> { // Cheap check is first to avoid evaluating the predicate if possible - if ((subscriptionType.equals(SubscriptionType.AUTO_TOPICS_SHARE) || topicPartitionState.isFetchable()) + if ((subscriptionType.equals(SubscriptionType.AUTO_TOPICS_SHARE) || isFetchableAndSubscribed(topicPartition, topicPartitionState)) && isAvailable.test(topicPartition)) { result.add(topicPartition); } @@ -485,23 +495,34 @@ public synchronized List fetchablePartitions(Predicate assignedTopicIds() { + return assignedTopicIds; + } + + /** + * Set the set of topic IDs that have been assigned to the consumer by the coordinator. + * This is used for topic IDs received in an assignment when using the new consumer rebalance protocol (KIP-848). + */ + public synchronized void setAssignedTopicIds(Set assignedTopicIds) { + this.assignedTopicIds = assignedTopicIds; + } + /** * Enable fetching and updating positions for the given partitions that were assigned to the * consumer, but waiting for the onPartitionsAssigned callback to complete. 
This is diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSet.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSet.java new file mode 100644 index 0000000000000..6bf708d9354e7 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSet.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +/** + * Represents a set of topic partitions, where each entry contains topic ID, topic name and partition number. + * Keeps in-memory references to provide easy access to this data in different forms. + * (ex. retrieve topic IDs only, topic names, partitions with topic names, partitions with topic IDs) + * Data is kept sorted by topic name and partition number, for improved logging. + */ +public class TopicIdPartitionSet { + + /** + * TopicPartition comparator based on topic name and partition. + */ + static final Utils.TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new Utils.TopicPartitionComparator(); + + /** + * TopicIdPartition comparator based on topic name and partition. + * (Ignoring topic ID while sorting, as this is sorted mainly for logging purposes). + */ + static final Utils.TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new Utils.TopicIdPartitionComparator(); + + private final SortedSet topicIdPartitions; + private final SortedSet topicPartitions; + private final Set topicIds; + private final SortedSet topicNames; + + public TopicIdPartitionSet() { + this.topicIdPartitions = new TreeSet<>(TOPIC_ID_PARTITION_COMPARATOR); + this.topicPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + this.topicIds = new HashSet<>(); + this.topicNames = new TreeSet<>(); + } + + /** + * Add a single partition to the assignment, along with its topic ID and name. + * This will keep it, and also save references to the topic ID, topic name and partition. + * Visible for testing. + */ + void add(TopicIdPartition topicIdPartition) { + topicIdPartitions.add(topicIdPartition); + topicPartitions.add(topicIdPartition.topicPartition()); + topicIds.add(topicIdPartition.topicId()); + topicNames.add(topicIdPartition.topicPartition().topic()); + } + + /** + * Add a set of partitions to the assignment, along with the topic ID and name. 
+ */ + public void addAll(Uuid topicId, String topicName, Set partitions) { + partitions.forEach(tp -> add(new TopicIdPartition(topicId, tp, topicName))); + } + + public boolean isEmpty() { + return this.topicIdPartitions.isEmpty(); + } + + public SortedSet topicPartitions() { + return Collections.unmodifiableSortedSet(topicPartitions); + } + + public Set topicIds() { + return Collections.unmodifiableSet(topicIds); + } + + public SortedSet topicNames() { + return Collections.unmodifiableSortedSet(topicNames); + } + + /** + * @return Map of partition numbers per topic ID, sorted by topic names (for improved logging). + */ + public Map> toTopicIdPartitionMap() { + Map> partitions = new HashMap<>(); + topicIdPartitions.forEach(topicIdPartition -> { + Uuid topicId = topicIdPartition.topicId(); + partitions.computeIfAbsent(topicId, k -> new TreeSet<>()).add(topicIdPartition.partition()); + }); + return partitions; + } + + /** + * @return Set of topic partitions (with topic name and partition number) + */ + protected SortedSet toTopicNamePartitionSet() { + SortedSet result = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + topicIdPartitions.forEach(topicIdPartition -> result.add(topicIdPartition.topicPartition())); + return result; + } + + @Override + public String toString() { + return this.topicIdPartitions.toString(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java index 2d9cab0dd9686..fcef3ce2647af 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java @@ -33,15 +33,16 @@ import org.slf4j.Logger; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult.EMPTY; @@ -84,16 +85,23 @@ public TopicMetadataRequestManager(final LogContext context, final Time time, fi @Override public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { // Prune any requests which have timed out - List expiredRequests = inflightRequests.stream() - .filter(TimedRequestState::isExpired) - .collect(Collectors.toList()); - expiredRequests.forEach(TopicMetadataRequestState::expire); + Iterator requestStateIterator = inflightRequests.iterator(); - List requests = inflightRequests.stream() - .map(req -> req.send(currentTimeMs)) - .filter(Optional::isPresent) - .map(Optional::get) - .collect(Collectors.toList()); + while (requestStateIterator.hasNext()) { + TopicMetadataRequestState requestState = requestStateIterator.next(); + + if (requestState.isExpired()) { + requestState.expire(); + requestStateIterator.remove(); + } + } + + List requests = new ArrayList<>(); + + for (TopicMetadataRequestState request : inflightRequests) { + Optional unsentRequest = request.send(currentTimeMs); + unsentRequest.ifPresent(requests::add); + } return requests.isEmpty() ? 
EMPTY : new NetworkClientDelegate.PollResult(0, requests); } @@ -181,7 +189,9 @@ private Optional send(final long currentTim } private void expire() { - completeFutureAndRemoveRequest( + // The request state is removed from inflightRequests via an iterator by the caller of this method, + // so don't remove it from inflightRequests here. + future.completeExceptionally( new TimeoutException("Timeout expired while fetching topic metadata")); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java index 18b4ea1714463..f3f0e161015b4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer; import org.apache.kafka.clients.consumer.internals.ShareConsumerImpl; -import org.apache.kafka.common.Uuid; import java.util.Objects; @@ -48,12 +47,6 @@ public enum Type { private final Type type; - /** - * This identifies a particular event. It is used to disambiguate events via {@link #hashCode()} and - * {@link #equals(Object)} and can be used in log messages when debugging. - */ - private final Uuid id; - /** * The time in milliseconds when this event was enqueued. * This field can be changed after the event is created, so it should not be used in hashCode or equals. @@ -62,17 +55,12 @@ public enum Type { protected ApplicationEvent(Type type) { this.type = Objects.requireNonNull(type); - this.id = Uuid.randomUuid(); } public Type type() { return type; } - public Uuid id() { - return id; - } - public void setEnqueuedMs(long enqueuedMs) { this.enqueuedMs = enqueuedMs; } @@ -81,21 +69,8 @@ public long enqueuedMs() { return enqueuedMs; } - @Override - public final boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ApplicationEvent that = (ApplicationEvent) o; - return type == that.type && id.equals(that.id); - } - - @Override - public final int hashCode() { - return Objects.hash(type, id); - } - protected String toStringBase() { - return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; + return "type=" + type + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java index 1ca51cca62e85..853c5484df5be 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java @@ -310,7 +310,7 @@ private void process(final AssignmentChangeEvent event) { manager.updateTimerAndMaybeCommit(event.currentTimeMs()); } - log.info("Assigned to partition(s): {}", event.partitions().stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + log.info("Assigned to partition(s): {}", event.partitions()); try { if (subscriptions.assignFromUser(new HashSet<>(event.partitions()))) metadata.requestUpdateForNewTopics(); @@ -497,7 +497,7 @@ private void process(final LeaveGroupOnCloseEvent event) { CompletableFuture future = 
requestManagers.consumerMembershipManager.get().leaveGroupOnClose(event.membershipOperation()); future.whenComplete(complete(event.future())); } else if (requestManagers.streamsMembershipManager.isPresent()) { - log.debug("Signal the StreamsMembershipManager to leave the Streams group since the member is closing"); + log.debug("Signal the StreamsMembershipManager to leave the streams group since the member is closing"); CompletableFuture future = requestManagers.streamsMembershipManager.get().leaveGroupOnClose(); future.whenComplete(complete(event.future())); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java index b2f8a3666c499..6fa737c727805 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread; -import org.apache.kafka.common.Uuid; import java.util.Objects; @@ -37,12 +36,6 @@ public enum Type { private final Type type; - /** - * This identifies a particular event. It is used to disambiguate events via {@link #hashCode()} and - * {@link #equals(Object)} and can be used in log messages when debugging. - */ - private final Uuid id; - /** * The time in milliseconds when this event was enqueued. * This field can be changed after the event is created, so it should not be used in hashCode or equals. @@ -51,17 +44,12 @@ public enum Type { protected BackgroundEvent(Type type) { this.type = Objects.requireNonNull(type); - this.id = Uuid.randomUuid(); } public Type type() { return type; } - public Uuid id() { - return id; - } - public void setEnqueuedMs(long enqueuedMs) { this.enqueuedMs = enqueuedMs; } @@ -70,21 +58,8 @@ public long enqueuedMs() { return enqueuedMs; } - @Override - public final boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - BackgroundEvent that = (BackgroundEvent) o; - return type == that.type && id.equals(that.id); - } - - @Override - public final int hashCode() { - return Objects.hash(type, id); - } - protected String toStringBase() { - return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; + return "type=" + type + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java index 20231b0f99a10..bb59a4ec7e389 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java @@ -45,7 +45,7 @@ public interface CompletableEvent { * (if applicable) is passed to {@link CompletableFuture#complete(Object)}. In the case where the generic * bound type is specified as {@link Void}, {@code null} is provided. *

- * Error: when the the event logic generates an error, the error is passed to + * Error: when the event logic generates an error, the error is passed to * {@link CompletableFuture#completeExceptionally(Throwable)}. *
*
  • diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java index 5a0358df8964f..b4440de06264b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java @@ -25,11 +25,10 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collection; +import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.concurrent.CompletableFuture; -import java.util.function.Consumer; -import java.util.stream.Collectors; /** * {@code CompletableEventReaper} is responsible for tracking {@link CompletableEvent time-bound events} and removing @@ -85,26 +84,39 @@ public void add(CompletableEvent event) { * @return The number of events that were expired */ public long reap(long currentTimeMs) { - Consumer> expireEvent = event -> { - long pastDueMs = currentTimeMs - event.deadlineMs(); - TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, event.deadlineMs())); + int count = 0; + Iterator> iterator = tracked.iterator(); + + while (iterator.hasNext()) { + CompletableEvent event = iterator.next(); + + if (event.future().isDone()) { + // Remove any events that are already complete. + iterator.remove(); + continue; + } + + long deadlineMs = event.deadlineMs(); + long pastDueMs = currentTimeMs - deadlineMs; + + if (pastDueMs < 0) + continue; + + TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, deadlineMs)); + + // Complete (exceptionally) any events that have passed their deadline AND aren't already complete. if (event.future().completeExceptionally(error)) { - log.debug("Event {} completed exceptionally since its expiration of {} passed {} ms ago", event, event.deadlineMs(), pastDueMs); + log.debug("Event {} completed exceptionally since its expiration of {} passed {} ms ago", event, deadlineMs, pastDueMs); } else { log.trace("Event {} not completed exceptionally since it was previously completed", event); } - }; - - // First, complete (exceptionally) any events that have passed their deadline AND aren't already complete. - long count = tracked.stream() - .filter(e -> !e.future().isDone()) - .filter(e -> currentTimeMs >= e.deadlineMs()) - .peek(expireEvent) - .count(); - // Second, remove any events that are already complete, just to make sure we don't hold references. This will - // include any events that finished successfully as well as any events we just completed exceptionally above. - tracked.removeIf(e -> e.future().isDone()); + + count++; + + // Remove the events so that we don't hold a reference to it. 
+ iterator.remove(); + } return count; } @@ -131,29 +143,12 @@ public long reap(long currentTimeMs) { public long reap(Collection events) { Objects.requireNonNull(events, "Event queue to reap must be non-null"); - Consumer> expireEvent = event -> { - TimeoutException error = new TimeoutException(String.format("%s could not be completed before the consumer closed", event.getClass().getSimpleName())); - - if (event.future().completeExceptionally(error)) { - log.debug("Event {} completed exceptionally since the consumer is closing", event); - } else { - log.trace("Event {} not completed exceptionally since it was completed prior to the consumer closing", event); - } - }; - - long trackedExpiredCount = tracked.stream() - .filter(e -> !e.future().isDone()) - .peek(expireEvent) - .count(); + long trackedExpiredCount = completeEventsExceptionallyOnClose(tracked); tracked.clear(); - long eventExpiredCount = events.stream() - .filter(e -> e instanceof CompletableEvent) - .map(e -> (CompletableEvent) e) - .filter(e -> !e.future().isDone()) - .peek(expireEvent) - .count(); + long eventExpiredCount = completeEventsExceptionallyOnClose(events); events.clear(); + return trackedExpiredCount + eventExpiredCount; } @@ -166,9 +161,51 @@ public boolean contains(CompletableEvent event) { } public List> uncompletedEvents() { - return tracked.stream() - .filter(e -> !e.future().isDone()) - .collect(Collectors.toList()); + // The following code does not use the Java Collections Streams API to reduce overhead in the critical + // path of the ConsumerNetworkThread loop. + List> events = new ArrayList<>(); + + for (CompletableEvent event : tracked) { + if (!event.future().isDone()) + events.add(event); + } + + return events; + } + + /** + * For all the {@link CompletableEvent}s in the collection, if they're not already complete, invoke + * {@link CompletableFuture#completeExceptionally(Throwable)}. 
+ * + * @param events Collection of objects, assumed to be subclasses of {@link ApplicationEvent} or + * {@link BackgroundEvent}, but will only perform completion for any + * unfinished {@link CompletableEvent}s + * + * @return Number of events closed + */ + private long completeEventsExceptionallyOnClose(Collection events) { + long count = 0; + + for (Object o : events) { + if (!(o instanceof CompletableEvent)) + continue; + + CompletableEvent event = (CompletableEvent) o; + + if (event.future().isDone()) + continue; + + count++; + + TimeoutException error = new TimeoutException(String.format("%s could not be completed before the consumer closed", event.getClass().getSimpleName())); + + if (event.future().completeExceptionally(error)) { + log.debug("Event {} completed exceptionally since the consumer is closing", event); + } else { + log.trace("Event {} not completed exceptionally since it was completed prior to the consumer closing", event); + } + } + + return count; } - } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java index 09e84cbe985cc..2f90440a66244 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java @@ -24,10 +24,7 @@ import java.util.Arrays; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; - -public class AsyncConsumerMetrics extends KafkaConsumerMetrics implements AutoCloseable { +public class AsyncConsumerMetrics implements AutoCloseable { private final Metrics metrics; public static final String TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME = "time-between-network-thread-poll"; @@ -51,15 +48,13 @@ public class AsyncConsumerMetrics extends KafkaConsumerMetrics implements AutoCl private final Sensor unsentRequestsQueueSizeSensor; private final Sensor unsentRequestsQueueTimeSensor; - public AsyncConsumerMetrics(Metrics metrics) { - super(metrics, CONSUMER_METRIC_GROUP_PREFIX); - + public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.metrics = metrics; this.timeBetweenNetworkThreadPollSensor = metrics.sensor(TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME); this.timeBetweenNetworkThreadPollSensor.add( metrics.metricName( "time-between-network-thread-poll-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time taken, in milliseconds, between each poll in the network thread." ), new Avg() @@ -67,7 +62,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.timeBetweenNetworkThreadPollSensor.add( metrics.metricName( "time-between-network-thread-poll-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time taken, in milliseconds, between each poll in the network thread." ), new Max() @@ -77,7 +72,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.applicationEventQueueSizeSensor.add( metrics.metricName( APPLICATION_EVENT_QUEUE_SIZE_SENSOR_NAME, - CONSUMER_METRIC_GROUP, + groupName, "The current number of events in the queue to send from the application thread to the background thread." 
), new Value() @@ -87,7 +82,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.applicationEventQueueTimeSensor.add( metrics.metricName( "application-event-queue-time-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time, in milliseconds, that application events are taking to be dequeued." ), new Avg() @@ -95,7 +90,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.applicationEventQueueTimeSensor.add( metrics.metricName( "application-event-queue-time-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time, in milliseconds, that an application event took to be dequeued." ), new Max() @@ -105,14 +100,14 @@ public AsyncConsumerMetrics(Metrics metrics) { this.applicationEventQueueProcessingTimeSensor.add( metrics.metricName( "application-event-queue-processing-time-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time, in milliseconds, that the background thread takes to process all available application events." ), new Avg() ); this.applicationEventQueueProcessingTimeSensor.add( metrics.metricName("application-event-queue-processing-time-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time, in milliseconds, that the background thread took to process all available application events." ), new Max() @@ -122,7 +117,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.applicationEventExpiredSizeSensor.add( metrics.metricName( APPLICATION_EVENT_EXPIRED_SIZE_SENSOR_NAME, - CONSUMER_METRIC_GROUP, + groupName, "The current number of expired application events." ), new Value() @@ -132,7 +127,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.unsentRequestsQueueSizeSensor.add( metrics.metricName( UNSENT_REQUESTS_QUEUE_SIZE_SENSOR_NAME, - CONSUMER_METRIC_GROUP, + groupName, "The current number of unsent requests in the background thread." ), new Value() @@ -142,7 +137,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.unsentRequestsQueueTimeSensor.add( metrics.metricName( "unsent-requests-queue-time-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time, in milliseconds, that requests are taking to be sent in the background thread." ), new Avg() @@ -150,7 +145,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.unsentRequestsQueueTimeSensor.add( metrics.metricName( "unsent-requests-queue-time-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time, in milliseconds, that a request remained unsent in the background thread." ), new Max() @@ -160,7 +155,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.backgroundEventQueueSizeSensor.add( metrics.metricName( BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, - CONSUMER_METRIC_GROUP, + groupName, "The current number of events in the queue to send from the background thread to the application thread." ), new Value() @@ -170,7 +165,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.backgroundEventQueueTimeSensor.add( metrics.metricName( "background-event-queue-time-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time, in milliseconds, that background events are taking to be dequeued." ), new Avg() @@ -178,7 +173,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.backgroundEventQueueTimeSensor.add( metrics.metricName( "background-event-queue-time-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time, in milliseconds, that background events are taking to be dequeued." 
), new Max() @@ -188,7 +183,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.backgroundEventQueueProcessingTimeSensor.add( metrics.metricName( "background-event-queue-processing-time-avg", - CONSUMER_METRIC_GROUP, + groupName, "The average time, in milliseconds, that the consumer took to process all available background events." ), new Avg() @@ -196,7 +191,7 @@ public AsyncConsumerMetrics(Metrics metrics) { this.backgroundEventQueueProcessingTimeSensor.add( metrics.metricName( "background-event-queue-processing-time-max", - CONSUMER_METRIC_GROUP, + groupName, "The maximum time, in milliseconds, that the consumer took to process all available background events." ), new Max() @@ -257,6 +252,5 @@ public void close() { unsentRequestsQueueSizeSensor.name(), unsentRequestsQueueTimeSensor.name() ).forEach(metrics::removeSensor); - super.close(); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java index c312edd54b602..e271dee526172 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.clients.consumer.internals.metrics; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; @@ -27,7 +29,9 @@ import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.metrics.stats.WindowedCount; +import java.util.Collection; import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; @@ -44,11 +48,14 @@ public final class ConsumerRebalanceMetricsManager extends RebalanceMetricsManag public final MetricName lastRebalanceSecondsAgo; public final MetricName failedRebalanceTotal; public final MetricName failedRebalanceRate; + public final MetricName assignedPartitionsCount; private long lastRebalanceEndMs = -1L; private long lastRebalanceStartMs = -1L; + private final Metrics metrics; - public ConsumerRebalanceMetricsManager(Metrics metrics) { + public ConsumerRebalanceMetricsManager(Metrics metrics, SubscriptionState subscriptions) { super(CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX); + this.metrics = metrics; rebalanceLatencyAvg = createMetric(metrics, "rebalance-latency-avg", "The average time in ms taken for a group to complete a rebalance"); @@ -64,6 +71,9 @@ public ConsumerRebalanceMetricsManager(Metrics metrics) { "The total number of failed rebalance events"); failedRebalanceRate = createMetric(metrics, "failed-rebalance-rate-per-hour", "The number of failed rebalance events per hour"); + assignedPartitionsCount = createMetric(metrics, "assigned-partitions", + "The number of partitions currently assigned to this consumer"); + registerAssignedPartitionCount(subscriptions); successfulRebalanceSensor = metrics.sensor("rebalance-latency"); successfulRebalanceSensor.add(rebalanceLatencyAvg, new Avg()); @@ -106,4 +116,15 @@ public void 
maybeRecordRebalanceFailed() { public boolean rebalanceStarted() { return lastRebalanceStartMs > lastRebalanceEndMs; } + + /** + * Register metric to track the number of assigned partitions. + * It will consider partitions assigned to the consumer + * regardless of whether they were assigned via {@link KafkaConsumer#subscribe(Pattern)} or + * {@link KafkaConsumer#assign(Collection)} + */ + private void registerAssignedPartitionCount(SubscriptionState subscriptions) { + Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions(); + metrics.addMetric(assignedPartitionsCount, numParts); + } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java index 52502e714a947..1b2bb4518f979 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java @@ -26,7 +26,7 @@ import java.util.concurrent.TimeUnit; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRICS_SUFFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; public class KafkaConsumerMetrics implements AutoCloseable { private final Metrics metrics; @@ -39,9 +39,9 @@ public class KafkaConsumerMetrics implements AutoCloseable { private long pollStartMs; private long timeSinceLastPollMs; - public KafkaConsumerMetrics(Metrics metrics, String metricGrpPrefix) { + public KafkaConsumerMetrics(Metrics metrics) { this.metrics = metrics; - final String metricGroupName = metricGrpPrefix + CONSUMER_METRICS_SUFFIX; + final String metricGroupName = CONSUMER_METRIC_GROUP; Measurable lastPoll = (mConfig, now) -> { if (lastPollMs == 0L) // if no poll is ever triggered, just return -1. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java index b7da8245aaaa8..e154b97da5a80 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java @@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRICS_SUFFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; public class KafkaShareConsumerMetrics implements AutoCloseable { private final Metrics metrics; @@ -36,9 +36,9 @@ public class KafkaShareConsumerMetrics implements AutoCloseable { private long pollStartMs; private long timeSinceLastPollMs; - public KafkaShareConsumerMetrics(Metrics metrics, String metricGrpPrefix) { + public KafkaShareConsumerMetrics(Metrics metrics) { this.metrics = metrics; - final String metricGroupName = metricGrpPrefix + CONSUMER_METRICS_SUFFIX; + final String metricGroupName = CONSUMER_SHARE_METRIC_GROUP; Measurable lastPoll = (mConfig, now) -> { if (lastPollMs == 0L) // if no poll is ever triggered, just return -1. 
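The `assigned-partitions` gauge registered above by `registerAssignedPartitionCount` is exposed through the consumer's metrics map like any other client metric. Below is a minimal sketch, not part of this patch, of how it could be read; the metric group name check is an assumption based on the CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX convention used in this file, and the helper class name is illustrative only.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

import java.util.Map;

// Illustrative helper (not part of this patch): reads the gauge added by
// ConsumerRebalanceMetricsManager. The group-name suffix check is an assumption
// based on the coordinator metrics naming convention used above.
public final class AssignedPartitionsGauge {

    public static double read(Consumer<?, ?> consumer) {
        for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
            MetricName name = entry.getKey();
            if ("assigned-partitions".equals(name.name()) && name.group().endsWith("coordinator-metrics")) {
                Object value = entry.getValue().metricValue();
                return value instanceof Number ? ((Number) value).doubleValue() : Double.NaN;
            }
        }
        return Double.NaN; // gauge not registered for this consumer instance
    }
}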
diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 64a3af83e6571..6e656f590e42f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -50,11 +50,13 @@ import org.apache.kafka.common.errors.AuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; @@ -75,8 +77,8 @@ import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetryUtils; import org.apache.kafka.common.utils.AppInfoParser; -import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; @@ -256,7 +258,7 @@ public class KafkaProducer implements Producer { private final ProducerMetadata metadata; private final RecordAccumulator accumulator; private final Sender sender; - private final Thread ioThread; + private final Sender.SenderThread ioThread; private final Compression compression; private final Sensor errors; private final Time time; @@ -416,7 +418,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this.transactionManager = configureTransactionState(config, logContext); // There is no need to do work required for adaptive partitioning, if we use a custom partitioner. 
boolean enableAdaptivePartitioning = partitionerPlugin.get() == null && - config.getBoolean(ProducerConfig.PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG); + config.getBoolean(ProducerConfig.PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG); RecordAccumulator.PartitionerConfig partitionerConfig = new RecordAccumulator.PartitionerConfig( enableAdaptivePartitioning, config.getLong(ProducerConfig.PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG) @@ -454,7 +456,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this.errors = this.metrics.sensor("errors"); this.sender = newSender(logContext, kafkaClient, this.metadata); String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId; - this.ioThread = new KafkaThread(ioThreadName, this.sender, true); + this.ioThread = new Sender.SenderThread(ioThreadName, this.sender, true); this.ioThread.start(); config.logUnused(); AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -480,7 +482,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali ProducerInterceptors interceptors, Partitioner partitioner, Time time, - KafkaThread ioThread, + Sender.SenderThread ioThread, Optional clientTelemetryReporter) { this.producerConfig = config; this.time = time; @@ -596,14 +598,17 @@ private TransactionManager configureTransactionState(ProducerConfig config, if (config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { final String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG); + final boolean enable2PC = config.getBoolean(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG); final int transactionTimeoutMs = config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG); final long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); + transactionManager = new TransactionManager( logContext, transactionalId, transactionTimeoutMs, retryBackoffMs, - apiVersions + apiVersions, + enable2PC ); if (transactionManager.isTransactional()) @@ -618,8 +623,13 @@ private TransactionManager configureTransactionState(ProducerConfig config, } /** + * Initialize the transactional state for this producer, similar to {@link #initTransactions()} but + * with additional capabilities to keep a previously prepared transaction. + * * Needs to be called before any other methods when the {@code transactional.id} is set in the configuration. - * This method does the following: + * + * When {@code keepPreparedTxn} is {@code false}, this behaves like the standard transactional + * initialization where the method does the following: *
      *
    1. Ensures any transactions initiated by previous instances of the producer with the same * {@code transactional.id} are completed. If the previous instance had failed with a transaction in @@ -628,26 +638,39 @@ private TransactionManager configureTransactionState(ProducerConfig config, *
    2. Gets the internal producer id and epoch, used in all future transactional * messages issued by the producer.
    3. *
    + * + *

    + * When {@code keepPreparedTxn} is set to {@code true}, the producer does not automatically abort existing + * transactions. Instead, it enters a recovery mode allowing only finalization of those previously + * prepared transactions. + * This behavior is especially crucial for 2PC scenarios, where transactions should remain intact + * until the external transaction manager decides whether to commit or abort. + *

    + * + * @param keepPreparedTxn true to retain any in-flight prepared transactions (necessary for 2PC + * recovery), false to abort existing transactions and behave like + * the standard initTransactions. + * * Note that this method will raise {@link TimeoutException} if the transactional state cannot * be initialized before expiration of {@code max.block.ms}. Additionally, it will raise {@link InterruptException} * if interrupted. It is safe to retry in either case, but once the transactional state has been successfully * initialized, this method should no longer be used. * - * @throws IllegalStateException if no {@code transactional.id} has been configured - * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker - * does not support transactions (i.e. if its version is lower than 0.11.0.0) - * @throws org.apache.kafka.common.errors.AuthorizationException error indicating that the configured - * transactional.id is not authorized, or the idempotent producer id is unavailable. See the exception for - * more details. User may retry this function call after fixing the permission. - * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error + * @throws IllegalStateException if no {@code transactional.id} is configured + * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the broker does not + * support transactions (i.e. if its version is lower than 0.11.0.0) + * @throws org.apache.kafka.common.errors.TransactionalIdAuthorizationException if the configured + * {@code transactional.id} is unauthorized either for normal transaction writes or 2PC. + * @throws KafkaException if the producer encounters a fatal error or any other unexpected error * @throws TimeoutException if the time taken for initialize the transaction has surpassed max.block.ms. * @throws InterruptException if the thread is interrupted while blocked */ - public void initTransactions() { + public void initTransactions(boolean keepPreparedTxn) { throwIfNoTransactionManager(); throwIfProducerClosed(); + throwIfInPreparedState(); long now = time.nanoseconds(); - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(keepPreparedTxn); sender.wakeup(); result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS); producerMetrics.recordInit(time.nanoseconds() - now); @@ -672,6 +695,7 @@ public void initTransactions() { public void beginTransaction() throws ProducerFencedException { throwIfNoTransactionManager(); throwIfProducerClosed(); + throwIfInPreparedState(); long now = time.nanoseconds(); transactionManager.beginTransaction(); producerMetrics.recordBeginTxn(time.nanoseconds() - now); @@ -731,6 +755,7 @@ public void sendOffsetsToTransaction(Map offs throwIfInvalidGroupMetadata(groupMetadata); throwIfNoTransactionManager(); throwIfProducerClosed(); + throwIfInPreparedState(); if (!offsets.isEmpty()) { long start = time.nanoseconds(); @@ -741,6 +766,49 @@ public void sendOffsetsToTransaction(Map offs } } + /** + * Prepares the current transaction for a two-phase commit. This method will flush all pending messages + * and transition the producer into a mode where only {@link #commitTransaction()}, {@link #abortTransaction()}, + * or completeTransaction(PreparedTxnState) may be called. + *
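To make the initialization path above concrete before the prepareTransaction() Javadoc continues below, here is a minimal sketch of constructing a 2PC-capable producer and initializing it. The bootstrap address, transactional.id, and the hasPendingPreparedTxn flag are illustrative assumptions, not part of this patch; only the two config keys and initTransactions(boolean) come from the changes shown here.

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class TwoPhaseCommitInitSketch {
    // hasPendingPreparedTxn would come from the external transaction coordinator's own log.
    public static KafkaProducer<String, String> create(boolean hasPendingPreparedTxn) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");          // illustrative
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "payments-txn-1");            // illustrative
        props.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, "true");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        // true  -> keep a previously prepared transaction so the external coordinator can finish it;
        // false -> abort any pending transaction, matching the pre-existing initTransactions() behaviour.
        producer.initTransactions(hasPendingPreparedTxn);
        return producer;
    }
}
```

Note that, per the ProducerConfig validation added later in this patch, transaction.timeout.ms must not be set explicitly when transaction.two.phase.commit.enable is true.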

    + * This method is used as part of a two-phase commit protocol: + *

      + *
+ * <ol>
+ *   <li>Prepare the transaction by calling this method. This returns a {@link PreparedTxnState} if successful.</li>
+ *   <li>Make any external system changes that need to be atomic with this transaction.</li>
+ *   <li>Complete the transaction by calling {@link #commitTransaction()}, {@link #abortTransaction()} or
+ *       completeTransaction(PreparedTxnState).</li>
+ * </ol>
    + * + * @return the prepared transaction state to use when completing the transaction + * + * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started yet. + * @throws InvalidTxnStateException if the producer is not in a state where preparing + * a transaction is possible or 2PC is not enabled. + * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active + * @throws UnsupportedVersionException fatal error indicating the broker + * does not support transactions (i.e. if its version is lower than 0.11.0.0) + * @throws AuthorizationException fatal error indicating that the configured + * transactional.id is not authorized. See the exception for more details + * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error + * @throws TimeoutException if the time taken for preparing the transaction has surpassed max.block.ms + * @throws InterruptException if the thread is interrupted while blocked + */ + @Override + public PreparedTxnState prepareTransaction() throws ProducerFencedException { + throwIfNoTransactionManager(); + throwIfProducerClosed(); + throwIfInPreparedState(); + if (!transactionManager.is2PCEnabled()) { + throw new InvalidTxnStateException("Cannot prepare a transaction when 2PC is not enabled"); + } + long now = time.nanoseconds(); + flush(); + transactionManager.prepareTransaction(); + producerMetrics.recordPrepareTxn(time.nanoseconds() - now); + ProducerIdAndEpoch producerIdAndEpoch = transactionManager.preparedTransactionState(); + return new PreparedTxnState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); + } + /** * Commits the ongoing transaction. This method will flush any unsent records before actually committing the transaction. *
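Continuing the earlier sketch, the prepare step follows the three-step protocol listed in the Javadoc above: produce inside the transaction, call prepareTransaction(), and durably record the returned state alongside the external work. storeTokenWithExternalWork(...) is a hypothetical stand-in for that external store, and the topic and record values are made up.

```java
// Prepare phase (sketch): flush and prepare, then persist the "producerId:epoch" token.
static void prepare(KafkaProducer<String, String> producer) {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("payments", "order-42", "debit:10.00"));

    PreparedTxnState prepared = producer.prepareTransaction();   // flushes, then enters the prepared state
    storeTokenWithExternalWork(prepared.toString());             // hypothetical durable store
}
```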

    @@ -818,6 +886,41 @@ public void abortTransaction() throws ProducerFencedException { producerMetrics.recordAbortTxn(time.nanoseconds() - abortStart); } + /** + * Completes a prepared transaction by comparing the provided prepared transaction state with the + * current prepared state on the producer. + * If they match, the transaction is committed; otherwise, it is aborted. + * + * @param preparedTxnState The prepared transaction state to compare against the current state + * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started + * @throws InvalidTxnStateException if the producer is not in prepared state + * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active + * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error + * @throws TimeoutException if the time taken for completing the transaction has surpassed max.block.ms + * @throws InterruptException if the thread is interrupted while blocked + */ + @Override + public void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException { + throwIfNoTransactionManager(); + throwIfProducerClosed(); + + if (!transactionManager.isPrepared()) { + throw new InvalidTxnStateException("Cannot complete transaction because no transaction has been prepared. " + + "Call prepareTransaction() first, or make sure initTransaction(true) was called."); + } + + // Get the current prepared transaction state + ProducerIdAndEpoch currentProducerIdAndEpoch = transactionManager.preparedTransactionState(); + PreparedTxnState currentPreparedState = new PreparedTxnState(currentProducerIdAndEpoch.producerId, currentProducerIdAndEpoch.epoch); + + // Compare the prepared transaction state token and commit or abort accordingly + if (currentPreparedState.equals(preparedTxnState)) { + commitTransaction(); + } else { + abortTransaction(); + } + } + /** * Asynchronously send a record to a topic. Equivalent to send(record, null). * See {@link #send(ProducerRecord, Callback)} for details. @@ -924,15 +1027,15 @@ public Future send(ProducerRecord record) { * expensive callbacks it is recommended to use your own {@link java.util.concurrent.Executor} in the callback body * to parallelize processing. * - * @param record The record to send + * @param record The record to send * @param callback A user-supplied callback to execute when the record has been acknowledged by the server (null - * indicates no callback) - * - * @throws IllegalStateException if a transactional.id has been configured and no transaction has been started, or - * when send is invoked after producer has been closed. - * @throws InterruptException If the thread is interrupted while blocked + * indicates no callback) + * @throws IllegalStateException if a transactional.id has been configured and no transaction has been started, or + * when send is invoked after producer has been closed. + * @throws TimeoutException if the topic or the partition specified in the record cannot be found in metadata within {@code max.block.ms} + * @throws InterruptException If the thread is interrupted while blocked * @throws SerializationException If the key or value are not valid objects given the configured serializers - * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions. + * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions. 
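The completion side of the same sketch runs after a restart: re-initialize with keepPreparedTxn = true, rebuild the PreparedTxnState from the stored token, and let the comparison in completeTransaction() above decide the outcome. loadStoredToken() is a hypothetical helper; an empty or stale token produces a mismatching state and therefore an abort.

```java
// Completion/recovery phase (sketch): commit if the stored token matches the prepared state, abort otherwise.
static void recover(KafkaProducer<String, String> producer) {
    producer.initTransactions(true);                                    // keep the prepared transaction alive
    PreparedTxnState stored = new PreparedTxnState(loadStoredToken());  // hypothetical helper, returns "producerId:epoch" or ""
    producer.completeTransaction(stored);
}
```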
*/ @Override public Future send(ProducerRecord record, Callback callback) { @@ -948,6 +1051,23 @@ private void throwIfProducerClosed() { throw new IllegalStateException("Cannot perform operation after producer has been closed"); } + /** + * Throws an exception if the transaction is in a prepared state. + * In a two-phase commit (2PC) flow, once a transaction enters the prepared state, + * only commit, abort, or complete operations are allowed. + * + * @throws IllegalStateException if any other operation is attempted in the prepared state. + */ + private void throwIfInPreparedState() { + if (transactionManager != null && + transactionManager.isTransactional() && + transactionManager.isPrepared() + ) { + throw new IllegalStateException("Cannot perform operation while the transaction is in a prepared state. " + + "Only commitTransaction(), abortTransaction(), or completeTransaction() are permitted."); + } + } + /** * Implementation of asynchronously send a record to a topic. */ @@ -959,6 +1079,8 @@ private Future doSend(ProducerRecord record, Callback call try { throwIfProducerClosed(); + throwIfInPreparedState(); + // first make sure the metadata for the topic is available long nowMs = time.milliseconds(); ClusterAndWaitTime clusterAndWaitTime; @@ -1213,11 +1335,14 @@ public void flush() { /** * Get the partition metadata for the given topic. This can be used for custom partitioning. + *
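The throwIfInPreparedState() guard added above changes what the application sees once a transaction is prepared: sends and other transactional calls fail fast until the transaction is finalized. A small sketch, continuing the example above (topic and values illustrative):

```java
// Once prepared, only commitTransaction(), abortTransaction() or completeTransaction(...) are accepted.
static void preparedStateGuard(KafkaProducer<String, String> producer) {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("payments", "k1", "v1"));
    producer.prepareTransaction();

    try {
        producer.send(new ProducerRecord<>("payments", "k2", "v2"));   // rejected by throwIfInPreparedState()
    } catch (IllegalStateException expected) {
        // finish the transaction instead of sending more records
    }
    producer.commitTransaction();
}
```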

    + * This will attempt to refresh metadata until it finds the topic in it, or the configured {@link ProducerConfig#MAX_BLOCK_MS_CONFIG} expires. + * * @throws AuthenticationException if authentication fails. See the exception for more details - * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details - * @throws InterruptException if the thread is interrupted while blocked - * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms} - * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close + * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details + * @throws InterruptException if the thread is interrupted while blocked + * @throws TimeoutException if the topic cannot be found in metadata within {@code max.block.ms} + * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close */ @Override public List partitionsFor(String topic) { @@ -1483,11 +1608,6 @@ String getClientId() { return clientId; } - // Visible for testing - TransactionManager getTransactionManager() { - return transactionManager; - } - private static class ClusterAndWaitTime { final Cluster cluster; final long waitedOnMetadataMs; @@ -1546,6 +1666,7 @@ private class AppendCallbacks implements RecordAccumulator.AppendCallbacks { private final String recordLogString; private volatile int partition = RecordMetadata.UNKNOWN_PARTITION; private volatile TopicPartition topicPartition; + private final Headers headers; private AppendCallbacks(Callback userCallback, ProducerInterceptors interceptors, ProducerRecord record) { this.userCallback = userCallback; @@ -1554,6 +1675,12 @@ private AppendCallbacks(Callback userCallback, ProducerInterceptors interc // whole lifetime of the batch. // We don't want to have an NPE here, because the interceptors would not be notified (see .doSend). topic = record != null ? record.topic() : null; + if (record != null) { + headers = record.headers(); + } else { + headers = new RecordHeaders(); + ((RecordHeaders) headers).setReadOnly(); + } recordPartition = record != null ? record.partition() : null; recordLogString = log.isTraceEnabled() && record != null ? 
record.toString() : ""; } @@ -1563,7 +1690,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { if (metadata == null) { metadata = new RecordMetadata(topicPartition(), -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1); } - this.interceptors.onAcknowledgement(metadata, exception); + this.interceptors.onAcknowledgement(metadata, exception, headers); if (this.userCallback != null) this.userCallback.onCompletion(metadata, exception); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java index a4aac86df09fc..3e5cb9f5d5ab3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java @@ -142,7 +142,7 @@ public MockProducer() { } @Override - public void initTransactions() { + public void initTransactions(boolean keepPreparedTxn) { verifyNotClosed(); verifyNotFenced(); if (this.transactionInitialized) { @@ -200,6 +200,18 @@ public void sendOffsetsToTransaction(Map offs this.sentOffsets = true; } + @Override + public PreparedTxnState prepareTransaction() throws ProducerFencedException { + verifyNotClosed(); + verifyNotFenced(); + verifyTransactionsInitialized(); + verifyTransactionInFlight(); + + // Return a new PreparedTxnState with mock values for producerId and epoch + // Using 1000L and (short)1 as arbitrary values for a valid PreparedTxnState + return new PreparedTxnState(1000L, (short) 1); + } + @Override public void commitTransaction() throws ProducerFencedException { verifyNotClosed(); @@ -245,6 +257,27 @@ public void abortTransaction() throws ProducerFencedException { this.transactionInFlight = false; } + @Override + public void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException { + verifyNotClosed(); + verifyNotFenced(); + verifyTransactionsInitialized(); + + if (!this.transactionInFlight) { + throw new IllegalStateException("There is no prepared transaction to complete."); + } + + // For testing purposes, we'll consider a prepared state with producerId=1000L and epoch=1 as valid + // This should match what's returned in prepareTransaction() + PreparedTxnState currentState = new PreparedTxnState(1000L, (short) 1); + + if (currentState.equals(preparedTxnState)) { + commitTransaction(); + } else { + abortTransaction(); + } + } + private synchronized void verifyNotClosed() { if (this.closed) { throw new IllegalStateException("MockProducer is already closed."); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/PreparedTxnState.java b/clients/src/main/java/org/apache/kafka/clients/producer/PreparedTxnState.java new file mode 100644 index 0000000000000..b5ee45e807dab --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/producer/PreparedTxnState.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.producer; + +import org.apache.kafka.common.record.RecordBatch; + +/** + * Class containing the state of a transaction after it has been prepared for a two-phase commit. + * This state includes the producer ID and epoch, which are needed to commit or abort the transaction. + */ +public class PreparedTxnState { + private final long producerId; + private final short epoch; + + /** + * Creates a new empty PreparedTxnState + */ + public PreparedTxnState() { + this.producerId = RecordBatch.NO_PRODUCER_ID; + this.epoch = RecordBatch.NO_PRODUCER_EPOCH; + } + + /** + * Creates a new PreparedTxnState from a serialized string representation + * + * @param serializedState The serialized string to deserialize. + * @throws IllegalArgumentException if the serialized string is not in the expected format + */ + public PreparedTxnState(String serializedState) { + if (serializedState == null || serializedState.isEmpty()) { + this.producerId = RecordBatch.NO_PRODUCER_ID; + this.epoch = RecordBatch.NO_PRODUCER_EPOCH; + return; + } + + try { + String[] parts = serializedState.split(":"); + if (parts.length != 2) { + throw new IllegalArgumentException("Invalid serialized transaction state format: " + serializedState); + } + + this.producerId = Long.parseLong(parts[0]); + this.epoch = Short.parseShort(parts[1]); + + // Validate the producerId and epoch values. + if (!(this.producerId >= 0 && this.epoch >= 0)) { + throw new IllegalArgumentException("Invalid producer ID and epoch values: " + + producerId + ":" + epoch + ". Both must be >= 0"); + } + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid serialized transaction state format: " + serializedState, e); + } + } + + /** + * Creates a new PreparedTxnState with the given producer ID and epoch + * + * @param producerId The producer ID + * @param epoch The producer epoch + */ + PreparedTxnState(long producerId, short epoch) { + this.producerId = producerId; + this.epoch = epoch; + } + + public long producerId() { + return producerId; + } + + public short epoch() { + return epoch; + } + + /** + * Checks if this preparedTxnState represents an initialized transaction with a valid producer ID + * that is not -1 (the uninitialized value). + * + * @return true if the state has an initialized transaction, false otherwise. + */ + public boolean hasTransaction() { + return producerId != RecordBatch.NO_PRODUCER_ID; + } + + /** + * Returns a serialized string representation of this transaction state. + * The format is "producerId:epoch" for an initialized state, or an empty string + * for an uninitialized state (where producerId and epoch are both -1). 
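A short sketch of the serialization contract just described; the concrete producer id and epoch are illustrative values only.

```java
static void preparedTxnStateTokenExample() {
    PreparedTxnState restored = new PreparedTxnState("4001:7");    // "producerId:epoch", illustrative values
    assert restored.producerId() == 4001L && restored.epoch() == (short) 7;
    assert restored.hasTransaction();
    assert "4001:7".equals(restored.toString());

    assert !new PreparedTxnState("").hasTransaction();             // empty token -> uninitialized state, toString() is ""
    // new PreparedTxnState("abc")  -> IllegalArgumentException (wrong format)
    // new PreparedTxnState("-1:7") -> IllegalArgumentException (negative producer id)
}
```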
+ * + * @return a serialized string representation + */ + @Override + public String toString() { + if (!hasTransaction()) { + return ""; + } + return producerId + ":" + epoch; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PreparedTxnState that = (PreparedTxnState) o; + return producerId == that.producerId && epoch == that.epoch; + } + + @Override + public int hashCode() { + int result = 31; + result = 31 * result + Long.hashCode(producerId); + result = 31 * result + (int) epoch; + return result; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java index 798034dda6de2..e6e94691e3454 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java @@ -42,7 +42,14 @@ public interface Producer extends Closeable { /** * See {@link KafkaProducer#initTransactions()} */ - void initTransactions(); + default void initTransactions() { + initTransactions(false); + } + + /** + * See {@link KafkaProducer#initTransactions(boolean)} + */ + void initTransactions(boolean keepPreparedTxn); /** * See {@link KafkaProducer#beginTransaction()} @@ -55,6 +62,11 @@ public interface Producer extends Closeable { void sendOffsetsToTransaction(Map offsets, ConsumerGroupMetadata groupMetadata) throws ProducerFencedException; + /** + * See {@link KafkaProducer#prepareTransaction()} + */ + PreparedTxnState prepareTransaction() throws ProducerFencedException; + /** * See {@link KafkaProducer#commitTransaction()} */ @@ -65,6 +77,11 @@ void sendOffsetsToTransaction(Map offsets, */ void abortTransaction() throws ProducerFencedException; + /** + * See {@link KafkaProducer#completeTransaction(PreparedTxnState)} + */ + void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException; + /** * @see KafkaProducer#registerMetricForSubscription(KafkaMetric) */ diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 949c6c167ba8e..313648497bab1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -35,13 +35,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import static org.apache.kafka.common.config.ConfigDef.NO_DEFAULT_VALUE; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Range.between; import static org.apache.kafka.common.config.ConfigDef.ValidString.in; @@ -100,8 +101,10 @@ public class ProducerConfig extends AbstractConfig { + "similar or lower producer latency despite the increased linger."; /** partitioner.adaptive.partitioning.enable */ - public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; - private static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC = + public static final String PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; + @Deprecated + public 
static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG; + private static final String PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_DOC = "When set to 'true', the producer will try to adapt to broker performance and produce more messages to partitions hosted on faster brokers. " + "If 'false', the producer will try to distribute messages uniformly. Note: this setting has no effect if a custom partitioner is used."; @@ -110,7 +113,7 @@ public class ProducerConfig extends AbstractConfig { private static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC = "If a broker cannot process produce requests from a partition for " + PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG + " time, " + "the partitioner treats that partition as not available. If the value is 0, this logic is disabled. " - + "Note: this setting has no effect if a custom partitioner is used or " + PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG + + "Note: this setting has no effect if a custom partitioner is used or " + PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG + " is set to 'false'."; /** partitioner.ignore.keys */ @@ -274,11 +277,12 @@ public class ProducerConfig extends AbstractConfig { /** retries */ public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG; - private static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error." - + " Note that this retry is no different than if the client resent the record upon receiving the error." - + " Produce requests will be failed before the number of retries has been exhausted if the timeout configured by" - + " " + DELIVERY_TIMEOUT_MS_CONFIG + " expires first before successful acknowledgement. Users should generally" - + " prefer to leave this config unset and instead use " + DELIVERY_TIMEOUT_MS_CONFIG + " to control" + private static final String RETRIES_DOC = "Number of times to retry a request that fails with a transient error." + + " Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. " + + " Requests will be retried this many times until they succeed, fail with a non-transient error, or the " + DELIVERY_TIMEOUT_MS_CONFIG + " expires." + + " Note that this automatic retry will simply resend the same record upon receiving the error." + + " Setting a value of zero will disable this automatic retry behaviour, so that the transient errors will be propagated to the application to be handled." + + " Users should generally prefer to leave this config unset and instead use " + DELIVERY_TIMEOUT_MS_CONFIG + " to control" + " retry behavior." + "

    " + "Enabling idempotence requires this config value to be greater than 0." @@ -355,6 +359,11 @@ public class ProducerConfig extends AbstractConfig { "By default the TransactionId is not configured, which means transactions cannot be used. " + "Note that, by default, transactions require a cluster of at least three brokers which is the recommended setting for production; for development you can change this, by adjusting broker setting transaction.state.log.replication.factor."; + /** transaction.two.phase.commit.enable */ + public static final String TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG = "transaction.two.phase.commit.enable"; + private static final String TRANSACTION_TWO_PHASE_COMMIT_ENABLE_DOC = "If set to true, then the broker is informed that the client is participating in " + + "two phase commit protocol and transactions that this client starts never expire."; + /** * security.providers */ @@ -364,7 +373,12 @@ public class ProducerConfig extends AbstractConfig { private static final AtomicInteger PRODUCER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); static { - CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) + CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, + Type.LIST, + NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), + Importance.HIGH, + CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, ClientDnsLookup.USE_ALL_DNS_IPS.toString(), @@ -385,7 +399,7 @@ public class ProducerConfig extends AbstractConfig { .define(COMPRESSION_LZ4_LEVEL_CONFIG, Type.INT, CompressionType.LZ4.defaultLevel(), CompressionType.LZ4.levelValidator(), Importance.MEDIUM, COMPRESSION_LZ4_LEVEL_DOC) .define(COMPRESSION_ZSTD_LEVEL_CONFIG, Type.INT, CompressionType.ZSTD.defaultLevel(), CompressionType.ZSTD.levelValidator(), Importance.MEDIUM, COMPRESSION_ZSTD_LEVEL_DOC) .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) - .define(PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC) + .define(PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_DOC) .define(PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.LOW, PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC) .define(PARTITIONER_IGNORE_KEYS_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, PARTITIONER_IGNORE_KEYS_DOC) .define(LINGER_MS_CONFIG, Type.LONG, 5, atLeast(0), Importance.MEDIUM, LINGER_MS_DOC) @@ -453,7 +467,7 @@ public class ProducerConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), - new ConfigDef.NonNullValidator(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, @@ -492,8 +506,8 @@ public class ProducerConfig extends AbstractConfig { Importance.MEDIUM, PARTITIONER_CLASS_DOC) .define(INTERCEPTOR_CLASSES_CONFIG, Type.LIST, - Collections.emptyList(), - new ConfigDef.NonNullValidator(), + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, INTERCEPTOR_CLASSES_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, @@ -526,6 +540,11 @@ public class ProducerConfig extends AbstractConfig { new 
ConfigDef.NonEmptyString(), Importance.LOW, TRANSACTIONAL_ID_DOC) + .define(TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, + Type.BOOLEAN, + false, + Importance.LOW, + TRANSACTION_TWO_PHASE_COMMIT_ENABLE_DOC) .define(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY, @@ -538,7 +557,13 @@ public class ProducerConfig extends AbstractConfig { CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, atLeast(0), Importance.LOW, - CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) + .define(CONFIG_PROVIDERS_CONFIG, + ConfigDef.Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.LOW, + CONFIG_PROVIDERS_DOC); } @Override @@ -609,6 +634,20 @@ private void postProcessAndValidateIdempotenceConfigs(final Map if (!idempotenceEnabled && userConfiguredTransactions) { throw new ConfigException("Cannot set a " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " without also enabling idempotence."); } + + // Validate that transaction.timeout.ms is not set when transaction.two.phase.commit.enable is true + // In standard Kafka transactions, the broker enforces transaction.timeout.ms and aborts any + // transaction that isn't completed in time. With two-phase commit (2PC), an external coordinator + // decides when to finalize, so broker-side timeouts don't apply. Disallow using both. + boolean enable2PC = this.getBoolean(TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG); + boolean userConfiguredTransactionTimeout = originalConfigs.containsKey(TRANSACTION_TIMEOUT_CONFIG); + if (enable2PC && userConfiguredTransactionTimeout) { + throw new ConfigException( + "Cannot set " + ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + + " when " + ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG + + " is set to true. Transactions will not expire with two-phase commit enabled." + ); + } } private static String parseAcks(String acksString) { @@ -643,10 +682,6 @@ public ProducerConfig(Map props) { super(CONFIG, props); } - ProducerConfig(Map props, boolean doLog) { - super(CONFIG, props, doLog); - } - public static Set configNames() { return CONFIG.names(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java index 5bc4b2c2c8520..4a813dc96babc 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.producer; import org.apache.kafka.common.Configurable; +import org.apache.kafka.common.header.Headers; /** * A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before @@ -83,12 +84,37 @@ public interface ProducerInterceptor extends Configurable, AutoCloseable { * @param metadata The metadata for the record that was sent (i.e. the partition and offset). * If an error occurred, metadata will contain only valid topic and maybe * partition. If partition is not given in ProducerRecord and an error occurs - * before partition gets assigned, then partition will be set to RecordMetadata.NO_PARTITION. + * before partition gets assigned, then partition will be set to {@link RecordMetadata#UNKNOWN_PARTITION}. 
* The metadata may be null if the client passed null record to * {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)}. * @param exception The exception thrown during processing of this record. Null if no error occurred. */ - void onAcknowledgement(RecordMetadata metadata, Exception exception); + default void onAcknowledgement(RecordMetadata metadata, Exception exception) {} + + /** + * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before + * it gets sent to the server. + *

    + * This method is generally called just before the user callback is called, and in additional cases when KafkaProducer.send() + * throws an exception. + *

    + * Any exception thrown by this method will be ignored by the caller. + *

    + * This method will generally execute in the background I/O thread, so the implementation should be reasonably fast. + * Otherwise, sending of messages from other threads could be delayed. + * + * @param metadata The metadata for the record that was sent (i.e. the partition and offset). + * If an error occurred, metadata will contain only valid topic and maybe + * partition. If partition is not given in ProducerRecord and an error occurs + * before partition gets assigned, then partition will be set to {@link RecordMetadata#UNKNOWN_PARTITION}. + * The metadata may be null if the client passed null record to + * {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)}. + * @param exception The exception thrown during processing of this record. Null if no error occurred. + * @param headers The headers for the record that was sent. It is read-only. + */ + default void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { + onAcknowledgement(metadata, exception); + } /** * This is called when interceptor is closed diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java index 7d942d572cfd5..6c94466c55e85 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java @@ -33,6 +33,7 @@ public class KafkaProducerMetrics implements AutoCloseable { private static final String TXN_SEND_OFFSETS = "txn-send-offsets"; private static final String TXN_COMMIT = "txn-commit"; private static final String TXN_ABORT = "txn-abort"; + private static final String TXN_PREPARE = "txn-prepare"; private static final String TOTAL_TIME_SUFFIX = "-time-ns-total"; private static final String METADATA_WAIT = "metadata-wait"; @@ -44,6 +45,7 @@ public class KafkaProducerMetrics implements AutoCloseable { private final Sensor sendOffsetsSensor; private final Sensor commitTxnSensor; private final Sensor abortTxnSensor; + private final Sensor prepareTxnSensor; private final Sensor metadataWaitSensor; public KafkaProducerMetrics(Metrics metrics) { @@ -73,6 +75,10 @@ public KafkaProducerMetrics(Metrics metrics) { TXN_ABORT, "Total time producer has spent in abortTransaction in nanoseconds." ); + prepareTxnSensor = newLatencySensor( + TXN_PREPARE, + "Total time producer has spent in prepareTransaction in nanoseconds." + ); metadataWaitSensor = newLatencySensor( METADATA_WAIT, "Total time producer has spent waiting on topic metadata in nanoseconds." 
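To illustrate the new three-argument onAcknowledgement hook added to ProducerInterceptor above, here is a hedged sketch of an interceptor that logs a tracing header on failed sends; the "trace-id" header name is an assumption, and the interceptor would be registered through interceptor.classes as usual.

```java
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;

public class TraceIdLoggingInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record; // pass records through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) {
        // The headers passed here are read-only; inspect, don't mutate.
        Header traceId = headers.lastHeader("trace-id");              // assumed header name
        if (exception != null && traceId != null) {
            System.err.println("send failed for trace-id "
                    + new String(traceId.value(), StandardCharsets.UTF_8) + ": " + exception);
        }
    }

    @Override
    public void configure(Map<String, ?> configs) {}

    @Override
    public void close() {}
}
```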
@@ -87,6 +93,7 @@ public void close() { removeMetric(TXN_SEND_OFFSETS); removeMetric(TXN_COMMIT); removeMetric(TXN_ABORT); + removeMetric(TXN_PREPARE); removeMetric(METADATA_WAIT); } @@ -114,6 +121,10 @@ public void recordAbortTxn(long duration) { abortTxnSensor.record(duration); } + public void recordPrepareTxn(long duration) { + prepareTxnSensor.record(duration); + } + public void recordMetadataWait(long duration) { metadataWaitSensor.record(duration); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java index 9936eef760940..71d3839cedd12 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java @@ -22,6 +22,8 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.header.Headers; +import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.RecordBatch; @@ -77,7 +79,7 @@ public ProducerRecord onSend(ProducerRecord record) { /** * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before - * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} + * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception, Headers)} * method for each interceptor. * * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. @@ -85,11 +87,12 @@ public ProducerRecord onSend(ProducerRecord record) { * @param metadata The metadata for the record that was sent (i.e. the partition and offset). * If an error occurred, metadata will only contain valid topic and maybe partition. * @param exception The exception thrown during processing of this record. Null if no error occurred. + * @param headers The headers for the record that was sent */ - public void onAcknowledgement(RecordMetadata metadata, Exception exception) { + public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptorPlugin.get().onAcknowledgement(metadata, exception); + interceptorPlugin.get().onAcknowledgement(metadata, exception, headers); } catch (Exception e) { // do not propagate interceptor exceptions, just log log.warn("Error executing interceptor onAcknowledgement callback", e); @@ -99,7 +102,7 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { /** * This method is called when sending the record fails in {@link ProducerInterceptor#onSend - * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} + * (ProducerRecord)} method. 
This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception, Headers)} * method for each interceptor * * @param record The record from client @@ -110,14 +113,22 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { public void onSendError(ProducerRecord record, TopicPartition interceptTopicPartition, Exception exception) { for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { + Headers headers = record != null ? record.headers() : new RecordHeaders(); + if (headers instanceof RecordHeaders && !((RecordHeaders) headers).isReadOnly()) { + // make a copy of the headers to make sure we don't change the state of origin record's headers. + // original headers are still writable because client might want to mutate them before retrying. + RecordHeaders recordHeaders = (RecordHeaders) headers; + headers = new RecordHeaders(recordHeaders); + ((RecordHeaders) headers).setReadOnly(); + } if (record == null && interceptTopicPartition == null) { - interceptorPlugin.get().onAcknowledgement(null, exception); + interceptorPlugin.get().onAcknowledgement(null, exception, headers); } else { if (interceptTopicPartition == null) { interceptTopicPartition = extractTopicPartition(record); } interceptorPlugin.get().onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, - RecordBatch.NO_TIMESTAMP, -1, -1), exception); + RecordBatch.NO_TIMESTAMP, -1, -1), exception, headers); } } catch (Exception e) { // do not propagate interceptor exceptions, just log diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index 35cda5e51634b..f0c2719db9612 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -514,7 +514,12 @@ public int splitAndReenqueue(ProducerBatch bigBatch) { // the split doesn't happen too often. 
CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression.type(), Math.max(1.0f, (float) bigBatch.compressionRatio())); - Deque dq = bigBatch.split(this.batchSize); + int targetSplitBatchSize = this.batchSize; + + if (bigBatch.isSplitBatch()) { + targetSplitBatchSize = Math.max(bigBatch.maxRecordSize, bigBatch.estimatedSizeInBytes() / 2); + } + Deque dq = bigBatch.split(targetSplitBatchSize); int numSplitBatches = dq.size(); Deque partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java index 614fe562d873e..64e8646d6f153 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java @@ -28,6 +28,7 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.FencedLeaderEpochException; @@ -52,6 +53,7 @@ import org.apache.kafka.common.requests.ProduceRequest; import org.apache.kafka.common.requests.ProduceResponse; import org.apache.kafka.common.requests.RequestHeader; +import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; @@ -234,9 +236,6 @@ private boolean hasPendingTransactionalRequests() { public void run() { log.debug("Starting Kafka producer I/O thread."); - if (transactionManager != null) - transactionManager.setPoisonStateOnInvalidTransition(true); - // main loop, runs until close is called while (running) { try { @@ -566,7 +565,7 @@ private boolean awaitNodeReady(Node node, FindCoordinatorRequest.CoordinatorType /** * Handle a produce response */ - private void handleProduceResponse(ClientResponse response, Map batches, long now) { + private void handleProduceResponse(ClientResponse response, Map batches, Map topicNames, long now) { RequestHeader requestHeader = response.requestHeader(); int correlationId = requestHeader.correlationId(); if (response.wasTimedOut()) { @@ -596,7 +595,6 @@ private void handleProduceResponse(ClientResponse response, Map partitionsWithUpdatedLeaderInfo = new HashMap<>(); produceResponse.data().responses().forEach(r -> r.partitionResponses().forEach(p -> { - TopicPartition tp = new TopicPartition(r.name(), p.index()); ProduceResponse.PartitionResponse partResp = new ProduceResponse.PartitionResponse( Errors.forCode(p.errorCode()), p.baseOffset(), @@ -608,7 +606,20 @@ private void handleProduceResponse(ClientResponse response, Map recordsByPartition = new HashMap<>(batches.size()); + Map topicIds = topicIdsForBatches(batches); + ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection(); for (ProducerBatch batch : batches) { TopicPartition tp = batch.topicPartition; MemoryRecords records = batch.records(); - ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic()); + Uuid topicId = topicIds.get(tp.topic()); + ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic(), topicId); + if (tpData == null) { - tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic()); + tpData = new 
ProduceRequestData.TopicProduceData() + .setTopicId(topicId).setName(tp.topic()); tpd.add(tpData); } + tpData.partitionData().add(new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition()) .setRecords(records)); @@ -885,7 +902,11 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo .setTopicData(tpd), useTransactionV1Version ); - RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds()); + // Fetch topic names from metadata outside callback as topic ids may change during the callback + // for example if topic was recreated. + Map topicNames = metadata.topicNames(); + + RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, topicNames, time.milliseconds()); String nodeId = Integer.toString(destination); ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, @@ -894,6 +915,15 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo log.trace("Sent produce request to {}: {}", nodeId, requestBuilder); } + private Map topicIdsForBatches(List batches) { + return batches.stream() + .collect(Collectors.toMap( + b -> b.topicPartition.topic(), + b -> metadata.topicIds().getOrDefault(b.topicPartition.topic(), Uuid.ZERO_UUID), + (existing, replacement) -> replacement) + ); + } + /** * Wake up the selector associated with this send thread */ @@ -1072,4 +1102,10 @@ void recordBatchSplit() { } } + public static class SenderThread extends KafkaThread { + + public SenderThread(final String name, Runnable runnable, boolean daemon) { + super(name, runnable, daemon); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java index c78134c72ecf2..969085809e656 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java @@ -33,10 +33,12 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidProducerEpochException; +import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.OutOfOrderSequenceException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.errors.TransactionAbortableException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnknownProducerIdException; import org.apache.kafka.common.errors.UnsupportedVersionException; @@ -120,58 +122,6 @@ public class TransactionManager { private final Set newPartitionsInTransaction; private final Set pendingPartitionsInTransaction; private final Set partitionsInTransaction; - - /** - * During its normal course of operations, the transaction manager transitions through different internal - * states (i.e. by updating {@link #currentState}) to one of those defined in {@link State}. These state transitions - * result from actions on one of the following classes of threads: - * - *

      - *
    • Application threads that invokes {@link Producer} API calls
    • - *
    • {@link Sender} thread operations
    • - *
    - * - * When an invalid state transition is detected during execution on an application thread, the - * {@link #currentState} is not updated and an {@link IllegalStateException} is thrown. This gives the - * application the opportunity to fix the issue without permanently poisoning the state of the - * transaction manager. The {@link Producer} API calls that perform a state transition include: - * - *
      - *
    • {@link Producer#initTransactions()} calls {@link #initializeTransactions()}
    • - *
    • {@link Producer#beginTransaction()} calls {@link #beginTransaction()}
    • - *
    • {@link Producer#commitTransaction()}} calls {@link #beginCommit()}
    • - *
    • {@link Producer#abortTransaction()} calls {@link #beginAbort()} - *
    • - *
    • {@link Producer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} calls - * {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} - *
    • - *
    • {@link Producer#send(ProducerRecord)} (and its variants) calls - * {@link #maybeAddPartition(TopicPartition)} and - * {@link #maybeTransitionToErrorState(RuntimeException)} - *
    • - *
    - * - *

    - * - * The {@link Producer} is implemented such that much of its work delegated to and performed asynchronously on the - * {@link Sender} thread. This includes record batching, network I/O, broker response handlers, etc. If an - * invalid state transition is detected in the {@link Sender} thread, in addition to throwing an - * {@link IllegalStateException}, the transaction manager intentionally "poisons" itself by setting its - * {@link #currentState} to {@link State#FATAL_ERROR}, a state from which it cannot recover. - * - *

    - * - * It's important to prevent possible corruption when the transaction manager has determined that it is in a - * fatal state. Subsequent transaction operations attempted via either the application or the - * {@link Sender} thread should fail. This is achieved when these operations invoke the - * {@link #maybeFailWithError()} method, as it causes a {@link KafkaException} to be thrown, ensuring the stated - * transactional guarantees are not violated. - * - *

    - * - * See KAFKA-14831 for more detail. - */ - private final ThreadLocal shouldPoisonStateOnInvalidTransition; private PendingStateTransition pendingTransition; // This is used by the TxnRequestHandlers to control how long to back off before a given request is retried. @@ -195,12 +145,15 @@ public class TransactionManager { private volatile boolean clientSideEpochBumpRequired = false; private volatile long latestFinalizedFeaturesEpoch = -1; private volatile boolean isTransactionV2Enabled = false; + private final boolean enable2PC; + private volatile ProducerIdAndEpoch preparedTxnState = ProducerIdAndEpoch.NONE; private enum State { UNINITIALIZED, INITIALIZING, READY, IN_TRANSACTION, + PREPARED_TRANSACTION, COMMITTING_TRANSACTION, ABORTING_TRANSACTION, ABORTABLE_ERROR, @@ -216,10 +169,12 @@ private boolean isTransitionValid(State source, State target) { return source == INITIALIZING || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION; case IN_TRANSACTION: return source == READY; + case PREPARED_TRANSACTION: + return source == IN_TRANSACTION || source == INITIALIZING; case COMMITTING_TRANSACTION: - return source == IN_TRANSACTION; + return source == IN_TRANSACTION || source == PREPARED_TRANSACTION; case ABORTING_TRANSACTION: - return source == IN_TRANSACTION || source == ABORTABLE_ERROR; + return source == IN_TRANSACTION || source == PREPARED_TRANSACTION || source == ABORTABLE_ERROR; case ABORTABLE_ERROR: return source == IN_TRANSACTION || source == COMMITTING_TRANSACTION || source == ABORTABLE_ERROR || source == INITIALIZING; @@ -255,7 +210,8 @@ public TransactionManager(final LogContext logContext, final String transactionalId, final int transactionTimeoutMs, final long retryBackoffMs, - final ApiVersions apiVersions) { + final ApiVersions apiVersions, + final boolean enable2PC) { this.producerIdAndEpoch = ProducerIdAndEpoch.NONE; this.transactionalId = transactionalId; this.log = logContext.logger(TransactionManager.class); @@ -265,7 +221,6 @@ public TransactionManager(final LogContext logContext, this.newPartitionsInTransaction = new HashSet<>(); this.pendingPartitionsInTransaction = new HashSet<>(); this.partitionsInTransaction = new HashSet<>(); - this.shouldPoisonStateOnInvalidTransition = ThreadLocal.withInitial(() -> false); this.pendingRequests = new PriorityQueue<>(10, Comparator.comparingInt(o -> o.priority().priority)); this.pendingTxnOffsetCommits = new HashMap<>(); this.partitionsWithUnresolvedSequences = new HashMap<>(); @@ -273,17 +228,78 @@ public TransactionManager(final LogContext logContext, this.retryBackoffMs = retryBackoffMs; this.txnPartitionMap = new TxnPartitionMap(logContext); this.apiVersions = apiVersions; + this.enable2PC = enable2PC; + } + + /** + * During its normal course of operations, the transaction manager transitions through different internal + * states (i.e. by updating {@link #currentState}) to one of those defined in {@link State}. These state transitions + * result from actions on one of the following classes of threads: + * + *

      + *
+ * <ul>
+ *     <li>Application threads that invoke {@link Producer} API calls</li>
+ *     <li>{@link Sender} thread operations</li>
+ * </ul>
    + * + * When an invalid state transition is detected during execution on an application thread, the + * {@link #currentState} is not updated and an {@link IllegalStateException} is thrown. This gives the + * application the opportunity to fix the issue without permanently poisoning the state of the + * transaction manager. The {@link Producer} API calls that perform a state transition include: + * + *
      + *
+ * <ul>
+ *     <li>{@link Producer#initTransactions()} calls {@link #initializeTransactions(boolean)}</li>
+ *     <li>{@link Producer#beginTransaction()} calls {@link #beginTransaction()}</li>
+ *     <li>{@link Producer#commitTransaction()} calls {@link #beginCommit()}</li>
+ *     <li>{@link Producer#abortTransaction()} calls {@link #beginAbort()}</li>
+ *     <li>{@link Producer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} calls
+ *         {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)}</li>
+ *     <li>{@link Producer#send(ProducerRecord)} (and its variants) calls
+ *         {@link #maybeAddPartition(TopicPartition)} and
+ *         {@link #maybeTransitionToErrorState(RuntimeException)}</li>
+ * </ul>
    + * + *

+ * + * The {@link Producer} is implemented such that much of its work is delegated to and performed asynchronously on the + * {@link Sender} thread. This includes record batching, network I/O, broker response handlers, etc. If an + * invalid state transition is detected in the {@link Sender} thread, in addition to throwing an + * {@link IllegalStateException}, the transaction manager intentionally "poisons" itself by setting its + * {@link #currentState} to {@link State#FATAL_ERROR}, a state from which it cannot recover. + * + *

    + * + * It's important to prevent possible corruption when the transaction manager has determined that it is in a + * fatal state. Subsequent transaction operations attempted via either the application or the + * {@link Sender} thread should fail. This is achieved when these operations invoke the + * {@link #maybeFailWithError()} method, as it causes a {@link KafkaException} to be thrown, ensuring the stated + * transactional guarantees are not violated. + * + *

    + * + * See KAFKA-14831 for more detail. + * + * @return {@code true} to set state to {@link State#FATAL_ERROR} before throwing an exception, + * {@code false} to throw an exception without first changing the state + */ + protected boolean shouldPoisonStateOnInvalidTransition() { + return Thread.currentThread() instanceof Sender.SenderThread; } - void setPoisonStateOnInvalidTransition(boolean shouldPoisonState) { - shouldPoisonStateOnInvalidTransition.set(shouldPoisonState); + synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) { + return initializeTransactions(producerIdAndEpoch, false); } - public synchronized TransactionalRequestResult initializeTransactions() { - return initializeTransactions(ProducerIdAndEpoch.NONE); + public synchronized TransactionalRequestResult initializeTransactions(boolean keepPreparedTxn) { + return initializeTransactions(ProducerIdAndEpoch.NONE, keepPreparedTxn); } - synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) { + synchronized TransactionalRequestResult initializeTransactions( + ProducerIdAndEpoch producerIdAndEpoch, + boolean keepPreparedTxn + ) { maybeFailWithError(); boolean isEpochBump = producerIdAndEpoch != ProducerIdAndEpoch.NONE; @@ -292,6 +308,9 @@ synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoc if (!isEpochBump) { transitionTo(State.INITIALIZING); log.info("Invoking InitProducerId for the first time in order to acquire a producer ID"); + if (keepPreparedTxn) { + log.info("Invoking InitProducerId with keepPreparedTxn set to true for 2PC transactions"); + } } else { log.info("Invoking InitProducerId with current producer ID and epoch {} in order to bump the epoch", producerIdAndEpoch); } @@ -299,7 +318,10 @@ synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoc .setTransactionalId(transactionalId) .setTransactionTimeoutMs(transactionTimeoutMs) .setProducerId(producerIdAndEpoch.producerId) - .setProducerEpoch(producerIdAndEpoch.epoch); + .setProducerEpoch(producerIdAndEpoch.epoch) + .setEnable2Pc(enable2PC) + .setKeepPreparedTxn(keepPreparedTxn); + InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData), isEpochBump); enqueueRequest(handler); @@ -314,6 +336,22 @@ public synchronized void beginTransaction() { transitionTo(State.IN_TRANSACTION); } + /** + * Prepare a transaction for a two-phase commit. + * This transitions the transaction to the PREPARED_TRANSACTION state. + * The preparedTxnState is set with the current producer ID and epoch. 
+ */ + public synchronized void prepareTransaction() { + ensureTransactional(); + throwIfPendingState("prepareTransaction"); + maybeFailWithError(); + transitionTo(State.PREPARED_TRANSACTION); + this.preparedTxnState = new ProducerIdAndEpoch( + this.producerIdAndEpoch.producerId, + this.producerIdAndEpoch.epoch + ); + } + public synchronized TransactionalRequestResult beginCommit() { return handleCachedTransactionRequestResult(() -> { maybeFailWithError(); @@ -471,6 +509,10 @@ public boolean isTransactionV2Enabled() { return isTransactionV2Enabled; } + public boolean is2PCEnabled() { + return enable2PC; + } + synchronized boolean hasPartitionsToAdd() { return !newPartitionsInTransaction.isEmpty() || !pendingPartitionsInTransaction.isEmpty(); } @@ -729,6 +771,15 @@ public synchronized void maybeTransitionToErrorState(RuntimeException exception) || exception instanceof InvalidPidMappingException) { transitionToFatalError(exception); } else if (isTransactional()) { + // RetriableExceptions from the Sender thread are converted to Abortable errors + // because they indicate that the transaction cannot be completed after all retry attempts. + // This conversion ensures the application layer treats these errors as abortable, + // preventing duplicate message delivery. + if (exception instanceof RetriableException || + exception instanceof InvalidTxnStateException) { + exception = new TransactionAbortableException("Transaction Request was aborted after exhausting retries.", exception); + } + if (needToTriggerEpochBumpFromClient() && !isCompleting()) { clientSideEpochBumpRequired = true; } @@ -871,7 +922,7 @@ synchronized TxnRequestHandler nextRequest(boolean hasIncompleteBatches) { log.debug("Not sending EndTxn for completed transaction since no partitions " + "or offsets were successfully added"); } - completeTransaction(); + resetTransactionState(); } nextRequestHandler = pendingRequests.poll(); } @@ -1042,6 +1093,15 @@ synchronized boolean isInitializing() { return isTransactional() && currentState == State.INITIALIZING; } + /** + * Check if the transaction is in the prepared state. + * + * @return true if the current state is PREPARED_TRANSACTION + */ + public synchronized boolean isPrepared() { + return currentState == State.PREPARED_TRANSACTION; + } + void handleCoordinatorReady() { NodeApiVersions nodeApiVersions = transactionCoordinator != null ? 
apiVersions.get(transactionCoordinator.idString()) : @@ -1063,7 +1123,7 @@ private void transitionTo(State target, RuntimeException error) { String message = idString + "Invalid transition attempted from state " + currentState.name() + " to state " + target.name(); - if (shouldPoisonStateOnInvalidTransition.get()) { + if (shouldPoisonStateOnInvalidTransition()) { currentState = State.FATAL_ERROR; lastError = new IllegalStateException(message); throw lastError; @@ -1269,7 +1329,7 @@ boolean canHandleAbortableError() { return coordinatorSupportsBumpingEpoch || isTransactionV2Enabled; } - private void completeTransaction() { + private void resetTransactionState() { if (clientSideEpochBumpRequired) { transitionTo(State.INITIALIZING); } else { @@ -1281,6 +1341,7 @@ private void completeTransaction() { newPartitionsInTransaction.clear(); pendingPartitionsInTransaction.clear(); partitionsInTransaction.clear(); + preparedTxnState = ProducerIdAndEpoch.NONE; } abstract class TxnRequestHandler implements RequestCompletionHandler { @@ -1437,7 +1498,21 @@ public void handleResponse(AbstractResponse response) { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(initProducerIdResponse.data().producerId(), initProducerIdResponse.data().producerEpoch()); setProducerIdAndEpoch(producerIdAndEpoch); - transitionTo(State.READY); + // If this is a transaction with keepPreparedTxn=true, transition directly + // to PREPARED_TRANSACTION state IFF there is an ongoing transaction. + if (builder.data.keepPreparedTxn() && + initProducerIdResponse.data().ongoingTxnProducerId() != RecordBatch.NO_PRODUCER_ID + ) { + transitionTo(State.PREPARED_TRANSACTION); + // Update the preparedTxnState with the ongoing pid and epoch from the response. + // This will be used to complete the transaction later. + TransactionManager.this.preparedTxnState = new ProducerIdAndEpoch( + initProducerIdResponse.data().ongoingTxnProducerId(), + initProducerIdResponse.data().ongoingTxnProducerEpoch() + ); + } else { + transitionTo(State.READY); + } lastError = null; if (this.isEpochBump) { resetSequenceNumbers(); @@ -1674,7 +1749,7 @@ boolean isEndTxn() { public void handleResponse(AbstractResponse response) { EndTxnResponse endTxnResponse = (EndTxnResponse) response; Errors error = endTxnResponse.error(); - + boolean isAbort = !builder.data.committed(); if (error == Errors.NONE) { // For End Txn version 5+, the broker includes the producerId and producerEpoch in the EndTxnResponse. // For versions lower than 5, the producer Id and epoch are set to -1 by default. @@ -1691,7 +1766,7 @@ public void handleResponse(AbstractResponse response) { setProducerIdAndEpoch(producerIdAndEpoch); resetSequenceNumbers(); } - completeTransaction(); + resetTransactionState(); result.done(); } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); @@ -1707,6 +1782,11 @@ public void handleResponse(AbstractResponse response) { fatalError(error.exception()); } else if (error == Errors.UNKNOWN_PRODUCER_ID) { abortableErrorIfPossible(error.exception()); + } else if (isAbort && error.exception() instanceof TransactionAbortableException) { + // When aborting a transaction, we must convert TRANSACTION_ABORTABLE errors to KafkaException + // because if an abort operation itself encounters an abortable error, retrying the abort would create a cycle. 
+ // Instead, we treat this as fatal error at the application layer to ensure the transaction can be cleanly terminated. + fatalError(new KafkaException("Failed to abort transaction", error.exception())); } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { @@ -1888,5 +1968,14 @@ private PendingStateTransition( } } - + /** + * Returns a ProducerIdAndEpoch object containing the producer ID and epoch + * of the ongoing transaction. + * This is used when preparing a transaction for a two-phase commit. + * + * @return a ProducerIdAndEpoch with the current producer ID and epoch. + */ + public ProducerIdAndEpoch preparedTransactionState() { + return this.preparedTxnState; + } } diff --git a/clients/src/main/java/org/apache/kafka/common/Endpoint.java b/clients/src/main/java/org/apache/kafka/common/Endpoint.java index 8d5e8c6d16a55..baa1045929f8e 100644 --- a/clients/src/main/java/org/apache/kafka/common/Endpoint.java +++ b/clients/src/main/java/org/apache/kafka/common/Endpoint.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common; -import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.security.auth.SecurityProtocol; import java.util.Objects; @@ -26,27 +25,35 @@ * Represents a broker endpoint. */ -@InterfaceStability.Evolving public class Endpoint { - private final String listenerName; + private final String listener; private final SecurityProtocol securityProtocol; private final String host; private final int port; - public Endpoint(String listenerName, SecurityProtocol securityProtocol, String host, int port) { - this.listenerName = listenerName; + public Endpoint(String listener, SecurityProtocol securityProtocol, String host, int port) { + this.listener = listener; this.securityProtocol = securityProtocol; this.host = host; this.port = port; } + /** + * Returns the listener name of this endpoint. + */ + public String listener() { + return listener; + } + /** * Returns the listener name of this endpoint. This is non-empty for endpoints provided * to broker plugins, but may be empty when used in clients. + * @deprecated Since 4.1. Use {@link #listener()} instead. This function will be removed in 5.0. 
*/ + @Deprecated(since = "4.1", forRemoval = true) public Optional listenerName() { - return Optional.ofNullable(listenerName); + return Optional.ofNullable(listener); } /** @@ -80,7 +87,7 @@ public boolean equals(Object o) { } Endpoint that = (Endpoint) o; - return Objects.equals(this.listenerName, that.listenerName) && + return Objects.equals(this.listener, that.listener) && Objects.equals(this.securityProtocol, that.securityProtocol) && Objects.equals(this.host, that.host) && this.port == that.port; @@ -89,13 +96,13 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(listenerName, securityProtocol, host, port); + return Objects.hash(listener, securityProtocol, host, port); } @Override public String toString() { return "Endpoint(" + - "listenerName='" + listenerName + '\'' + + "listenerName='" + listener + '\'' + ", securityProtocol=" + securityProtocol + ", host='" + host + '\'' + ", port=" + port + diff --git a/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java b/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java index 4c2815bb3bda5..aa13ffd9936b0 100644 --- a/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java +++ b/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java @@ -16,9 +16,9 @@ */ package org.apache.kafka.common; -import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.InvalidConfigurationException; -public class InvalidRecordException extends ApiException { +public class InvalidRecordException extends InvalidConfigurationException { private static final long serialVersionUID = 1; diff --git a/clients/src/main/java/org/apache/kafka/common/MetricName.java b/clients/src/main/java/org/apache/kafka/common/MetricName.java index 578c848f103ab..1f5b43104b1b6 100644 --- a/clients/src/main/java/org/apache/kafka/common/MetricName.java +++ b/clients/src/main/java/org/apache/kafka/common/MetricName.java @@ -20,7 +20,7 @@ import java.util.Objects; /** - * The MetricName class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.MetricName(...). + * The MetricName class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.metricName(...). *

    * This class captures the following parameters: *

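Looking back at the TransactionManager changes earlier in this patch, the two-phase-commit additions are easier to follow as a call sequence. The sketch below is illustrative only: it drives the internal methods added above (initializeTransactions(boolean), prepareTransaction(), preparedTransactionState(), and the existing beginCommit()/beginAbort()), assumes a running Sender thread completes the returned TransactionalRequestResult objects, and does not show the public KafkaProducer-facing API, which is outside the hunks included here.

import org.apache.kafka.clients.producer.internals.TransactionManager;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;

import java.util.concurrent.TimeUnit;

final class TwoPhaseCommitSketch {

    // Illustrates the ordering of the new calls; record production and the external
    // coordinator that durably logs the prepared state are elided.
    static void prepareThenCommit(TransactionManager txnManager) throws InterruptedException {
        // Acquire a producer ID; keepPreparedTxn=false starts from a clean state.
        txnManager.initializeTransactions(false).await(60, TimeUnit.SECONDS);

        txnManager.beginTransaction();
        // ... records for this transaction are sent and drained by the Sender here ...

        // Phase one: transition to PREPARED_TRANSACTION and capture the producer ID/epoch
        // that an external coordinator would persist before deciding the outcome.
        txnManager.prepareTransaction();
        ProducerIdAndEpoch prepared = txnManager.preparedTransactionState();
        System.out.println("Prepared transaction state: " + prepared);

        // Phase two: both COMMITTING_TRANSACTION and ABORTING_TRANSACTION are reachable
        // from PREPARED_TRANSACTION, so either beginCommit() or beginAbort() is legal here.
        txnManager.beginCommit().await(60, TimeUnit.SECONDS);
    }
}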
    diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
    index 9188b1687a808..e271cd99c4cd0 100644
    --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
    +++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
    @@ -65,6 +65,10 @@ public class AbstractConfig {
         public static final String AUTOMATIC_CONFIG_PROVIDERS_PROPERTY = "org.apache.kafka.automatic.config.providers";
     
         public static final String CONFIG_PROVIDERS_CONFIG = "config.providers";
    +    public static final String CONFIG_PROVIDERS_DOC = 
    +            "Comma-separated alias names for classes implementing the ConfigProvider interface. " +
    +            "This enables loading configuration data (such as passwords, API keys, and other credentials) from external " +
    +            "sources. For example, see Configuration Providers.";
     
         private static final String CONFIG_PROVIDERS_PARAM = ".param.";
     
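To make the new CONFIG_PROVIDERS_DOC text above more concrete, here is a minimal, hypothetical sketch of the aliasing it describes. The alias name, file path and property key are placeholders; FileConfigProvider is one of the ConfigProvider implementations that ships with Kafka.

import java.util.Properties;

public class ConfigProviderSketch {
    public static Properties withFileProvider() {
        Properties props = new Properties();
        // "file" is an arbitrary alias; the class it maps to implements ConfigProvider.
        props.put("config.providers", "file");
        props.put("config.providers.file.class", "org.apache.kafka.common.config.provider.FileConfigProvider");
        // Values of the form ${alias:path:key} are resolved by the aliased provider when the config is parsed.
        props.put("ssl.keystore.password", "${file:/etc/kafka/secrets.properties:keystore.password}");
        return props;
    }
}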
    diff --git a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
    index 970d9cebf7231..ee2f8c2cfd951 100644
    --- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
    +++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
    @@ -1006,26 +1006,72 @@ else if (max == null)
         public static class ValidList implements Validator {
     
             final ValidString validString;
    +        final boolean isEmptyAllowed;
    +        final boolean isNullAllowed;
     
-        private ValidList(List<String> validStrings) {
+        private ValidList(List<String> validStrings, boolean isEmptyAllowed, boolean isNullAllowed) {
                 this.validString = new ValidString(validStrings);
    +            this.isEmptyAllowed = isEmptyAllowed;
    +            this.isNullAllowed = isNullAllowed;
    +        }
    +
    +        public static ValidList anyNonDuplicateValues(boolean isEmptyAllowed, boolean isNullAllowed) {
    +            return new ValidList(List.of(), isEmptyAllowed, isNullAllowed);
             }
     
             public static ValidList in(String... validStrings) {
    -            return new ValidList(Arrays.asList(validStrings));
    +            return new ValidList(List.of(validStrings), true, false);
    +        }
    +
    +        public static ValidList in(boolean isEmptyAllowed, String... validStrings) {
    +            if (!isEmptyAllowed && validStrings.length == 0) {
    +                throw new IllegalArgumentException("At least one valid string must be provided when empty values are not allowed");
    +            }
    +            return new ValidList(List.of(validStrings), isEmptyAllowed, false);
             }
     
             @Override
             public void ensureValid(final String name, final Object value) {
    +            if (value == null) {
    +                if (isNullAllowed)
    +                    return;
    +                else
    +                    throw new ConfigException("Configuration '" + name + "' values must not be null.");
    +            }
    +
                 @SuppressWarnings("unchecked")
-            List<String> values = (List<String>) value;
    -            for (String string : values) {
    -                validString.ensureValid(name, string);
+            List<?> values = (List<?>) value;
    +            if (!isEmptyAllowed && values.isEmpty()) {
    +                String validString = this.validString.validStrings.isEmpty() ? "any non-empty value" : this.validString.toString();
    +                throw new ConfigException("Configuration '" + name + "' must not be empty. Valid values include: " + validString);
    +            }
    +
    +            if (Set.copyOf(values).size() != values.size()) {
    +                throw new ConfigException("Configuration '" + name + "' values must not be duplicated.");
    +            }
    +
    +            validateIndividualValues(name, values);
    +        }
    +
+        private void validateIndividualValues(String name, List<?> values) {
    +            boolean hasValidStrings = !validString.validStrings.isEmpty();
    +
    +            for (Object value : values) {
    +                if (value instanceof String) {
    +                    String string = (String) value;
    +                    if (string.isEmpty()) {
    +                        throw new ConfigException("Configuration '" + name + "' values must not be empty.");
    +                    }
    +                    if (hasValidStrings) {
    +                        validString.ensureValid(name, value);
    +                    }
    +                }
                 }
             }
     
             public String toString() {
    -            return validString.toString();
    +            return validString + (isEmptyAllowed ? " (empty config allowed)" : " (empty not allowed)") +
    +                    (isNullAllowed ? " (null config allowed)" : " (null not allowed)");
             }
         }
     
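For readers of the ConfigDef hunk above, a small usage sketch may help. It is illustrative only: the validator instances and configuration names below are hypothetical examples, and the behaviour shown (empty-list handling, duplicate rejection) follows the ensureValid implementation added in this patch.

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;

import java.util.List;

public class ValidListSketch {
    public static void main(String[] args) {
        // Accept any non-duplicated values; an empty list is allowed, null is not.
        ConfigDef.Validator anyValues = ConfigDef.ValidList.anyNonDuplicateValues(true, false);
        anyValues.ensureValid("ssl.cipher.suites", List.of()); // passes: empty list permitted

        // Restrict to an explicit set of values and reject an empty list.
        ConfigDef.Validator protocols = ConfigDef.ValidList.in(false, "TLSv1.2", "TLSv1.3");
        try {
            protocols.ensureValid("ssl.enabled.protocols", List.of("TLSv1.2", "TLSv1.2"));
        } catch (ConfigException e) {
            // Duplicated entries are rejected by the new validator.
            System.out.println(e.getMessage());
        }
    }
}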
    diff --git a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
    index fe7e2eb6669e7..410082d908be6 100644
    --- a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
    +++ b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
    @@ -17,8 +17,6 @@
     
     package org.apache.kafka.common.config;
     
    -import java.util.Arrays;
    -import java.util.HashSet;
     import java.util.Set;
     
     /**
    @@ -64,8 +62,8 @@ public class LogLevelConfig {
          */
         public static final String TRACE_LOG_LEVEL = "TRACE";
     
-    public static final Set<String> VALID_LOG_LEVELS = new HashSet<>(Arrays.asList(
+    public static final Set<String> VALID_LOG_LEVELS = Set.of(
                 FATAL_LOG_LEVEL, ERROR_LOG_LEVEL, WARN_LOG_LEVEL,
                 INFO_LOG_LEVEL, DEBUG_LOG_LEVEL, TRACE_LOG_LEVEL
    -    ));
    +    );
     }
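The SaslConfigs hunk below adds client-credentials and assertion-related OAuth settings. As a hedged illustration of the client-credentials path only, the sketch below sets the new keys by constant name; the endpoint URL, client ID, secret and scope are placeholders, and the usual OAUTHBEARER login module and callback handler configuration (unchanged by this patch) is omitted.

import org.apache.kafka.common.config.SaslConfigs;

import java.util.Properties;

public class OAuthClientCredentialsSketch {
    public static Properties oauthProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "OAUTHBEARER");
        // The token endpoint key itself is not new; its documentation now points at the JWT retriever class.
        props.put(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "https://idp.example.com/token"); // placeholder
        props.put(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, "my-client-id");         // placeholder
        props.put(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, "my-client-secret"); // placeholder
        props.put(SaslConfigs.SASL_OAUTHBEARER_SCOPE, "kafka");                                       // placeholder
        return props;
    }
}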
    diff --git a/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
    index 15e23270d6f0c..01f7ad1f92718 100644
    --- a/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
    +++ b/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
    @@ -16,8 +16,11 @@
      */
     package org.apache.kafka.common.config;
     
    +import org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString;
     import org.apache.kafka.common.config.ConfigDef.Range;
     
    +import java.util.List;
    +
     public class SaslConfigs {
     
         private static final String OAUTHBEARER_NOTE = " Currently applies only to OAUTHBEARER.";
    @@ -129,6 +132,173 @@ public class SaslConfigs {
                 + " authentication provider."
                 + LOGIN_EXPONENTIAL_BACKOFF_NOTE;
     
    +    public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS = "sasl.oauthbearer.jwt.retriever.class";
    +    public static final String DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS = "org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever";
+    public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC = "<p>The fully-qualified class name of a <code>JwtRetriever</code> implementation used to"
+        + " request tokens from the identity provider.</p>"
+        + "<p>The default configuration value represents a class that maintains backward compatibility with previous versions of"
+        + " Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create."
+        + " <p>Other implementations that are provided include:</p>"
+        + "<ul>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever</code></li>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever</code></li>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.FileJwtRetriever</code></li>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever</code></li>"
+        + "</ul>";
+
+    public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS = "sasl.oauthbearer.jwt.validator.class";
+    public static final String DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS = "org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator";
+    public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC = "<p>The fully-qualified class name of a <code>JwtValidator</code> implementation used to"
+        + " validate the JWT from the identity provider.</p>"
+        + "<p>The default validator (<code>org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator</code>) maintains backward compatibility with previous"
+        + " versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create."
+        + " <p>The built-in <code>JwtValidator</code> implementations are:</p>"
+        + "<ul>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator</code></li>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.ClientJwtValidator</code></li>"
+        + "<li><code>org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator</code></li>"
+        + "</ul>";
+
+    public static final String SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope";
+    public static final String SASL_OAUTHBEARER_SCOPE_DOC = "<p>This is the level of access a client application is granted to a resource or API which is"
+        + " included in the token request. If provided, it should match one or more scopes configured in the identity provider.</p>"
+        + "<p>The scope was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>scope</code>."
+        + " For backward compatibility, the <code>scope</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p>"
+        + "<p>Order of precedence:</p>"
+        + "<ul>"
+        + "<li><code>sasl.oauthbearer.scope</code> from configuration</li>"
+        + "<li><code>scope</code> from JAAS</li>"
+        + "</ul>";
+
+    public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID = "sasl.oauthbearer.client.credentials.client.id";
+    public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC = "<p>The ID (defined in/by the OAuth identity provider) to identify the client"
+        + " requesting the token.</p>"
+        + "<p>The client ID was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>clientId</code>."
+        + " For backward compatibility, the <code>clientId</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p>"
+        + "<p>Order of precedence:</p>"
+        + "<ul>"
+        + "<li><code>sasl.oauthbearer.client.credentials.client.id</code> from configuration</li>"
+        + "<li><code>clientId</code> from JAAS</li>"
+        + "</ul>";
+
+    public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET = "sasl.oauthbearer.client.credentials.client.secret";
+    public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC = "<p>The secret (defined by either the user or preassigned, depending on the"
+        + " identity provider) of the client requesting the token.</p>"
+        + "<p>The client secret was previously stored as part of the <code>sasl.jaas.config</code> configuration with the key <code>clientSecret</code>."
+        + " For backward compatibility, the <code>clientSecret</code> JAAS option can still be used, but it is deprecated and will be removed in a future version.</p>"
+        + "<p>Order of precedence:</p>"
+        + "<ul>"
+        + "<li><code>sasl.oauthbearer.client.credentials.client.secret</code> from configuration</li>"
+        + "<li><code>clientSecret</code> from JAAS</li>"
+        + "</ul>";
+
+    private static final String ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE = "<p>Note: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided,"
+        + " this configuration will be ignored.</p>";
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM = "sasl.oauthbearer.assertion.algorithm";
+    public static final String DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM = "RS256";
+    public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC = "<p>The algorithm the Apache Kafka client should use to sign the assertion sent"
+        + " to the identity provider. It is also used as the value of the OAuth <code>alg</code> (Algorithm) header in the JWT assertion.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD = "sasl.oauthbearer.assertion.claim.aud";
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC = "<p>The JWT <code>aud</code> (Audience) claim which will be included in the"
+        + " client JWT assertion created locally.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS = "sasl.oauthbearer.assertion.claim.exp.seconds";
+    public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS = 300;
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC = "<p>The number of seconds in the future for which the JWT is valid."
+        + " The value is used to determine the JWT <code>exp</code> (Expiration) claim based on the current system time when the JWT is created.</p>"
+        + "<p>The formula to generate the <code>exp</code> claim is very simple:</p>"
+        + "<pre>"
+        + "Let:\n\n"
+        + "  x = the current timestamp in seconds, on client\n"
+        + "  y = the value of this configuration\n"
+        + "\n"
+        + "Then:\n\n"
+        + "  exp = x + y\n"
+        + "</pre>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS = "sasl.oauthbearer.assertion.claim.iss";
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC = "<p>The value to be used as the <code>iss</code> (Issuer) claim which will be included in the"
+        + " client JWT assertion created locally.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE = "sasl.oauthbearer.assertion.claim.jti.include";
+    public static final boolean DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE = false;
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC = "<p>Flag that determines if the JWT assertion should generate a unique ID for the"
+        + " JWT and include it in the <code>jti</code> (JWT ID) claim.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS = "sasl.oauthbearer.assertion.claim.nbf.seconds";
+    public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS = 60;
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC = "<p>The number of seconds in the past from which the JWT is valid."
+        + " The value is used to determine the JWT <code>nbf</code> (Not Before) claim based on the current system time when the JWT is created.</p>"
+        + "<p>The formula to generate the <code>nbf</code> claim is very simple:</p>"
+        + "<pre>"
+        + "Let:\n\n"
+        + "  x = the current timestamp in seconds, on client\n"
+        + "  y = the value of this configuration\n"
+        + "\n"
+        + "Then:\n\n"
+        + "  nbf = x - y\n"
+        + "</pre>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB = "sasl.oauthbearer.assertion.claim.sub";
+    public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC = "<p>The value to be used as the <code>sub</code> (Subject) claim which will be included in the"
+        + " client JWT assertion created locally.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_FILE = "sasl.oauthbearer.assertion.file";
+    public static final String SASL_OAUTHBEARER_ASSERTION_FILE_DOC = "<p>File that contains a pre-generated JWT assertion.</p>"
+        + "<p>The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when"
+        + " the file changes to allow for the file to be reloaded on modifications. This allows for \"live\" assertion rotation without restarting the Kafka client.</p>"
+        + "<p>The file contains the assertion in the serialized, three part JWT format:</p>"
+        + "<ol>"
+        + "<li>The header section is a base 64-encoded JWT header that contains values like <code>alg</code> (Algorithm),"
+        + " <code>typ</code> (Type, always the literal value <code>JWT</code>), etc.</li>"
+        + "<li>The payload section includes the base 64-encoded set of JWT claims, such as <code>aud</code> (Audience), <code>iss</code> (Issuer),"
+        + " <code>sub</code> (Subject), etc.</li>"
+        + "<li>The signature section is the concatenated header and payload sections that was signed using a private key</li>"
+        + "</ol>"
+        + "<p>See RFC 7519 and RFC 7515"
+        + " for more details on the JWT and JWS formats.</p>"
+        + "<p>Note: If a value for <code>sasl.oauthbearer.assertion.file</code> is provided, all other"
+        + " <code>sasl.oauthbearer.assertion.*</code> configurations are ignored.</p>";
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE = "sasl.oauthbearer.assertion.private.key.file";
+    public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC = "<p>File that contains a private key in the standard PEM format which is used to"
+        + " sign the JWT assertion sent to the identity provider.</p>"
+        + "<p>The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when"
+        + " the file changes to allow for the file to be reloaded on modifications. This allows for \"live\" private key rotation without restarting the Kafka client.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE = "sasl.oauthbearer.assertion.private.key.passphrase";
+    public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC = "<p>The optional passphrase to decrypt the private key file specified by"
+        + " <code>sasl.oauthbearer.assertion.private.key.file</code>.</p>"
+        + "<p>Note: If the file referred to by <code>sasl.oauthbearer.assertion.private.key.file</code> is modified on the file system at runtime and it was"
+        + " created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now"
+        + " out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or (for improved security) restart"
+        + " the Kafka client using the new passphrase configuration.</p>"
+        + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE;
+
+    public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE = "sasl.oauthbearer.assertion.template.file";
+    public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC = "<p>This optional configuration specifies the file containing the JWT headers and/or"
+        + " payload claims to be used when creating the JWT assertion.</p>"
+        + "<p>Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it."
+        + " In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims"
+        + " that are to be included in the JWT.</p>
    " + + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; + public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "sasl.oauthbearer.scope.claim.name"; public static final String DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "scope"; public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC = "The OAuth claim for the scope is often named \"" + DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME + "\", but this (optional)" @@ -143,8 +313,8 @@ public class SaslConfigs { public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC = "The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token" - + " endpoint URL to which requests will be made to login based on the configuration in " + SASL_JAAS_CONFIG + ". If the URL is file-based, it" - + " specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."; + + " endpoint URL to which requests will be made to login based on the configuration in " + SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS + ". If the URL is" + + " file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL = "sasl.oauthbearer.jwks.endpoint.url"; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC = "The OAuth/OIDC provider URL from which the provider's" @@ -215,6 +385,22 @@ public static void addClientSaslSupport(ConfigDef config) { .define(SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS, ConfigDef.Type.INT, null, ConfigDef.Importance.LOW, SASL_LOGIN_READ_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, ConfigDef.Type.CLASS, DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, ConfigDef.Type.CLASS, DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_SCOPE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, CaseInsensitiveValidString.in("ES256", "RS256"), ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, ConfigDef.Type.INT, 
DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, Range.between(0, 86400), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, ConfigDef.Type.BOOLEAN, DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, Range.between(0, 3600), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_FILE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC) @@ -223,7 +409,7 @@ public static void addClientSaslSupport(ConfigDef config) { .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, ConfigDef.Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER, ConfigDef.Type.STRING, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE, ConfigDef.Type.BOOLEAN, DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE, ConfigDef.Importance.LOW, 
SASL_OAUTHBEARER_HEADER_URLENCODE_DOC); } diff --git a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java index 5ca4980fc2220..7675f75a9ab71 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; +import java.util.List; import java.util.Set; import javax.net.ssl.KeyManagerFactory; @@ -34,7 +35,7 @@ public class SslConfigs { + "this config are dependent on the JVM. " + "Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if " + "the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even " - + "if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'."; + + "if it is one of the values in ssl.enabled.protocols and the server only supports 'TLSv1.3'."; public static final String DEFAULT_SSL_PROTOCOL = "TLSv1.3"; @@ -49,12 +50,14 @@ public class SslConfigs { public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + "The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it " + "and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use " - + "cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior."; + + "cases. If this configuration is set to an empty list, Kafka will use the protocols enabled by default in the underlying SSLEngine, " + + "which may include additional protocols depending on the JVM version. " + + "Also see the config documentation for ssl.protocol to understand how it can impact the TLS version negotiation behavior."; public static final String DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.3"; public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " - + "This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; + + "This is optional for client. The values currently supported by the default ssl.engine.factory.class are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; public static final String SSL_KEYSTORE_KEY_CONFIG = "ssl.keystore.key"; @@ -84,7 +87,7 @@ public class SslConfigs { + "the PEM key specified in 'ssl.keystore.key'."; public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; - public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. 
The values currently supported by the default ssl.engine.factory.class are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; @@ -123,8 +126,8 @@ public class SslConfigs { public static void addClientSslSupport(ConfigDef config) { config.define(SslConfigs.SSL_PROTOCOL_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_PROTOCOL, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROTOCOL_DOC) .define(SslConfigs.SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROVIDER_DOC) - .define(SslConfigs.SSL_CIPHER_SUITES_CONFIG, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SslConfigs.SSL_CIPHER_SUITES_DOC) - .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, ConfigDef.Type.LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SslConfigs.SSL_CIPHER_SUITES_CONFIG, ConfigDef.Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, SslConfigs.SSL_CIPHER_SUITES_DOC) + .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, ConfigDef.Type.LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_KEYSTORE_TYPE_DOC) .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) diff --git a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java index ec3721389a489..e97c39bc61911 100755 --- a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java @@ -21,7 +21,7 @@ *

    Keys that can be used to configure a topic. These keys are useful when creating or reconfiguring a * topic using the AdminClient. * - *

    The intended pattern is for broker configs to include a `log.` prefix. For example, to set the default broker + *

    The intended pattern is for broker configs to include a log. prefix. For example, to set the default broker * cleanup policy, one would set log.cleanup.policy instead of cleanup.policy. Unfortunately, there are many cases * where this pattern is not followed. */ @@ -87,13 +87,13 @@ public class TopicConfig { public static final String LOCAL_LOG_RETENTION_MS_CONFIG = "local.retention.ms"; public static final String LOCAL_LOG_RETENTION_MS_DOC = "The number of milliseconds to keep the local log segment before it gets deleted. " + - "Default value is -2, it represents `retention.ms` value is to be used. The effective value should always be less than or equal " + - "to `retention.ms` value."; + "Default value is -2, it represents retention.ms value is to be used. The effective value should always be less than or equal " + + "to retention.ms value."; public static final String LOCAL_LOG_RETENTION_BYTES_CONFIG = "local.retention.bytes"; public static final String LOCAL_LOG_RETENTION_BYTES_DOC = "The maximum size of local log segments that can grow for a partition before it " + - "deletes the old segments. Default value is -2, it represents `retention.bytes` value to be used. The effective value should always be " + - "less than or equal to `retention.bytes` value."; + "deletes the old segments. Default value is -2, it represents retention.bytes value to be used. The effective value should always be " + + "less than or equal to retention.bytes value."; public static final String REMOTE_LOG_COPY_DISABLE_CONFIG = "remote.log.copy.disable"; public static final String REMOTE_LOG_COPY_DISABLE_DOC = "Determines whether tiered data for a topic should become read only," + @@ -104,17 +104,19 @@ public class TopicConfig { public static final String REMOTE_LOG_DELETE_ON_DISABLE_CONFIG = "remote.log.delete.on.disable"; public static final String REMOTE_LOG_DELETE_ON_DISABLE_DOC = "Determines whether tiered data for a topic should be " + "deleted after tiered storage is disabled on a topic. This configuration should be enabled when trying to " + - "set `remote.storage.enable` from true to false"; + "set remote.storage.enable from true to false"; public static final String MAX_MESSAGE_BYTES_CONFIG = "max.message.bytes"; public static final String MAX_MESSAGE_BYTES_DOC = "The largest record batch size allowed by Kafka (after compression if compression is enabled)."; public static final String INDEX_INTERVAL_BYTES_CONFIG = "index.interval.bytes"; - public static final String INDEX_INTERVAL_BYTES_DOC = "This setting controls how frequently " + - "Kafka adds an index entry to its offset index. The default setting ensures that we index a " + - "message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact " + - "position in the log but makes the index larger. You probably don't need to change this."; + public static final String INDEX_INTERVAL_BYTES_DOC = "This setting controls how frequently Kafka " + + "adds entries to its offset index and, conditionally, to its time index. " + + "The default setting ensures that we index a message roughly every 4096 bytes. " + + "More frequent indexing allows reads to jump closer to the exact position in the log " + + "but results in larger index files. You probably don't need to change this." + + "

    Note: the time index will be inserted only when the timestamp is greater than the last indexed timestamp.

    "; public static final String FILE_DELETE_DELAY_MS_CONFIG = "file.delete.delay.ms"; public static final String FILE_DELETE_DELAY_MS_DOC = "The time to wait before deleting a file from the " + @@ -157,26 +159,33 @@ public class TopicConfig { "log compaction, which retains the latest value for each key. " + "It is also possible to specify both policies in a comma-separated list (e.g. \"delete,compact\"). " + "In this case, old segments will be discarded per the retention time and size configuration, " + - "while retained segments will be compacted."; + "while retained segments will be compacted. " + + "An empty list means infinite retention - no cleanup policies will be applied and log segments " + + "will be retained indefinitely. Note that with remote storage enabled, local retention limits " + + "(log.local.retention.ms and log.local.retention.bytes) are still applied to local segments."; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG = "unclean.leader.election.enable"; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC = "Indicates whether to enable replicas " + "not in the ISR set to be elected as leader as a last resort, even though doing so may result in data " + "loss.

    Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election" + - "thread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option " + + "thread to trigger election periodically (default is 5 minutes). Please run kafka-leader-election.sh with unclean option " + "to trigger the unclean leader election immediately if needed.

    "; public static final String MIN_IN_SYNC_REPLICAS_CONFIG = "min.insync.replicas"; - public static final String MIN_IN_SYNC_REPLICAS_DOC = "When a producer sets acks to \"all\" (or \"-1\"), " + - "this configuration specifies the minimum number of replicas that must acknowledge " + - "a write for the write to be considered successful. If this minimum cannot be met, " + - "then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
    " + + public static final String MIN_IN_SYNC_REPLICAS_DOC = "Specifies the minimum number of in-sync replicas (including the leader) " + + "required for a write to succeed when a producer sets acks to \"all\" (or \"-1\"). In the acks=all " + + "case, every in-sync replica must acknowledge a write for it to be considered successful. E.g., if a topic has " + + "replication.factor of 3 and the ISR set includes all three replicas, then all three replicas must acknowledge an " + + "acks=all write for it to succeed, even if min.insync.replicas happens to be less than 3. " + + "If acks=all and the current ISR set contains fewer than min.insync.replicas members, then the producer " + + "will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
    " + "Regardless of the acks setting, the messages will not be visible to the consumers until " + "they are replicated to all in-sync replicas and the min.insync.replicas condition is met.
    " + "When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. " + "A typical scenario would be to create a topic with a replication factor of 3, " + "set min.insync.replicas to 2, and produce with acks of \"all\". " + - "This will ensure that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers."; + "This ensures that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers." + + "

    Note that when the Eligible Leader Replicas feature is enabled, the semantics of this config changes. Please refer to the ELR section for more info.

    "; public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; public static final String COMPRESSION_TYPE_DOC = "Specify the final compression type for a given topic. " + diff --git a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java index a8947ede15446..48f3948ef9d1f 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java @@ -136,6 +136,10 @@ public class BrokerSecurityConfigs { // The allowlist of the SASL OAUTHBEARER endpoints public static final String ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG = "org.apache.kafka.sasl.oauthbearer.allowed.urls"; public static final String ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT = ""; + + public static final String ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG = "org.apache.kafka.sasl.oauthbearer.allowed.files"; + public static final String ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT = ""; + public static final ConfigDef CONFIG_DEF = new ConfigDef() // General Security Configuration .define(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG, LONG, BrokerSecurityConfigs.DEFAULT_CONNECTIONS_MAX_REAUTH_MS, MEDIUM, BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_DOC) @@ -150,7 +154,7 @@ public class BrokerSecurityConfigs { .define(BrokerSecurityConfigs.SSL_ALLOW_SAN_CHANGES_CONFIG, BOOLEAN, BrokerSecurityConfigs.DEFAULT_SSL_ALLOW_SAN_CHANGES_VALUE, LOW, BrokerSecurityConfigs.SSL_ALLOW_SAN_CHANGES_DOC) .define(SslConfigs.SSL_PROTOCOL_CONFIG, STRING, SslConfigs.DEFAULT_SSL_PROTOCOL, MEDIUM, SslConfigs.SSL_PROTOCOL_DOC) .define(SslConfigs.SSL_PROVIDER_CONFIG, STRING, null, MEDIUM, SslConfigs.SSL_PROVIDER_DOC) - .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.ValidList.anyNonDuplicateValues(true, false), MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, STRING, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, MEDIUM, SslConfigs.SSL_KEYSTORE_TYPE_DOC) .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STRING, null, MEDIUM, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, PASSWORD, null, MEDIUM, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) @@ -190,6 +194,22 @@ public class BrokerSecurityConfigs { .define(SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS, INT, null, LOW, SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS, LONG, SaslConfigs.DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS, LOW, SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS, LONG, SaslConfigs.DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS, LOW, SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, CLASS, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, CLASS, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_SCOPE_DOC) + 
.define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, PASSWORD, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, ConfigDef.CaseInsensitiveValidString.in("ES256", "RS256"), MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, ConfigDef.Range.between(0, 86400), LOW, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, BOOLEAN, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, ConfigDef.Range.between(0, 3600), LOW, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE, PASSWORD, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, LOW, SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, LOW, SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC) @@ -198,6 +218,6 @@ public class BrokerSecurityConfigs { .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, LONG, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, LOW, SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, LONG, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, LOW, SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, LOW, SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, LIST, null, LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) + 
.define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, LIST, List.of(), LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER, STRING, null, LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC); } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java b/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java index 7a05eba03f2bc..5d1d50701736e 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java @@ -32,7 +32,7 @@ *
  • {@link SslAuthenticationException} if SSL handshake failed due to any {@link SSLException}. * */ -public class AuthenticationException extends ApiException { +public class AuthenticationException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java b/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java index 0471fe643d9a8..8ff29af9597d4 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class AuthorizationException extends ApiException { +public class AuthorizationException extends InvalidConfigurationException { public AuthorizationException(String message) { super(message); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java index 333566a64ba1f..85e63c42ec398 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java @@ -20,12 +20,20 @@ public class InvalidConfigurationException extends ApiException { private static final long serialVersionUID = 1L; + public InvalidConfigurationException(String message, Throwable cause) { + super(message, cause); + } + public InvalidConfigurationException(String message) { super(message); } - public InvalidConfigurationException(String message, Throwable cause) { - super(message, cause); + public InvalidConfigurationException(Throwable cause) { + super(cause); + } + + public InvalidConfigurationException() { + super(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java index 699d5a83a432a..508d73a793f5d 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class InvalidReplicationFactorException extends ApiException { +public class InvalidReplicationFactorException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java index 423c09166b7ba..f861dbfee18bb 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class InvalidRequiredAcksException extends ApiException { +public class InvalidRequiredAcksException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; public InvalidRequiredAcksException(String message) { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java index 
344d231ce9e6d..cf0ed5ed5cc9b 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java @@ -27,7 +27,7 @@ * * @see UnknownTopicOrPartitionException */ -public class InvalidTopicException extends ApiException { +public class InvalidTopicException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; private final Set invalidTopics; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java b/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java index d1ba8ff716095..24f563e1c2777 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java @@ -19,7 +19,7 @@ /** * This record batch is larger than the maximum allowable size */ -public class RecordBatchTooLargeException extends ApiException { +public class RecordBatchTooLargeException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java index aa592d552bf00..544a5c122b2f8 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java @@ -17,6 +17,13 @@ package org.apache.kafka.common.errors; public class TransactionAbortableException extends ApiException { + + private static final long serialVersionUID = 1L; + + public TransactionAbortableException(String message, Throwable cause) { + super(message, cause); + } + public TransactionAbortableException(String message) { super(message); } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java index f66298e99c30b..03add5c4ffce8 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java @@ -20,7 +20,7 @@ * The message format version does not support the requested function. For example, if idempotence is * requested and the topic is using a message format older than 0.11.0.0, then this error will be returned. */ -public class UnsupportedForMessageFormatException extends ApiException { +public class UnsupportedForMessageFormatException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; public UnsupportedForMessageFormatException(String message) { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java index 484947b0ae2b4..00da3a1b2b8b2 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java @@ -28,7 +28,7 @@ * is raised from {@link org.apache.kafka.clients.consumer.KafkaConsumer#offsetsForTimes(Map)}, it would * be possible to revert to alternative logic to set the consumer's position. 
*/ -public class UnsupportedVersionException extends ApiException { +public class UnsupportedVersionException extends InvalidConfigurationException { private static final long serialVersionUID = 1L; public UnsupportedVersionException(String message, Throwable cause) { diff --git a/clients/src/main/java/org/apache/kafka/common/header/Header.java b/clients/src/main/java/org/apache/kafka/common/header/Header.java index 58869b41fb777..e1d0aa00a4417 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/Header.java +++ b/clients/src/main/java/org/apache/kafka/common/header/Header.java @@ -16,10 +16,23 @@ */ package org.apache.kafka.common.header; +/** + * A header is a key-value pair. + */ public interface Header { - + + /** + * Returns the key of the header. + * + * @return the header's key; must not be null. + */ String key(); + /** + * Returns the value of the header. + * + * @return the header's value; may be null. + */ byte[] value(); - + } diff --git a/clients/src/main/java/org/apache/kafka/common/header/Headers.java b/clients/src/main/java/org/apache/kafka/common/header/Headers.java index b736cbcabcc9b..9cce54a5c5cc5 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/Headers.java +++ b/clients/src/main/java/org/apache/kafka/common/header/Headers.java @@ -16,12 +16,18 @@ */ package org.apache.kafka.common.header; + +/** + * A mutable ordered collection of {@link Header} objects. Note that multiple headers may have the same {@link Header#key() key}. + *

+ * Headers are kept in the order in which they were added. + */ public interface Headers extends Iterable<Header>

    { /** * Adds a header (key inside), to the end, returning if the operation succeeded. * - * @param header the Header to be added + * @param header the Header to be added. * @return this instance of the Headers, once the header is added. * @throws IllegalStateException is thrown if headers are in a read-only state. */ @@ -30,17 +36,18 @@ public interface Headers extends Iterable
    { /** * Creates and adds a header, to the end, returning if the operation succeeded. * - * @param key of the header to be added. - * @param value of the header to be added. + * @param key of the header to be added; must not be null. + * @param value of the header to be added; may be null. * @return this instance of the Headers, once the header is added. * @throws IllegalStateException is thrown if headers are in a read-only state. */ Headers add(String key, byte[] value) throws IllegalStateException; /** - * Removes all headers for the given key returning if the operation succeeded. + * Removes all headers for the given key returning if the operation succeeded, + * while preserving the insertion order of the remaining headers. * - * @param key to remove all headers for. + * @param key to remove all headers for; must not be null. * @return this instance of the Headers, once the header is removed. * @throws IllegalStateException is thrown if headers are in a read-only state. */ @@ -49,16 +56,17 @@ public interface Headers extends Iterable
    { /** * Returns just one (the very last) header for the given key, if present. * - * @param key to get the last header for. + * @param key to get the last header for; must not be null. * @return this last header matching the given key, returns null if not present. */ Header lastHeader(String key); /** * Returns all headers for the given key, in the order they were added in, if present. + * The iterator does not support {@link java.util.Iterator#remove()}. * - * @param key to return the headers for. - * @return all headers for the given key, in the order they were added in, if NO headers are present an empty iterable is returned. + * @param key to return the headers for; must not be null. + * @return all headers for the given key, in the order they were added in, if NO headers are present an empty iterable is returned. */ Iterable
    headers(String key); diff --git a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java index 52863c6c0b564..6311f6ca34825 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java +++ b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java @@ -108,6 +108,10 @@ public void setReadOnly() { this.isReadOnly = true; } + public boolean isReadOnly() { + return isReadOnly; + } + public Header[] toArray() { return headers.isEmpty() ? Record.EMPTY_HEADERS : headers.toArray(new Header[0]); } diff --git a/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java b/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java index 96652df9410b5..1ce0557119291 100644 --- a/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java +++ b/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java @@ -24,7 +24,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.function.BiConsumer; @@ -151,46 +150,6 @@ private void update(Map partitionToState) { } } - public static class PartitionState { - private final TopicPartition topicPartition; - private final S value; - public PartitionState(TopicPartition topicPartition, S state) { - this.topicPartition = Objects.requireNonNull(topicPartition); - this.value = Objects.requireNonNull(state); - } - - public S value() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - PartitionState that = (PartitionState) o; - - return topicPartition.equals(that.topicPartition) && value.equals(that.value); - } - - @Override - public int hashCode() { - int result = topicPartition.hashCode(); - result = 31 * result + value.hashCode(); - return result; - } - - public TopicPartition topicPartition() { - return topicPartition; - } - - @Override - public String toString() { - return "PartitionState(" + topicPartition + "=" + value + ')'; - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Gauge.java b/clients/src/main/java/org/apache/kafka/common/metrics/Gauge.java index d71bbd853db16..cad640eea2b9a 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Gauge.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Gauge.java @@ -20,13 +20,4 @@ * A gauge metric is an instantaneous reading of a particular value. */ @FunctionalInterface -public interface Gauge extends MetricValueProvider { - - /** - * Returns the current value associated with this gauge. 
- * @param config The configuration for this metric - * @param now The POSIX time in milliseconds the measurement is being taken - */ - T value(MetricConfig config, long now); - -} +public interface Gauge extends MetricValueProvider { } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java index 1d31855db53f2..a9203ead0a044 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java @@ -20,6 +20,8 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.utils.Time; +import java.util.Objects; + public final class KafkaMetric implements Metric { private final MetricName metricName; @@ -41,9 +43,7 @@ public KafkaMetric(Object lock, MetricName metricName, MetricValueProvider va MetricConfig config, Time time) { this.metricName = metricName; this.lock = lock; - if (!(valueProvider instanceof Measurable) && !(valueProvider instanceof Gauge)) - throw new IllegalArgumentException("Unsupported metric value provider of class " + valueProvider.getClass()); - this.metricValueProvider = valueProvider; + this.metricValueProvider = Objects.requireNonNull(valueProvider, "valueProvider must not be null"); this.config = config; this.time = time; } @@ -67,20 +67,15 @@ public MetricName metricName() { } /** - * Take the metric and return the value, which could be a {@link Measurable} or a {@link Gauge} + * Take the metric and return the value via {@link MetricValueProvider#value(MetricConfig, long)}. + * * @return Return the metric value - * @throws IllegalStateException if the underlying metric is not a {@link Measurable} or a {@link Gauge}. */ @Override public Object metricValue() { long now = time.milliseconds(); synchronized (this.lock) { - if (isMeasurable()) - return ((Measurable) metricValueProvider).measure(config, now); - else if (this.metricValueProvider instanceof Gauge) - return ((Gauge) metricValueProvider).value(config, now); - else - throw new IllegalStateException("Not a valid metric: " + this.metricValueProvider.getClass()); + return metricValueProvider.value(config, now); } } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java b/clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java index 866cabad685c9..58b9caa06ed9e 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java @@ -22,11 +22,26 @@ public interface Measurable extends MetricValueProvider { /** - * Measure this quantity and return the result as a double + * Measure this quantity and return the result as a double. + * * @param config The configuration for this metric * @param now The POSIX time in milliseconds the measurement is being taken * @return The measured value */ double measure(MetricConfig config, long now); + /** + * Measure this quantity and return the result as a double. + * + * This default implementation delegates to {@link #measure(MetricConfig, long)}. 
+ * + * @param config The configuration for this metric + * @param now The POSIX time in milliseconds the measurement is being taken + * @return The measured value as a {@link Double} + */ + @Override + default Double value(MetricConfig config, long now) { + return measure(config, now); + } + } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/MetricValueProvider.java b/clients/src/main/java/org/apache/kafka/common/metrics/MetricValueProvider.java index 68028e73a8f17..e4d751c2bb2a0 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/MetricValueProvider.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/MetricValueProvider.java @@ -19,10 +19,17 @@ /** * Super-interface for {@link Measurable} or {@link Gauge} that provides * metric values. - *

    - * In the future for Java8 and above, {@link Gauge#value(MetricConfig, long)} will be - * moved to this interface with a default implementation in {@link Measurable} that returns - * {@link Measurable#measure(MetricConfig, long)}. - *

    */ -public interface MetricValueProvider { } +@FunctionalInterface +public interface MetricValueProvider { + + /** + * Returns the current value associated with this metric. + * + * @param config The configuration for this metric + * @param now The POSIX time in milliseconds the measurement is being taken + * @return the current metric value + */ + T value(MetricConfig config, long now); + +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java index 13d8db4b0cb13..182a8c7484931 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java @@ -48,7 +48,7 @@ * A registry of sensors and metrics. *

    * A metric is a named, numerical measurement. A sensor is a handle to record numerical measurements as they occur. Each - * Sensor has zero or more associated metrics. For example a Sensor might represent message sizes and we might associate + * Sensor has zero or more associated metrics. For example, a Sensor might represent message sizes, and we might associate * with this sensor a metric for the average, maximum, or other statistics computed off the sequence of message sizes * that are recorded by the sensor. *
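For context, here is a minimal, self-contained sketch of the registry usage this javadoc (and the example updated in the next hunk) describes. The sensor, metric, and group names are illustrative and MetricsUsageSketch is a hypothetical class, not part of this patch; the sketch also shows that, with Gauge now a purely functional extension of MetricValueProvider, a gauge can be registered as a lambda via Metrics.addMetric:

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class MetricsUsageSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();                  // the global repository of metrics and sensors
        Sensor sensor = metrics.sensor("message-sizes");  // illustrative sensor name

        // Statistics computed from the values recorded by the sensor
        // (cf. the usage example in the following hunk).
        sensor.add(metrics.metricName("message-size-avg", "producer-metrics"), new Avg());
        sensor.add(metrics.metricName("message-size-max", "producer-metrics"), new Max());
        sensor.record(1024.0);                            // record one observed message size

        // Gauge now inherits value(MetricConfig, long) from MetricValueProvider,
        // so a gauge can be supplied as a lambda.
        MetricName queueSize = metrics.metricName("queue-size", "producer-metrics");
        metrics.addMetric(queueSize, (Gauge<Integer>) (config, now) -> 42);

        metrics.close();
    }
}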

    @@ -58,9 +58,9 @@ * // set up metrics: * Metrics metrics = new Metrics(); // this is the global repository of metrics and sensors * Sensor sensor = metrics.sensor("message-sizes"); - * MetricName metricName = new MetricName("message-size-avg", "producer-metrics"); + * MetricName metricName = metrics.metricName("message-size-avg", "producer-metrics"); * sensor.add(metricName, new Avg()); - * metricName = new MetricName("message-size-max", "producer-metrics"); + * metricName = metrics.metricName("message-size-max", "producer-metrics"); * sensor.add(metricName, new Max()); * * // as messages are sent we record the sizes @@ -553,7 +553,7 @@ public synchronized KafkaMetric removeMetric(MetricName metricName) { try { reporter.metricRemoval(metric); } catch (Exception e) { - log.error("Error when removing metric from " + reporter.getClass().getName(), e); + log.error("Error when removing metric from {}", reporter.getClass().getName(), e); } } log.trace("Removed metric named {}", metricName); @@ -596,7 +596,7 @@ synchronized KafkaMetric registerMetric(KafkaMetric metric) { try { reporter.metricChange(metric); } catch (Exception e) { - log.error("Error when registering metric on " + reporter.getClass().getName(), e); + log.error("Error when registering metric on {}", reporter.getClass().getName(), e); } } log.trace("Registered metric named {}", metricName); @@ -688,7 +688,7 @@ public void close() { log.info("Closing reporter {}", reporter.getClass().getName()); reporter.close(); } catch (Exception e) { - log.error("Error when closing " + reporter.getClass().getName(), e); + log.error("Error when closing {}", reporter.getClass().getName(), e); } } log.info("Metrics reporters closed"); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java index d296234105d7b..5edd9462b80fd 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java @@ -18,7 +18,7 @@ import org.apache.kafka.common.MetricName; -import java.util.Map; +import java.util.LinkedHashMap; /** * This allows plugins to register metrics and sensors. @@ -35,7 +35,7 @@ public interface PluginMetrics { * @param tags Additional tags for the metric * @throws IllegalArgumentException if any of the tag names collide with the default tags for the plugin */ - MetricName metricName(String name, String description, Map tags); + MetricName metricName(String name, String description, LinkedHashMap tags); /** * Add a metric to monitor an object that implements {@link MetricValueProvider}. 
This metric won't be associated with any diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java b/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java index c109b5789629a..4ea51f3ea4769 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java @@ -45,7 +45,7 @@ public PluginMetricsImpl(Metrics metrics, Map tags) { } @Override - public MetricName metricName(String name, String description, Map tags) { + public MetricName metricName(String name, String description, LinkedHashMap tags) { if (closing) throw new IllegalStateException("This PluginMetrics instance is closed"); for (String tagName : tags.keySet()) { if (this.tags.containsKey(tagName)) { diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java index aea38c72cacdf..847f887a324fb 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java @@ -125,7 +125,7 @@ private static ChannelBuilder create(SecurityProtocol securityProtocol, switch (securityProtocol) { case SSL: requireNonNullMode(connectionMode, securityProtocol); - channelBuilder = new SslChannelBuilder(connectionMode, listenerName, isInterBrokerListener, logContext); + channelBuilder = new SslChannelBuilder(connectionMode, listenerName, isInterBrokerListener); break; case SASL_SSL: case SASL_PLAINTEXT: diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index df6ccd67ce5bb..22c24f8408c89 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -681,4 +681,14 @@ private void swapAuthenticatorsAndBeginReauthentication(ReauthenticationContext public ChannelMetadataRegistry channelMetadataRegistry() { return metadataRegistry; } + + + /** + * Maybe add write interest after re-authentication. This is to ensure that any pending write operation + * is resumed. + */ + public void maybeAddWriteInterestAfterReauth() { + if (send != null) + this.transportLayer.addInterestOps(SelectionKey.OP_WRITE); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java index c6181b81c5e73..cf4ef470af0db 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java @@ -103,7 +103,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return principalBuilder instanceof KafkaPrincipalSerde ? 
Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); + return Optional.of(principalBuilder); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 151a0fbbd8876..7acf88269ee14 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -551,6 +551,7 @@ void pollSelectionKeys(Set selectionKeys, boolean isReauthentication = channel.successfulAuthentications() > 1; if (isReauthentication) { sensors.successfulReauthentication.record(1.0, readyTimeMs); + channel.maybeAddWriteInterestAfterReauth(); if (channel.reauthenticationLatencyMs() == null) log.warn( "Should never happen: re-authentication latency for a re-authenticated channel was null; continuing..."); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java index b45fb07442e7f..249fcad163a79 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java @@ -26,7 +26,6 @@ import org.apache.kafka.common.security.auth.SslAuthenticationContext; import org.apache.kafka.common.security.ssl.SslFactory; import org.apache.kafka.common.security.ssl.SslPrincipalMapper; -import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Utils; import java.io.Closeable; @@ -53,8 +52,7 @@ public class SslChannelBuilder implements ChannelBuilder, ListenerReconfigurable */ public SslChannelBuilder(ConnectionMode connectionMode, ListenerName listenerName, - boolean isInterBrokerListener, - LogContext logContext) { + boolean isInterBrokerListener) { this.connectionMode = connectionMode; this.listenerName = listenerName; this.isInterBrokerListener = isInterBrokerListener; @@ -164,7 +162,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return principalBuilder instanceof KafkaPrincipalSerde ? 
Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); + return Optional.of(principalBuilder); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 8534561af8b64..89b952e6ce766 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -116,7 +116,7 @@ public enum ApiKeys { GET_TELEMETRY_SUBSCRIPTIONS(ApiMessageType.GET_TELEMETRY_SUBSCRIPTIONS), PUSH_TELEMETRY(ApiMessageType.PUSH_TELEMETRY), ASSIGN_REPLICAS_TO_DIRS(ApiMessageType.ASSIGN_REPLICAS_TO_DIRS), - LIST_CLIENT_METRICS_RESOURCES(ApiMessageType.LIST_CLIENT_METRICS_RESOURCES), + LIST_CONFIG_RESOURCES(ApiMessageType.LIST_CONFIG_RESOURCES), DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS), SHARE_GROUP_HEARTBEAT(ApiMessageType.SHARE_GROUP_HEARTBEAT), SHARE_GROUP_DESCRIBE(ApiMessageType.SHARE_GROUP_DESCRIBE), diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java b/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java index f643f5b5779b1..c3e2886e656a7 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java @@ -145,6 +145,11 @@ public int remaining() { return buf.remaining(); } + @Override + public Readable slice() { + return new ByteBufferAccessor(buf.slice()); + } + public void flip() { buf.flip(); } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 9092db41783ba..a27a7fcf23c77 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -229,7 +229,7 @@ public enum Errors { "The group member's supported protocols are incompatible with those of existing members " + "or first group member tried to join with empty protocol type or empty protocol list.", InconsistentGroupProtocolException::new), - INVALID_GROUP_ID(24, "The configured groupId is invalid.", + INVALID_GROUP_ID(24, "The group id is invalid.", InvalidGroupIdException::new), UNKNOWN_MEMBER_ID(25, "The coordinator is not aware of this member.", UnknownMemberIdException::new), diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java index 237948f61c97d..1b051d58bf0f0 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.message.RequestHeaderData; import org.apache.kafka.common.message.ResponseHeaderData; import org.apache.kafka.common.protocol.types.BoundField; +import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.TaggedFields; import org.apache.kafka.common.protocol.types.Type; @@ -27,6 +28,7 @@ import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; +import java.util.TreeMap; public class Protocol { @@ -49,7 +51,23 @@ private static void schemaToBnfHtml(Schema schema, StringBuilder b, int indentSi subTypes.put(field.def.name, type.arrayElementType().get()); } } else if (type instanceof 
TaggedFields) { - b.append("_tagged_fields "); + Map taggedFields = new TreeMap<>(((TaggedFields) type).fields()); + taggedFields.forEach((tag, taggedField) -> { + if (taggedField.type.isArray()) { + b.append("["); + b.append(taggedField.name); + b.append("]"); + if (!subTypes.containsKey(taggedField.name)) + subTypes.put(taggedField.name + "<tag: " + tag.toString() + ">", taggedField.type.arrayElementType().get()); + } else { + b.append(taggedField.name); + if (!subTypes.containsKey(taggedField.name)) + subTypes.put(taggedField.name + "<tag: " + tag.toString() + ">", taggedField.type); + } + b.append("<tag: "); + b.append(tag); + b.append("> "); + }); } else { b.append(field.def.name); b.append(" "); @@ -90,6 +108,12 @@ private static void populateSchemaFields(Schema schema, Set fields) } } + private static void appendFieldNameToTable(String name, StringBuilder b) { + b.append(""); + b.append(name); + b.append(""); + } + private static void schemaToFieldTableHtml(Schema schema, StringBuilder b) { Set fields = new LinkedHashSet<>(); populateSchemaFields(schema, fields); @@ -101,28 +125,12 @@ private static void schemaToFieldTableHtml(Schema schema, StringBuilder b) { b.append(""); for (BoundField field : fields) { b.append("\n"); - b.append(""); - b.append(field.def.name); - b.append(""); - b.append(""); if (field.def.type instanceof TaggedFields) { TaggedFields taggedFields = (TaggedFields) field.def.type; // Only include the field in the table if there are actually tags defined if (taggedFields.numFields() > 0) { - b.append("\n"); - b.append(""); - b.append("\n"); - b.append("\n"); - b.append("\n"); - b.append(""); taggedFields.fields().forEach((tag, taggedField) -> { - b.append("\n"); - b.append(""); - b.append(""); + appendFieldNameToTable(taggedField.name + "<tag: " + tag.toString() + ">", b); b.append(""); b.append("\n"); }); - b.append("
    TagTagged fieldDescription
    "); - b.append(tag); - b.append(""); - b.append(taggedField.name); - b.append(""); b.append(taggedField.docString); if (taggedField.type.isArray()) { @@ -136,11 +144,10 @@ private static void schemaToFieldTableHtml(Schema schema, StringBuilder b) { b.append("
    \n"); - } else { - b.append(field.def.docString); } } else { + appendFieldNameToTable(field.def.name, b); + b.append(""); b.append(field.def.docString); } b.append(""); @@ -208,7 +215,7 @@ public static String toHtml() { // Responses b.append("Responses:
    \n"); Schema[] responses = key.messageType.responseSchemas(); - for (int version = key.oldestVersion(); version < key.latestVersion(); version++) { + for (int version = key.oldestVersion(); version <= key.latestVersion(); version++) { Schema schema = responses[version]; if (schema == null) throw new IllegalStateException("Unexpected null schema for " + key + " with version " + version); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java b/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java index 80bee86748269..a00a7dae596c3 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java @@ -39,6 +39,15 @@ public interface Readable { long readVarlong(); int remaining(); + /** + * Returns a new Readable object whose content will be shared with this object. + *
    + * The content of the new Readable object will start at this Readable's current + * position. The two Readable position will be independent, so read from one will + * not impact the other. + */ + Readable slice(); + default String readString(int length) { byte[] arr = readArray(length); return new String(arr, StandardCharsets.UTF_8); diff --git a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java index 7f78235ab70d4..2f5e2e50dde75 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java @@ -54,33 +54,52 @@ public class FileRecords extends AbstractRecords implements Closeable { * The {@code FileRecords.open} methods should be used instead of this constructor whenever possible. * The constructor is visible for tests. */ - FileRecords(File file, - FileChannel channel, - int start, - int end, - boolean isSlice) throws IOException { + FileRecords( + File file, + FileChannel channel, + int end + ) throws IOException { this.file = file; this.channel = channel; - this.start = start; + this.start = 0; this.end = end; - this.isSlice = isSlice; - this.size = new AtomicInteger(); + this.isSlice = false; - if (isSlice) { - // don't check the file size if this is just a slice view - size.set(end - start); - } else { - if (channel.size() > Integer.MAX_VALUE) - throw new KafkaException("The size of segment " + file + " (" + channel.size() + - ") is larger than the maximum allowed segment size of " + Integer.MAX_VALUE); + if (channel.size() > Integer.MAX_VALUE) { + throw new KafkaException( + "The size of segment " + file + " (" + channel.size() + + ") is larger than the maximum allowed segment size of " + Integer.MAX_VALUE + ); + } - int limit = Math.min((int) channel.size(), end); - size.set(limit - start); + int limit = Math.min((int) channel.size(), end); + this.size = new AtomicInteger(limit - start); - // if this is not a slice, update the file pointer to the end of the file - // set the file position to the last byte in the file - channel.position(limit); - } + // update the file position to the end of the file + channel.position(limit); + + batches = batchesFrom(start); + } + + /** + * Constructor for creating a slice. + * + * This overloaded constructor avoids having to declare a checked IO exception. + */ + private FileRecords( + File file, + FileChannel channel, + int start, + int end + ) { + this.file = file; + this.channel = channel; + this.start = start; + this.end = end; + this.isSlice = true; + + // don't check the file size since this is just a slice view + this.size = new AtomicInteger(end - start); batches = batchesFrom(start); } @@ -120,22 +139,12 @@ public void readInto(ByteBuffer buffer, int position) throws IOException { buffer.flip(); } - /** - * Return a slice of records from this instance, which is a view into this set starting from the given position - * and with the given size limit. - * - * If the size is beyond the end of the file, the end will be based on the size of the file at the time of the read. - * - * If this message set is already sliced, the position will be taken relative to that slicing. 
- * - * @param position The start position to begin the read from - * @param size The number of bytes after the start position to include - * @return A sliced wrapper on this message set limited based on the given position and size - */ - public FileRecords slice(int position, int size) throws IOException { + @Override + public FileRecords slice(int position, int size) { int availableBytes = availableBytes(position, size); int startPosition = this.start + position; - return new FileRecords(file, channel, startPosition, startPosition + availableBytes, true); + + return new FileRecords(file, channel, startPosition, startPosition + availableBytes); } /** @@ -202,6 +211,10 @@ public void flush() throws IOException { * Close this record set */ public void close() throws IOException { + if (!channel.isOpen()) { + return; + } + flush(); trim(); channel.close(); @@ -299,7 +312,7 @@ public int writeTo(TransferableChannel destChannel, int offset, int length) thro */ public LogOffsetPosition searchForOffsetFromPosition(long targetOffset, int startingPosition) { FileChannelRecordBatch prevBatch = null; - // The following logic is intentionally designed to minimize memory usage by avoiding + // The following logic is intentionally designed to minimize memory usage by avoiding // unnecessary calls to lastOffset() for every batch. // Instead, we use baseOffset() comparisons when possible, and only check lastOffset() when absolutely necessary. for (FileChannelRecordBatch batch : batchesFrom(startingPosition)) { @@ -307,14 +320,14 @@ public LogOffsetPosition searchForOffsetFromPosition(long targetOffset, int star if (batch.baseOffset() == targetOffset) { return LogOffsetPosition.fromBatch(batch); } - + // If we find the first batch with baseOffset greater than targetOffset if (batch.baseOffset() > targetOffset) { // If the previous batch contains the target if (prevBatch != null && prevBatch.lastOffset() >= targetOffset) return LogOffsetPosition.fromBatch(prevBatch); else { - // If there's no previous batch or the previous batch doesn't contain the + // If there's no previous batch or the previous batch doesn't contain the // target, return the current batch return LogOffsetPosition.fromBatch(batch); } @@ -323,7 +336,7 @@ public LogOffsetPosition searchForOffsetFromPosition(long targetOffset, int star } // Only one case would reach here: all batches have baseOffset less than targetOffset // Check if the last batch contains the target - if (prevBatch != null && prevBatch.lastOffset() >= targetOffset) + if (prevBatch != null && prevBatch.lastOffset() >= targetOffset) return LogOffsetPosition.fromBatch(prevBatch); return null; @@ -435,7 +448,7 @@ public static FileRecords open(File file, boolean preallocate) throws IOException { FileChannel channel = openChannel(file, mutable, fileAlreadyExists, initFileSize, preallocate); int end = (!fileAlreadyExists && preallocate) ? 
0 : Integer.MAX_VALUE; - return new FileRecords(file, channel, 0, end, false); + return new FileRecords(file, channel, end); } public static FileRecords open(File file, @@ -486,7 +499,7 @@ public static class LogOffsetPosition { public final long offset; public final int position; public final int size; - + public static LogOffsetPosition fromBatch(FileChannelRecordBatch batch) { return new LogOffsetPosition(batch.baseOffset(), batch.position(), batch.sizeInBytes()); } diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java index c2fd231e4b7cd..2e2b97dfe37ba 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java @@ -300,6 +300,31 @@ public ByteBuffer buffer() { return buffer.duplicate(); } + @Override + public MemoryRecords slice(int position, int size) { + if (position < 0) + throw new IllegalArgumentException("Invalid position: " + position + " in read from " + this); + if (position > buffer.limit()) + throw new IllegalArgumentException("Slice from position " + position + " exceeds end position of " + this); + if (size < 0) + throw new IllegalArgumentException("Invalid size: " + size + " in read from " + this); + + int availableBytes = Math.min(size, buffer.limit() - position); + // As of now, clients module support Java11 hence can't use ByteBuffer::slice(position, size) method. + // So we need to create a duplicate buffer and set the position and limit. Duplicate buffer + // is backed by original bytes hence not the content but only the relative position and limit + // are changed in the duplicate buffer. Once the position and limit are set, we can call the + // slice method to get the sliced buffer, which is a backed by the original buffer with the + // position reset to 0 and limit set to the size of the slice. + ByteBuffer slicedBuffer = buffer.duplicate(); + slicedBuffer.position(position); + slicedBuffer.limit(position + availableBytes); + // Reset the position to 0 so that the sliced view has a relative position. + slicedBuffer = slicedBuffer.slice(); + + return readableRecords(slicedBuffer); + } + @Override public Iterable batches() { return batches; diff --git a/clients/src/main/java/org/apache/kafka/common/record/Records.java b/clients/src/main/java/org/apache/kafka/common/record/Records.java index ec710394bec09..017c49ba94cdb 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/Records.java +++ b/clients/src/main/java/org/apache/kafka/common/record/Records.java @@ -90,4 +90,19 @@ public interface Records extends TransferableRecords { * @return The record iterator */ Iterable records(); + + /** + * Return a slice of records from this instance, which is a view into this set starting from the given position + * and with the given size limit. + * + * If the size is beyond the end of the records, the end will be based on the size of the records at the time of the read. + * + * If this records set is already sliced, the position will be taken relative to that slicing. + * + * @param position The start position to begin the read from. The position should be aligned to + * the batch boundary, else the returned records can't be iterated. 
+ * @param size The number of bytes after the start position to include + * @return A sliced wrapper on this message set limited based on the given position and size + */ + Records slice(int position, int size); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index a3a101ad5348e..750de2050f432 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -138,7 +138,7 @@ public String toString(boolean verbose) { } @Override - public final String toString() { + public String toString() { return toString(true); } @@ -316,8 +316,8 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return PushTelemetryRequest.parse(readable, apiVersion); case ASSIGN_REPLICAS_TO_DIRS: return AssignReplicasToDirsRequest.parse(readable, apiVersion); - case LIST_CLIENT_METRICS_RESOURCES: - return ListClientMetricsResourcesRequest.parse(readable, apiVersion); + case LIST_CONFIG_RESOURCES: + return ListConfigResourcesRequest.parse(readable, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequest.parse(readable, apiVersion); case SHARE_GROUP_HEARTBEAT: diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 5bf6186d049ea..bc313078d7424 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -18,8 +18,10 @@ import org.apache.kafka.common.network.Send; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.SendBuilder; import java.nio.ByteBuffer; @@ -51,8 +53,8 @@ final ByteBuffer serializeWithHeader(ResponseHeader header, short version) { } // Visible for testing - final ByteBuffer serialize(short version) { - return MessageUtil.toByteBufferAccessor(data(), version).buffer(); + final ByteBufferAccessor serialize(short version) { + return MessageUtil.toByteBufferAccessor(data(), version); } /** @@ -106,189 +108,189 @@ public static AbstractResponse parseResponse(ByteBuffer buffer, RequestHeader re requestHeader.correlationId(), responseHeader.correlationId()); } - return AbstractResponse.parseResponse(apiKey, buffer, apiVersion); + return AbstractResponse.parseResponse(apiKey, new ByteBufferAccessor(buffer), apiVersion); } - public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer responseBuffer, short version) { + public static AbstractResponse parseResponse(ApiKeys apiKey, Readable readable, short version) { switch (apiKey) { case PRODUCE: - return ProduceResponse.parse(responseBuffer, version); + return ProduceResponse.parse(readable, version); case FETCH: - return FetchResponse.parse(responseBuffer, version); + return FetchResponse.parse(readable, version); case LIST_OFFSETS: - return ListOffsetsResponse.parse(responseBuffer, version); + return ListOffsetsResponse.parse(readable, version); case METADATA: - return MetadataResponse.parse(responseBuffer, version); + return MetadataResponse.parse(readable, version); case OFFSET_COMMIT: - 
return OffsetCommitResponse.parse(responseBuffer, version); + return OffsetCommitResponse.parse(readable, version); case OFFSET_FETCH: - return OffsetFetchResponse.parse(responseBuffer, version); + return OffsetFetchResponse.parse(readable, version); case FIND_COORDINATOR: - return FindCoordinatorResponse.parse(responseBuffer, version); + return FindCoordinatorResponse.parse(readable, version); case JOIN_GROUP: - return JoinGroupResponse.parse(responseBuffer, version); + return JoinGroupResponse.parse(readable, version); case HEARTBEAT: - return HeartbeatResponse.parse(responseBuffer, version); + return HeartbeatResponse.parse(readable, version); case LEAVE_GROUP: - return LeaveGroupResponse.parse(responseBuffer, version); + return LeaveGroupResponse.parse(readable, version); case SYNC_GROUP: - return SyncGroupResponse.parse(responseBuffer, version); + return SyncGroupResponse.parse(readable, version); case DESCRIBE_GROUPS: - return DescribeGroupsResponse.parse(responseBuffer, version); + return DescribeGroupsResponse.parse(readable, version); case LIST_GROUPS: - return ListGroupsResponse.parse(responseBuffer, version); + return ListGroupsResponse.parse(readable, version); case SASL_HANDSHAKE: - return SaslHandshakeResponse.parse(responseBuffer, version); + return SaslHandshakeResponse.parse(readable, version); case API_VERSIONS: - return ApiVersionsResponse.parse(responseBuffer, version); + return ApiVersionsResponse.parse(readable, version); case CREATE_TOPICS: - return CreateTopicsResponse.parse(responseBuffer, version); + return CreateTopicsResponse.parse(readable, version); case DELETE_TOPICS: - return DeleteTopicsResponse.parse(responseBuffer, version); + return DeleteTopicsResponse.parse(readable, version); case DELETE_RECORDS: - return DeleteRecordsResponse.parse(responseBuffer, version); + return DeleteRecordsResponse.parse(readable, version); case INIT_PRODUCER_ID: - return InitProducerIdResponse.parse(responseBuffer, version); + return InitProducerIdResponse.parse(readable, version); case OFFSET_FOR_LEADER_EPOCH: - return OffsetsForLeaderEpochResponse.parse(responseBuffer, version); + return OffsetsForLeaderEpochResponse.parse(readable, version); case ADD_PARTITIONS_TO_TXN: - return AddPartitionsToTxnResponse.parse(responseBuffer, version); + return AddPartitionsToTxnResponse.parse(readable, version); case ADD_OFFSETS_TO_TXN: - return AddOffsetsToTxnResponse.parse(responseBuffer, version); + return AddOffsetsToTxnResponse.parse(readable, version); case END_TXN: - return EndTxnResponse.parse(responseBuffer, version); + return EndTxnResponse.parse(readable, version); case WRITE_TXN_MARKERS: - return WriteTxnMarkersResponse.parse(responseBuffer, version); + return WriteTxnMarkersResponse.parse(readable, version); case TXN_OFFSET_COMMIT: - return TxnOffsetCommitResponse.parse(responseBuffer, version); + return TxnOffsetCommitResponse.parse(readable, version); case DESCRIBE_ACLS: - return DescribeAclsResponse.parse(responseBuffer, version); + return DescribeAclsResponse.parse(readable, version); case CREATE_ACLS: - return CreateAclsResponse.parse(responseBuffer, version); + return CreateAclsResponse.parse(readable, version); case DELETE_ACLS: - return DeleteAclsResponse.parse(responseBuffer, version); + return DeleteAclsResponse.parse(readable, version); case DESCRIBE_CONFIGS: - return DescribeConfigsResponse.parse(responseBuffer, version); + return DescribeConfigsResponse.parse(readable, version); case ALTER_CONFIGS: - return AlterConfigsResponse.parse(responseBuffer, version); + 
return AlterConfigsResponse.parse(readable, version); case ALTER_REPLICA_LOG_DIRS: - return AlterReplicaLogDirsResponse.parse(responseBuffer, version); + return AlterReplicaLogDirsResponse.parse(readable, version); case DESCRIBE_LOG_DIRS: - return DescribeLogDirsResponse.parse(responseBuffer, version); + return DescribeLogDirsResponse.parse(readable, version); case SASL_AUTHENTICATE: - return SaslAuthenticateResponse.parse(responseBuffer, version); + return SaslAuthenticateResponse.parse(readable, version); case CREATE_PARTITIONS: - return CreatePartitionsResponse.parse(responseBuffer, version); + return CreatePartitionsResponse.parse(readable, version); case CREATE_DELEGATION_TOKEN: - return CreateDelegationTokenResponse.parse(responseBuffer, version); + return CreateDelegationTokenResponse.parse(readable, version); case RENEW_DELEGATION_TOKEN: - return RenewDelegationTokenResponse.parse(responseBuffer, version); + return RenewDelegationTokenResponse.parse(readable, version); case EXPIRE_DELEGATION_TOKEN: - return ExpireDelegationTokenResponse.parse(responseBuffer, version); + return ExpireDelegationTokenResponse.parse(readable, version); case DESCRIBE_DELEGATION_TOKEN: - return DescribeDelegationTokenResponse.parse(responseBuffer, version); + return DescribeDelegationTokenResponse.parse(readable, version); case DELETE_GROUPS: - return DeleteGroupsResponse.parse(responseBuffer, version); + return DeleteGroupsResponse.parse(readable, version); case ELECT_LEADERS: - return ElectLeadersResponse.parse(responseBuffer, version); + return ElectLeadersResponse.parse(readable, version); case INCREMENTAL_ALTER_CONFIGS: - return IncrementalAlterConfigsResponse.parse(responseBuffer, version); + return IncrementalAlterConfigsResponse.parse(readable, version); case ALTER_PARTITION_REASSIGNMENTS: - return AlterPartitionReassignmentsResponse.parse(responseBuffer, version); + return AlterPartitionReassignmentsResponse.parse(readable, version); case LIST_PARTITION_REASSIGNMENTS: - return ListPartitionReassignmentsResponse.parse(responseBuffer, version); + return ListPartitionReassignmentsResponse.parse(readable, version); case OFFSET_DELETE: - return OffsetDeleteResponse.parse(responseBuffer, version); + return OffsetDeleteResponse.parse(readable, version); case DESCRIBE_CLIENT_QUOTAS: - return DescribeClientQuotasResponse.parse(responseBuffer, version); + return DescribeClientQuotasResponse.parse(readable, version); case ALTER_CLIENT_QUOTAS: - return AlterClientQuotasResponse.parse(responseBuffer, version); + return AlterClientQuotasResponse.parse(readable, version); case DESCRIBE_USER_SCRAM_CREDENTIALS: - return DescribeUserScramCredentialsResponse.parse(responseBuffer, version); + return DescribeUserScramCredentialsResponse.parse(readable, version); case ALTER_USER_SCRAM_CREDENTIALS: - return AlterUserScramCredentialsResponse.parse(responseBuffer, version); + return AlterUserScramCredentialsResponse.parse(readable, version); case VOTE: - return VoteResponse.parse(responseBuffer, version); + return VoteResponse.parse(readable, version); case BEGIN_QUORUM_EPOCH: - return BeginQuorumEpochResponse.parse(responseBuffer, version); + return BeginQuorumEpochResponse.parse(readable, version); case END_QUORUM_EPOCH: - return EndQuorumEpochResponse.parse(responseBuffer, version); + return EndQuorumEpochResponse.parse(readable, version); case DESCRIBE_QUORUM: - return DescribeQuorumResponse.parse(responseBuffer, version); + return DescribeQuorumResponse.parse(readable, version); case ALTER_PARTITION: - return 
AlterPartitionResponse.parse(responseBuffer, version); + return AlterPartitionResponse.parse(readable, version); case UPDATE_FEATURES: - return UpdateFeaturesResponse.parse(responseBuffer, version); + return UpdateFeaturesResponse.parse(readable, version); case ENVELOPE: - return EnvelopeResponse.parse(responseBuffer, version); + return EnvelopeResponse.parse(readable, version); case FETCH_SNAPSHOT: - return FetchSnapshotResponse.parse(responseBuffer, version); + return FetchSnapshotResponse.parse(readable, version); case DESCRIBE_CLUSTER: - return DescribeClusterResponse.parse(responseBuffer, version); + return DescribeClusterResponse.parse(readable, version); case DESCRIBE_PRODUCERS: - return DescribeProducersResponse.parse(responseBuffer, version); + return DescribeProducersResponse.parse(readable, version); case BROKER_REGISTRATION: - return BrokerRegistrationResponse.parse(responseBuffer, version); + return BrokerRegistrationResponse.parse(readable, version); case BROKER_HEARTBEAT: - return BrokerHeartbeatResponse.parse(responseBuffer, version); + return BrokerHeartbeatResponse.parse(readable, version); case UNREGISTER_BROKER: - return UnregisterBrokerResponse.parse(responseBuffer, version); + return UnregisterBrokerResponse.parse(readable, version); case DESCRIBE_TRANSACTIONS: - return DescribeTransactionsResponse.parse(responseBuffer, version); + return DescribeTransactionsResponse.parse(readable, version); case LIST_TRANSACTIONS: - return ListTransactionsResponse.parse(responseBuffer, version); + return ListTransactionsResponse.parse(readable, version); case ALLOCATE_PRODUCER_IDS: - return AllocateProducerIdsResponse.parse(responseBuffer, version); + return AllocateProducerIdsResponse.parse(readable, version); case CONSUMER_GROUP_HEARTBEAT: - return ConsumerGroupHeartbeatResponse.parse(responseBuffer, version); + return ConsumerGroupHeartbeatResponse.parse(readable, version); case CONSUMER_GROUP_DESCRIBE: - return ConsumerGroupDescribeResponse.parse(responseBuffer, version); + return ConsumerGroupDescribeResponse.parse(readable, version); case CONTROLLER_REGISTRATION: - return ControllerRegistrationResponse.parse(responseBuffer, version); + return ControllerRegistrationResponse.parse(readable, version); case GET_TELEMETRY_SUBSCRIPTIONS: - return GetTelemetrySubscriptionsResponse.parse(responseBuffer, version); + return GetTelemetrySubscriptionsResponse.parse(readable, version); case PUSH_TELEMETRY: - return PushTelemetryResponse.parse(responseBuffer, version); + return PushTelemetryResponse.parse(readable, version); case ASSIGN_REPLICAS_TO_DIRS: - return AssignReplicasToDirsResponse.parse(responseBuffer, version); - case LIST_CLIENT_METRICS_RESOURCES: - return ListClientMetricsResourcesResponse.parse(responseBuffer, version); + return AssignReplicasToDirsResponse.parse(readable, version); + case LIST_CONFIG_RESOURCES: + return ListConfigResourcesResponse.parse(readable, version); case DESCRIBE_TOPIC_PARTITIONS: - return DescribeTopicPartitionsResponse.parse(responseBuffer, version); + return DescribeTopicPartitionsResponse.parse(readable, version); case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatResponse.parse(responseBuffer, version); + return ShareGroupHeartbeatResponse.parse(readable, version); case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeResponse.parse(responseBuffer, version); + return ShareGroupDescribeResponse.parse(readable, version); case SHARE_FETCH: - return ShareFetchResponse.parse(responseBuffer, version); + return ShareFetchResponse.parse(readable, 
version); case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeResponse.parse(responseBuffer, version); + return ShareAcknowledgeResponse.parse(readable, version); case ADD_RAFT_VOTER: - return AddRaftVoterResponse.parse(responseBuffer, version); + return AddRaftVoterResponse.parse(readable, version); case REMOVE_RAFT_VOTER: - return RemoveRaftVoterResponse.parse(responseBuffer, version); + return RemoveRaftVoterResponse.parse(readable, version); case UPDATE_RAFT_VOTER: - return UpdateRaftVoterResponse.parse(responseBuffer, version); + return UpdateRaftVoterResponse.parse(readable, version); case INITIALIZE_SHARE_GROUP_STATE: - return InitializeShareGroupStateResponse.parse(responseBuffer, version); + return InitializeShareGroupStateResponse.parse(readable, version); case READ_SHARE_GROUP_STATE: - return ReadShareGroupStateResponse.parse(responseBuffer, version); + return ReadShareGroupStateResponse.parse(readable, version); case WRITE_SHARE_GROUP_STATE: - return WriteShareGroupStateResponse.parse(responseBuffer, version); + return WriteShareGroupStateResponse.parse(readable, version); case DELETE_SHARE_GROUP_STATE: - return DeleteShareGroupStateResponse.parse(responseBuffer, version); + return DeleteShareGroupStateResponse.parse(readable, version); case READ_SHARE_GROUP_STATE_SUMMARY: - return ReadShareGroupStateSummaryResponse.parse(responseBuffer, version); + return ReadShareGroupStateSummaryResponse.parse(readable, version); case STREAMS_GROUP_HEARTBEAT: - return StreamsGroupHeartbeatResponse.parse(responseBuffer, version); + return StreamsGroupHeartbeatResponse.parse(readable, version); case STREAMS_GROUP_DESCRIBE: - return StreamsGroupDescribeResponse.parse(responseBuffer, version); + return StreamsGroupDescribeResponse.parse(readable, version); case DESCRIBE_SHARE_GROUP_OFFSETS: - return DescribeShareGroupOffsetsResponse.parse(responseBuffer, version); + return DescribeShareGroupOffsetsResponse.parse(readable, version); case ALTER_SHARE_GROUP_OFFSETS: - return AlterShareGroupOffsetsResponse.parse(responseBuffer, version); + return AlterShareGroupOffsetsResponse.parse(readable, version); case DELETE_SHARE_GROUP_OFFSETS: - return DeleteShareGroupOffsetsResponse.parse(responseBuffer, version); + return DeleteShareGroupOffsetsResponse.parse(readable, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java index d90afd04ddcde..ad3946b432b6f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; /** @@ -66,8 +65,8 @@ public AddOffsetsToTxnResponseData data() { return data; } - public static AddOffsetsToTxnResponse parse(ByteBuffer buffer, short version) { - return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData(new ByteBufferAccessor(buffer), version)); + public static AddOffsetsToTxnResponse parse(Readable readable, 
short version) { + return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java index 0abf85bf0239a..645fd667186b8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java @@ -24,10 +24,9 @@ import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult; import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResultCollection; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -150,8 +149,8 @@ public AddPartitionsToTxnResponseData data() { return data; } - public static AddPartitionsToTxnResponse parse(ByteBuffer buffer, short version) { - return new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData(new ByteBufferAccessor(buffer), version)); + public static AddPartitionsToTxnResponse parse(Readable readable, short version) { + return new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java index ab0600b618406..52a0cb05feb76 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -58,8 +57,8 @@ public Map errorCounts() { } } - public static AddRaftVoterResponse parse(ByteBuffer buffer, short version) { + public static AddRaftVoterResponse parse(Readable readable, short version) { return new AddRaftVoterResponse( - new AddRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); + new AddRaftVoterResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java index 2511e2b2db320..4c47651193188 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.AllocateProducerIdsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -65,8 +64,8 @@ public Errors error() { return 
Errors.forCode(data.errorCode()); } - public static AllocateProducerIdsResponse parse(ByteBuffer buffer, short version) { + public static AllocateProducerIdsResponse parse(Readable readable, short version) { return new AllocateProducerIdsResponse(new AllocateProducerIdsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java index 9987b47be1d2d..9c4990dd3c719 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java @@ -21,11 +21,10 @@ import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntityData; import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntryData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.quota.ClientQuotaEntity; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumMap; import java.util.HashMap; @@ -97,8 +96,8 @@ private static List toEntityData(ClientQuotaEntity entity) { return entityData; } - public static AlterClientQuotasResponse parse(ByteBuffer buffer, short version) { - return new AlterClientQuotasResponse(new AlterClientQuotasResponseData(new ByteBufferAccessor(buffer), version)); + public static AlterClientQuotasResponse parse(Readable readable, short version) { + return new AlterClientQuotasResponse(new AlterClientQuotasResponseData(readable, version)); } public static AlterClientQuotasResponse fromQuotaEntities(Map result, int throttleTimeMs) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java index 1668c2446bc77..5f7b9421cb8e9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.AlterConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; import java.util.stream.Collectors; @@ -65,8 +64,8 @@ public AlterConfigsResponseData data() { return data; } - public static AlterConfigsResponse parse(ByteBuffer buffer, short version) { - return new AlterConfigsResponse(new AlterConfigsResponseData(new ByteBufferAccessor(buffer), version)); + public static AlterConfigsResponse parse(Readable readable, short version) { + return new AlterConfigsResponse(new AlterConfigsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java index ba9a875616d7c..691a399761dec 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java +++ 
b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -35,9 +34,9 @@ public AlterPartitionReassignmentsResponse(AlterPartitionReassignmentsResponseDa this.data = data; } - public static AlterPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) { + public static AlterPartitionReassignmentsResponse parse(Readable readable, short version) { return new AlterPartitionReassignmentsResponse( - new AlterPartitionReassignmentsResponseData(new ByteBufferAccessor(buffer), version)); + new AlterPartitionReassignmentsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java index a6f9809a6efbc..580be92b8999e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.AlterPartitionResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -60,7 +59,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static AlterPartitionResponse parse(ByteBuffer buffer, short version) { - return new AlterPartitionResponse(new AlterPartitionResponseData(new ByteBufferAccessor(buffer), version)); + public static AlterPartitionResponse parse(Readable readable, short version) { + return new AlterPartitionResponse(new AlterPartitionResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java index bf715ae0575c0..755fc98038a26 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -67,8 +66,8 @@ public Map errorCounts() { return errorCounts; } - public static AlterReplicaLogDirsResponse parse(ByteBuffer buffer, short version) { - return new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData(new ByteBufferAccessor(buffer), version)); + public static AlterReplicaLogDirsResponse parse(Readable readable, short version) { + return new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData(readable, version)); } @Override diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsRequest.java index 3417c1ae9c5aa..be04568e1a395 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsRequest.java @@ -23,15 +23,11 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - public class AlterShareGroupOffsetsRequest extends AbstractRequest { private final AlterShareGroupOffsetsRequestData data; - public AlterShareGroupOffsetsRequest(AlterShareGroupOffsetsRequestData data, short version) { + private AlterShareGroupOffsetsRequest(AlterShareGroupOffsetsRequestData data, short version) { super(ApiKeys.ALTER_SHARE_GROUP_OFFSETS, version); this.data = data; } @@ -41,11 +37,7 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); - data.topics().forEach( - topicResult -> results.add(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() - .setTopicName(topicResult.topicName()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() - .setPartitionIndex(partitionData.partitionIndex()) - .setErrorCode(Errors.forException(e).code())) - .collect(Collectors.toList())))); - return new AlterShareGroupOffsetsResponse(new AlterShareGroupOffsetsResponseData() - .setResponses(results)); + public AlterShareGroupOffsetsResponse getErrorResponse(int throttleTimeMs, Throwable e) { + return getErrorResponse(throttleTimeMs, Errors.forException(e)); + } + + public AlterShareGroupOffsetsResponse getErrorResponse(int throttleTimeMs, Errors error) { + return getErrorResponse(throttleTimeMs, error.code(), error.message()); + } + + public AlterShareGroupOffsetsResponse getErrorResponse(int throttleTimeMs, short errorCode, String message) { + return new AlterShareGroupOffsetsResponse( + new AlterShareGroupOffsetsResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(errorCode) + .setErrorMessage(message) + ); + } + + public static AlterShareGroupOffsetsResponseData getErrorResponseData(Errors error) { + return getErrorResponseData(error, null); + } + + public static AlterShareGroupOffsetsResponseData getErrorResponseData(Errors error, String errorMessage) { + return new AlterShareGroupOffsetsResponseData() + .setErrorCode(error.code()) + .setErrorMessage(errorMessage == null ? 
error.message() : errorMessage); } public static AlterShareGroupOffsetsRequest parse(Readable readable, short version) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsResponse.java index 3d4db8ac0999f..5da47d0d326bf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterShareGroupOffsetsResponse.java @@ -17,12 +17,14 @@ package org.apache.kafka.common.requests; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData; +import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic; +import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -38,6 +40,7 @@ public AlterShareGroupOffsetsResponse(AlterShareGroupOffsetsResponseData data) { @Override public Map errorCounts() { Map counts = new EnumMap<>(Errors.class); + updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach(topic -> topic.partitions().forEach(partitionResponse -> updateErrorCounts(counts, Errors.forCode(partitionResponse.errorCode())) )); @@ -59,9 +62,52 @@ public AlterShareGroupOffsetsResponseData data() { return data; } - public static AlterShareGroupOffsetsResponse parse(ByteBuffer buffer, short version) { + public static AlterShareGroupOffsetsResponse parse(Readable readable, short version) { return new AlterShareGroupOffsetsResponse( - new AlterShareGroupOffsetsResponseData(new ByteBufferAccessor(buffer), version) + new AlterShareGroupOffsetsResponseData(readable, version) ); } + + public static class Builder { + AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData(); + AlterShareGroupOffsetsResponseTopicCollection topics = new AlterShareGroupOffsetsResponseTopicCollection(); + + private AlterShareGroupOffsetsResponseTopic getOrCreateTopic(String topic, Uuid topicId) { + AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic topicData = topics.find(topic); + if (topicData == null) { + topicData = new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() + .setTopicName(topic) + .setTopicId(topicId == null ? 
Uuid.ZERO_UUID : topicId); + topics.add(topicData); + } + return topicData; + } + + public Builder addPartition(String topic, int partition, Map topicIdsToNames, ApiError error) { + AlterShareGroupOffsetsResponseTopic topicData = getOrCreateTopic(topic, topicIdsToNames.get(topic)); + topicData.partitions().add(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() + .setPartitionIndex(partition) + .setErrorCode(error.error().code()) + .setErrorMessage(error.message())); + return this; + } + + public AlterShareGroupOffsetsResponse build() { + data.setResponses(topics); + return new AlterShareGroupOffsetsResponse(data); + } + + public Builder merge(AlterShareGroupOffsetsResponseData data, Map topicIdsToNames) { + data.responses().forEach(topic -> { + AlterShareGroupOffsetsResponseTopic newTopic = getOrCreateTopic(topic.topicName(), topicIdsToNames.get(topic.topicName())); + topic.partitions().forEach(partition -> newTopic.partitions().add( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() + .setPartitionIndex(partition.partitionIndex()) + .setErrorCode(partition.errorCode()) + .setErrorMessage(partition.errorMessage()))); + }); + return this; + + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java index 52dba26b4ffae..7f2991cfad7f9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java @@ -17,10 +17,14 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData; +import org.apache.kafka.common.message.AlterUserScramCredentialsRequestDataJsonConverter; import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Readable; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; + import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -81,4 +85,16 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .collect(Collectors.toList()); return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData().setResults(results)); } + + // Do not print salt or saltedPassword + @Override + public String toString() { + JsonNode json = AlterUserScramCredentialsRequestDataJsonConverter.write(data, version()).deepCopy(); + + for (JsonNode upsertion : json.get("upsertions")) { + ((ObjectNode) upsertion).put("salt", ""); + ((ObjectNode) upsertion).put("saltedPassword", ""); + } + return AlterUserScramCredentialsRequestDataJsonConverter.read(json, version()).toString(); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java index 86c9b006a2ce0..bc448a9e1042f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import 
org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class AlterUserScramCredentialsResponse extends AbstractResponse { @@ -58,7 +57,7 @@ public Map errorCounts() { return errorCounts(data.results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static AlterUserScramCredentialsResponse parse(ByteBuffer buffer, short version) { - return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version)); + public static AlterUserScramCredentialsResponse parse(Readable readable, short version) { + return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java index 324e527984d08..daaa7ba2fce7b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java @@ -29,10 +29,9 @@ import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKeyCollection; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -152,19 +151,18 @@ public boolean zkMigrationReady() { return data.zkMigrationReady(); } - public static ApiVersionsResponse parse(ByteBuffer buffer, short version) { + public static ApiVersionsResponse parse(Readable readable, short version) { // Fallback to version 0 for ApiVersions response. If a client sends an ApiVersionsRequest // using a version higher than that supported by the broker, a version 0 response is sent // to the client indicating UNSUPPORTED_VERSION. When the client receives the response, it // falls back while parsing it which means that the version received by this // method is not necessarily the real one. It may be version 0 as well. 
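// For readability, the ApiVersionsResponse#parse fallback produced by the hunk below can be
// sketched as plain Java, reassembled from the added lines. Readable#slice() is assumed to
// return an independent view of the remaining bytes (that is how this patch uses it in place
// of the old buffer.position() bookmark); this is an illustrative sketch, not additional patch content.
public static ApiVersionsResponse parse(Readable readable, short version) {
    // Take a copy of the unread bytes before the first attempt so a failed parse can be retried.
    Readable readableCopy = readable.slice();
    try {
        return new ApiVersionsResponse(new ApiVersionsResponseData(readable, version));
    } catch (RuntimeException e) {
        // A broker that does not support the requested version answers with a version 0
        // response, so retry the copied view at version 0 before giving up.
        if (version != 0)
            return new ApiVersionsResponse(new ApiVersionsResponseData(readableCopy, (short) 0));
        else
            throw e;
    }
}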
- int prev = buffer.position(); + Readable readableCopy = readable.slice(); try { - return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), version)); + return new ApiVersionsResponse(new ApiVersionsResponseData(readable, version)); } catch (RuntimeException e) { - buffer.position(prev); if (version != 0) - return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), (short) 0)); + return new ApiVersionsResponse(new ApiVersionsResponseData(readableCopy, (short) 0)); else throw e; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java index 90912956029cc..84f86d058ec65 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.AssignReplicasToDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -54,8 +53,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static AssignReplicasToDirsResponse parse(ByteBuffer buffer, short version) { + public static AssignReplicasToDirsResponse parse(Readable readable, short version) { return new AssignReplicasToDirsResponse(new AssignReplicasToDirsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java index 3801d49ab9336..7f77c10e93859 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.BeginQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -77,8 +76,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static BeginQuorumEpochResponse parse(ByteBuffer buffer, short version) { - return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(new ByteBufferAccessor(buffer), version)); + public static BeginQuorumEpochResponse parse(Readable readable, short version) { + return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java index b6521666ab57e..f46e56ca50eef 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java @@ -19,10 +19,9 @@ import 
org.apache.kafka.common.message.BrokerHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -56,8 +55,8 @@ public Map errorCounts() { return errorCounts; } - public static BrokerHeartbeatResponse parse(ByteBuffer buffer, short version) { - return new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData(new ByteBufferAccessor(buffer), version)); + public static BrokerHeartbeatResponse parse(Readable readable, short version) { + return new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java index e636b84116993..be8a2f1f50601 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.BrokerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -56,8 +55,8 @@ public Map errorCounts() { return errorCounts; } - public static BrokerRegistrationResponse parse(ByteBuffer buffer, short version) { - return new BrokerRegistrationResponse(new BrokerRegistrationResponseData(new ByteBufferAccessor(buffer), version)); + public static BrokerRegistrationResponse parse(Readable readable, short version) { + return new BrokerRegistrationResponse(new BrokerRegistrationResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java index 0f3458af85a77..25ec8b6fbe235 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -70,9 +69,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ConsumerGroupDescribeResponse parse(ByteBuffer buffer, short version) { + public static ConsumerGroupDescribeResponse parse(Readable readable, short version) { return new ConsumerGroupDescribeResponse( - new ConsumerGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) + new ConsumerGroupDescribeResponseData(readable, version) ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java index 
2d8b91c3d24bc..1d6fb4e682d67 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java @@ -16,14 +16,18 @@ */ package org.apache.kafka.common.requests; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; /** * Possible error codes. @@ -69,8 +73,21 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ConsumerGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { + public static ConsumerGroupHeartbeatResponse parse(Readable readable, short version) { return new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); + } + + public static ConsumerGroupHeartbeatResponseData.Assignment createAssignment( + Map> assignment + ) { + List topicPartitions = assignment.entrySet().stream() + .map(keyValue -> new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(keyValue.getKey()) + .setPartitions(new ArrayList<>(keyValue.getValue()))) + .collect(Collectors.toList()); + + return new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(topicPartitions); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java index d44e915b5fa5a..9cc53db15a9a4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.ControllerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -54,8 +53,8 @@ public Map errorCounts() { return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); } - public static ControllerRegistrationResponse parse(ByteBuffer buffer, short version) { + public static ControllerRegistrationResponse parse(Readable readable, short version) { return new ControllerRegistrationResponse( - new ControllerRegistrationResponseData(new ByteBufferAccessor(buffer), version)); + new ControllerRegistrationResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java index cef7b73ac27e9..2f2877ad510f7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.CreateAclsResponseData; import 
org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -57,8 +56,8 @@ public Map errorCounts() { return errorCounts(results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static CreateAclsResponse parse(ByteBuffer buffer, short version) { - return new CreateAclsResponse(new CreateAclsResponseData(new ByteBufferAccessor(buffer), version)); + public static CreateAclsResponse parse(Readable readable, short version) { + return new CreateAclsResponse(new CreateAclsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java index 0a9f9a8991bdc..ce577d48d9779 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java @@ -18,8 +18,8 @@ import org.apache.kafka.common.message.CreateDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; import java.nio.ByteBuffer; @@ -34,9 +34,9 @@ public CreateDelegationTokenResponse(CreateDelegationTokenResponseData data) { this.data = data; } - public static CreateDelegationTokenResponse parse(ByteBuffer buffer, short version) { + public static CreateDelegationTokenResponse parse(Readable readable, short version) { return new CreateDelegationTokenResponse( - new CreateDelegationTokenResponseData(new ByteBufferAccessor(buffer), version)); + new CreateDelegationTokenResponseData(readable, version)); } public static CreateDelegationTokenResponse prepareResponse(int version, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java index 485a4a34020a4..86d8672e19b34 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.CreatePartitionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -49,8 +48,8 @@ public Map errorCounts() { return counts; } - public static CreatePartitionsResponse parse(ByteBuffer buffer, short version) { - return new CreatePartitionsResponse(new CreatePartitionsResponseData(new ByteBufferAccessor(buffer), version)); + public static CreatePartitionsResponse parse(Readable readable, short version) { + return new CreatePartitionsResponse(new CreatePartitionsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java index 
baed1af16fe2f..be36ff7df5b77 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.CreateTopicsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -74,8 +73,8 @@ public Map errorCounts() { return counts; } - public static CreateTopicsResponse parse(ByteBuffer buffer, short version) { - return new CreateTopicsResponse(new CreateTopicsResponseData(new ByteBufferAccessor(buffer), version)); + public static CreateTopicsResponse parse(Readable readable, short version) { + return new CreateTopicsResponse(new CreateTopicsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java index 6b759d7cee5f5..789c00148699c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java @@ -25,8 +25,8 @@ import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; @@ -35,7 +35,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -75,8 +74,8 @@ public Map errorCounts() { return errorCounts(filterResults().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static DeleteAclsResponse parse(ByteBuffer buffer, short version) { - return new DeleteAclsResponse(new DeleteAclsResponseData(new ByteBufferAccessor(buffer), version), version); + public static DeleteAclsResponse parse(Readable readable, short version) { + return new DeleteAclsResponse(new DeleteAclsResponseData(readable, version), version); } public String toString() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java index f41abe1a1449d..d1939581a08e0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.DeleteGroupsResponseData; import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -77,8 +76,8 @@ public Map 
errorCounts() { return counts; } - public static DeleteGroupsResponse parse(ByteBuffer buffer, short version) { - return new DeleteGroupsResponse(new DeleteGroupsResponseData(new ByteBufferAccessor(buffer), version)); + public static DeleteGroupsResponse parse(Readable readable, short version) { + return new DeleteGroupsResponse(new DeleteGroupsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java index 4db4f4b66f565..b440dec72dc4e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.DeleteRecordsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -72,8 +71,8 @@ public Map errorCounts() { return errorCounts; } - public static DeleteRecordsResponse parse(ByteBuffer buffer, short version) { - return new DeleteRecordsResponse(new DeleteRecordsResponseData(new ByteBufferAccessor(buffer), version)); + public static DeleteRecordsResponse parse(Readable readable, short version) { + return new DeleteRecordsResponse(new DeleteRecordsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupOffsetsRequest.java index f96bad8d17836..1e28115bada87 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupOffsetsRequest.java @@ -29,11 +29,7 @@ public static class Builder extends AbstractRequest.Builder errorCounts() { Map counts = new EnumMap<>(Errors.class); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach( - topicResult -> topicResult.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + topicResult -> updateErrorCounts(counts, Errors.forCode(topicResult.errorCode())) ); return counts; } @@ -61,9 +58,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static DeleteShareGroupOffsetsResponse parse(ByteBuffer buffer, short version) { + public static DeleteShareGroupOffsetsResponse parse(Readable readable, short version) { return new DeleteShareGroupOffsetsResponse( - new DeleteShareGroupOffsetsResponseData(new ByteBufferAccessor(buffer), version) + new DeleteShareGroupOffsetsResponseData(readable, version) ); } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java index 93d0a3b8de062..c15e76328e1eb 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); 
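// The re-indented DeleteShareGroupStateRequest#getErrorResponse hunk around this point is easier
// to follow reassembled as plain Java. The enclosing override signature
// (getErrorResponse(int throttleTimeMs, Throwable e)) and the List element type are assumptions
// taken from the surrounding class, so treat this as a sketch rather than authoritative patch content.
@Override
public DeleteShareGroupStateResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    List<DeleteShareGroupStateResponseData.DeleteStateResult> results = new ArrayList<>();
    // One DeleteStateResult per requested topic, with every requested partition marked
    // with the error code derived from the supplied exception.
    data.topics().forEach(
        topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult()
            .setTopicId(topicResult.topicId())
            .setPartitions(topicResult.partitions().stream()
                .map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult()
                    .setPartition(partitionData.partition())
                    .setErrorCode(Errors.forException(e).code()))
                .collect(Collectors.toList()))));
    return new DeleteShareGroupStateResponse(new DeleteShareGroupStateResponseData()
        .setResults(results));
}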
data.topics().forEach( - topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code())) - .collect(Collectors.toList())))); + topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code())) + .collect(Collectors.toList())))); return new DeleteShareGroupStateResponse(new DeleteShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -81,8 +77,8 @@ public DeleteShareGroupStateRequestData data() { public static DeleteShareGroupStateRequest parse(Readable readable, short version) { return new DeleteShareGroupStateRequest( - new DeleteShareGroupStateRequestData(readable, version), - version + new DeleteShareGroupStateRequestData(readable, version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java index 3126b82ff0976..e7da3e048c4a0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java @@ -21,12 +21,10 @@ import org.apache.kafka.common.message.DeleteShareGroupStateRequestData; import org.apache.kafka.common.message.DeleteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -65,9 +63,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static DeleteShareGroupStateResponse parse(ByteBuffer buffer, short version) { + public static DeleteShareGroupStateResponse parse(Readable readable, short version) { return new DeleteShareGroupStateResponse( - new DeleteShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) + new DeleteShareGroupStateResponseData(readable, version) ); } @@ -105,7 +103,7 @@ public static DeleteShareGroupStateResponseData.PartitionResult toResponsePartit public static DeleteShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { return new DeleteShareGroupStateResponseData().setResults( - Collections.singletonList(new DeleteShareGroupStateResponseData.DeleteStateResult() + List.of(new DeleteShareGroupStateResponseData.DeleteStateResult() .setTopicId(topicId) .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() .setPartition(partitionId) diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java index f2b11ea4e8dd1..efc26026d8449 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.DeleteTopicsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -69,8 +68,8 @@ public Map errorCounts() { return counts; } - public static DeleteTopicsResponse parse(ByteBuffer buffer, short version) { - return new DeleteTopicsResponse(new DeleteTopicsResponseData(new ByteBufferAccessor(buffer), version)); + public static DeleteTopicsResponse parse(Readable readable, short version) { + return new DeleteTopicsResponse(new DeleteTopicsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java index 424ff563a4c8a..c387dbd4da3b3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java @@ -26,13 +26,12 @@ import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription; import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -89,8 +88,8 @@ public final List acls() { return data.resources(); } - public static DescribeAclsResponse parse(ByteBuffer buffer, short version) { - return new DescribeAclsResponse(new DescribeAclsResponseData(new ByteBufferAccessor(buffer), version), version); + public static DescribeAclsResponse parse(Readable readable, short version) { + return new DescribeAclsResponse(new DescribeAclsResponseData(readable, version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java index 3a052c9fe8eba..5d9a980ea3c36 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java @@ -22,11 +22,10 @@ import org.apache.kafka.common.message.DescribeClientQuotasResponseData.EntryData; import org.apache.kafka.common.message.DescribeClientQuotasResponseData.ValueData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.quota.ClientQuotaEntity; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -85,8 +84,8 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); 
} - public static DescribeClientQuotasResponse parse(ByteBuffer buffer, short version) { - return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeClientQuotasResponse parse(Readable readable, short version) { + return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData(readable, version)); } public static DescribeClientQuotasResponse fromQuotaEntities(Map> entities, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java index 7c892874214e8..0b8724a57602b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.message.DescribeClusterResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; @@ -63,7 +62,7 @@ public DescribeClusterResponseData data() { return data; } - public static DescribeClusterResponse parse(ByteBuffer buffer, short version) { - return new DescribeClusterResponse(new DescribeClusterResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeClusterResponse parse(Readable readable, short version) { + return new DescribeClusterResponse(new DescribeClusterResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java index c8c3fab584c7a..f291af273b84a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.DescribeConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collection; import java.util.EnumMap; import java.util.Map; @@ -248,8 +247,8 @@ public Map errorCounts() { return errorCounts; } - public static DescribeConfigsResponse parse(ByteBuffer buffer, short version) { - return new DescribeConfigsResponse(new DescribeConfigsResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeConfigsResponse parse(Readable readable, short version) { + return new DescribeConfigsResponse(new DescribeConfigsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java index a922f056a89aa..d0476a3772caf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java @@ -20,13 +20,12 @@ 
import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationToken; import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationTokenRenewer; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.token.delegation.DelegationToken; import org.apache.kafka.common.security.token.delegation.TokenInformation; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -76,9 +75,9 @@ public DescribeDelegationTokenResponse(DescribeDelegationTokenResponseData data) this.data = data; } - public static DescribeDelegationTokenResponse parse(ByteBuffer buffer, short version) { + public static DescribeDelegationTokenResponse parse(Readable readable, short version) { return new DescribeDelegationTokenResponse(new DescribeDelegationTokenResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java index 2171493b053ab..4d59aee8758ab 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java @@ -20,11 +20,10 @@ import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroup; import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.utils.Utils; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.EnumMap; import java.util.List; @@ -145,8 +144,8 @@ public Map errorCounts() { return errorCounts; } - public static DescribeGroupsResponse parse(ByteBuffer buffer, short version) { - return new DescribeGroupsResponse(new DescribeGroupsResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeGroupsResponse parse(Readable readable, short version) { + return new DescribeGroupsResponse(new DescribeGroupsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java index bbfc54d294d15..37929c0b3d8fd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.DescribeLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -64,8 +63,8 @@ public Map errorCounts() { return errorCounts; } - public static DescribeLogDirsResponse parse(ByteBuffer buffer, short version) { - return new DescribeLogDirsResponse(new 
DescribeLogDirsResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeLogDirsResponse parse(Readable readable, short version) { + return new DescribeLogDirsResponse(new DescribeLogDirsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java index 065a101bed6e8..1b30862c4dc5b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.message.DescribeProducersResponseData.PartitionResponse; import org.apache.kafka.common.message.DescribeProducersResponseData.TopicResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -51,9 +50,9 @@ public Map errorCounts() { return errorCounts; } - public static DescribeProducersResponse parse(ByteBuffer buffer, short version) { + public static DescribeProducersResponse parse(Readable readable, short version) { return new DescribeProducersResponse(new DescribeProducersResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java index 5ce367bfed29e..c3b33d48052cd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.DescribeQuorumResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.EnumMap; import java.util.Map; @@ -106,7 +105,7 @@ public static DescribeQuorumResponseData singletonResponse( return res; } - public static DescribeQuorumResponse parse(ByteBuffer buffer, short version) { - return new DescribeQuorumResponse(new DescribeQuorumResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeQuorumResponse parse(Readable readable, short version) { + return new DescribeQuorumResponse(new DescribeQuorumResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java index 8dfe418162f60..bc859511a7517 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder errorCounts() { return errorCounts; } - public static DescribeTopicPartitionsResponse prepareResponse( - int throttleTimeMs, - List topics - ) { - DescribeTopicPartitionsResponseData 
responseData = new DescribeTopicPartitionsResponseData(); - responseData.setThrottleTimeMs(throttleTimeMs); - topics.forEach(topicResponse -> responseData.topics().add(topicResponse)); - return new DescribeTopicPartitionsResponse(responseData); - } - - public static DescribeTopicPartitionsResponse parse(ByteBuffer buffer, short version) { + public static DescribeTopicPartitionsResponse parse(Readable readable, short version) { return new DescribeTopicPartitionsResponse( - new DescribeTopicPartitionsResponseData(new ByteBufferAccessor(buffer), version)); + new DescribeTopicPartitionsResponseData(readable, version)); } public static TopicPartitionInfo partitionToTopicPartitionInfo( diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java index 061129f8b2aac..e69bcddd9ac88 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.DescribeTransactionsResponseData; import org.apache.kafka.common.message.DescribeTransactionsResponseData.TransactionState; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -49,9 +48,9 @@ public Map errorCounts() { return errorCounts; } - public static DescribeTransactionsResponse parse(ByteBuffer buffer, short version) { + public static DescribeTransactionsResponse parse(Readable readable, short version) { return new DescribeTransactionsResponse(new DescribeTransactionsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java index 58ba4212949c6..fad733fd44f5b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class DescribeUserScramCredentialsResponse extends AbstractResponse { @@ -58,7 +57,7 @@ public Map errorCounts() { return errorCounts(data.results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static DescribeUserScramCredentialsResponse parse(ByteBuffer buffer, short version) { - return new DescribeUserScramCredentialsResponse(new DescribeUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version)); + public static DescribeUserScramCredentialsResponse parse(Readable readable, short version) { + return new DescribeUserScramCredentialsResponse(new DescribeUserScramCredentialsResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java 
b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java index d6c7466472ce0..1a1546980a8cf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java @@ -21,10 +21,9 @@ import org.apache.kafka.common.message.ElectLeadersResponseData; import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.HashMap; import java.util.List; @@ -80,8 +79,8 @@ public Map errorCounts() { return counts; } - public static ElectLeadersResponse parse(ByteBuffer buffer, short version) { - return new ElectLeadersResponse(new ElectLeadersResponseData(new ByteBufferAccessor(buffer), version)); + public static ElectLeadersResponse parse(Readable readable, short version) { + return new ElectLeadersResponse(new ElectLeadersResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java index 7750ada7969db..fe39d80087e2d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -76,7 +75,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static EndQuorumEpochResponse parse(ByteBuffer buffer, short version) { - return new EndQuorumEpochResponse(new EndQuorumEpochResponseData(new ByteBufferAccessor(buffer), version)); + public static EndQuorumEpochResponse parse(Readable readable, short version) { + return new EndQuorumEpochResponse(new EndQuorumEpochResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java index 0ab01bb1a3d33..21f111e93956e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.EndTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; /** @@ -69,8 +68,8 @@ public EndTxnResponseData data() { return data; } - public static EndTxnResponse parse(ByteBuffer buffer, short version) { - return new EndTxnResponse(new EndTxnResponseData(new ByteBufferAccessor(buffer), version)); + public static EndTxnResponse parse(Readable readable, short version) { + return new EndTxnResponse(new EndTxnResponseData(readable, version)); } 
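Most of the hunks above follow one mechanical pattern: parse(ByteBuffer, short) becomes parse(Readable, short), and the ByteBufferAccessor wrapping moves out of the response classes to the call site. A minimal, hedged sketch of a call site after the change (the rawBytes parameter and the choice of EndTxnResponse and version are illustrative only, not taken from this patch); it relies on ByteBufferAccessor implementing the Readable interface:

import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.requests.EndTxnResponse;

import java.nio.ByteBuffer;

public class ReadableParseSketch {
    public static EndTxnResponse parseEndTxn(byte[] rawBytes) {
        // Before: EndTxnResponse.parse(buffer, version) built the accessor internally.
        // After:  the caller supplies any Readable; a ByteBufferAccessor still works
        //         because it implements Readable.
        ByteBuffer buffer = ByteBuffer.wrap(rawBytes);
        short version = ApiKeys.END_TXN.latestVersion();
        return EndTxnResponse.parse(new ByteBufferAccessor(buffer), version);
    }
}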
@Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java index 4f534b6721f4e..9faccc417d546 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java @@ -18,8 +18,8 @@ import org.apache.kafka.common.message.EnvelopeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import java.nio.ByteBuffer; import java.util.Map; @@ -72,8 +72,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static EnvelopeResponse parse(ByteBuffer buffer, short version) { - return new EnvelopeResponse(new EnvelopeResponseData(new ByteBufferAccessor(buffer), version)); + public static EnvelopeResponse parse(Readable readable, short version) { + return new EnvelopeResponse(new EnvelopeResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java index 945db7acdc09e..9fe141565c239 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.ExpireDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class ExpireDelegationTokenResponse extends AbstractResponse { @@ -33,8 +32,8 @@ public ExpireDelegationTokenResponse(ExpireDelegationTokenResponseData data) { this.data = data; } - public static ExpireDelegationTokenResponse parse(ByteBuffer buffer, short version) { - return new ExpireDelegationTokenResponse(new ExpireDelegationTokenResponseData(new ByteBufferAccessor(buffer), + public static ExpireDelegationTokenResponse parse(Readable readable, short version) { + return new ExpireDelegationTokenResponse(new ExpireDelegationTokenResponseData(readable, version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java index d4684e07652ed..5013468095c2a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java @@ -22,13 +22,12 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Records; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.EnumMap; @@ -145,8 +144,8 @@ public Map errorCounts() { * *

    This method should only be used on the client side.

    */ - public static FetchResponse parse(ByteBuffer buffer, short version) { - return new FetchResponse(new FetchResponseData(new ByteBufferAccessor(buffer), version)); + public static FetchResponse parse(Readable readable, short version) { + return new FetchResponse(new FetchResponseData(readable, version)); } // Fetch versions 13 and above should have topic IDs for all topics. @@ -244,14 +243,6 @@ public static FetchResponse of(FetchResponseData data) { return new FetchResponse(data); } - // TODO: remove as a part of KAFKA-12410 - public static FetchResponse of(Errors error, - int throttleTimeMs, - int sessionId, - LinkedHashMap responseData) { - return new FetchResponse(toMessage(error, throttleTimeMs, sessionId, responseData.entrySet().iterator(), Collections.emptyList())); - } - // TODO: remove as a part of KAFKA-12410 public static FetchResponse of(Errors error, int throttleTimeMs, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java index ac7ecd313bf30..77a1089abb477 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.FetchSnapshotResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; import java.util.Optional; @@ -99,7 +98,7 @@ public static Optional forTopicPart .findAny(); } - public static FetchSnapshotResponse parse(ByteBuffer buffer, short version) { - return new FetchSnapshotResponse(new FetchSnapshotResponseData(new ByteBufferAccessor(buffer), version)); + public static FetchSnapshotResponse parse(Readable readable, short version) { + return new FetchSnapshotResponse(new FetchSnapshotResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java index 118e15abaf3a9..5bd08934b62c7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.EnumMap; @@ -108,8 +107,8 @@ public Map errorCounts() { } } - public static FindCoordinatorResponse parse(ByteBuffer buffer, short version) { - return new FindCoordinatorResponse(new FindCoordinatorResponseData(new ByteBufferAccessor(buffer), version)); + public static FindCoordinatorResponse parse(Readable readable, short version) { + return new FindCoordinatorResponse(new FindCoordinatorResponseData(readable, version)); } @Override diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java index dd7bea170a8de..fa8cdf28a91df 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -65,8 +64,8 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static GetTelemetrySubscriptionsResponse parse(ByteBuffer buffer, short version) { + public static GetTelemetrySubscriptionsResponse parse(Readable readable, short version) { return new GetTelemetrySubscriptionsResponse(new GetTelemetrySubscriptionsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java index aebb903e967e7..cc9d81fefa120 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.HeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class HeartbeatResponse extends AbstractResponse { @@ -67,8 +66,8 @@ public HeartbeatResponseData data() { return data; } - public static HeartbeatResponse parse(ByteBuffer buffer, short version) { - return new HeartbeatResponse(new HeartbeatResponseData(new ByteBufferAccessor(buffer), version)); + public static HeartbeatResponse parse(Readable readable, short version) { + return new HeartbeatResponse(new HeartbeatResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java index 8f6830668db06..59cf8f2f138d8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java @@ -21,11 +21,15 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource; +import org.apache.kafka.common.message.IncrementalAlterConfigsRequestDataJsonConverter; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Readable; +import com.fasterxml.jackson.databind.JsonNode; +import 
com.fasterxml.jackson.databind.node.ObjectNode; + import java.util.Collection; import java.util.Map; @@ -106,4 +110,16 @@ public AbstractResponse getErrorResponse(final int throttleTimeMs, final Throwab } return new IncrementalAlterConfigsResponse(response); } + + // It is not safe to print all config values + @Override + public String toString() { + JsonNode json = IncrementalAlterConfigsRequestDataJsonConverter.write(data, version()).deepCopy(); + for (JsonNode resource : json.get("resources")) { + for (JsonNode config : resource.get("configs")) { + ((ObjectNode) config).put("value", "REDACTED"); + } + } + return IncrementalAlterConfigsRequestDataJsonConverter.read(json, version()).toString(); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java index 2f660f758d77f..2f487ac420d46 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java @@ -21,10 +21,9 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumMap; import java.util.HashMap; @@ -96,8 +95,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static IncrementalAlterConfigsResponse parse(ByteBuffer buffer, short version) { + public static IncrementalAlterConfigsResponse parse(Readable readable, short version) { return new IncrementalAlterConfigsResponse(new IncrementalAlterConfigsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java index 8255888fd76dc..5051890b1829d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java @@ -62,7 +62,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(Errors.forException(e).code()) .setProducerId(RecordBatch.NO_PRODUCER_ID) .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - .setThrottleTimeMs(0); + .setThrottleTimeMs(throttleTimeMs); return new InitProducerIdResponse(response); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java index 96c7a4d400ced..6e864b9ec4292 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.InitProducerIdResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; 
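The toString() override added to IncrementalAlterConfigsRequest above redacts config values before they can be logged. As a hedged, standalone illustration of that Jackson redaction step (the class name and the sample config entry below are invented for the example and are not part of the patch):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class RedactionSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        // Stand-in for one entry of the request's "configs" array.
        ObjectNode config = mapper.createObjectNode()
                .put("name", "ssl.keystore.password")
                .put("value", "secret");
        // Same idea as the new toString(): overwrite the sensitive field before
        // the JSON ever reaches a log line.
        config.put("value", "REDACTED");
        System.out.println(config); // {"name":"ssl.keystore.password","value":"REDACTED"}
    }
}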
-import java.nio.ByteBuffer; import java.util.Map; /** @@ -63,8 +62,8 @@ public InitProducerIdResponseData data() { return data; } - public static InitProducerIdResponse parse(ByteBuffer buffer, short version) { - return new InitProducerIdResponse(new InitProducerIdResponseData(new ByteBufferAccessor(buffer), version)); + public static InitProducerIdResponse parse(Readable readable, short version) { + return new InitProducerIdResponse(new InitProducerIdResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java index 780a66c368103..2feaf55d8142b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static JoinGroupResponse parse(ByteBuffer buffer, short version) { - return new JoinGroupResponse(new JoinGroupResponseData(new ByteBufferAccessor(buffer), version), version); + public static JoinGroupResponse parse(Readable readable, short version) { + return new JoinGroupResponse(new JoinGroupResponseData(readable, version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java deleted file mode 100644 index 90c0add47a9ab..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.Node; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.utils.FlattenedIterator; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -public final class LeaderAndIsrRequest { - - /** - * Indicates if a controller request is incremental, full, or unknown. 
- */ - public enum Type { - UNKNOWN(0), - INCREMENTAL(1), - FULL(2); - - private final byte type; - Type(int type) { - this.type = (byte) type; - } - - public byte toByte() { - return type; - } - } - - public static class Builder { - protected final int controllerId; - protected final int controllerEpoch; - protected final long brokerEpoch; - private final List partitionStates; - private final Map topicIds; - private final Collection liveLeaders; - private final Type updateType; - - public Builder(int controllerId, int controllerEpoch, long brokerEpoch, - List partitionStates, Map topicIds, - Collection liveLeaders) { - this(controllerId, controllerEpoch, brokerEpoch, partitionStates, topicIds, liveLeaders, Type.UNKNOWN); - } - - public Builder(int controllerId, int controllerEpoch, long brokerEpoch, - List partitionStates, Map topicIds, - Collection liveLeaders, Type updateType) { - this.controllerId = controllerId; - this.controllerEpoch = controllerEpoch; - this.brokerEpoch = brokerEpoch; - this.partitionStates = partitionStates; - this.topicIds = topicIds; - this.liveLeaders = liveLeaders; - this.updateType = updateType; - } - - public LeaderAndIsrRequest build() { - return new LeaderAndIsrRequest(this); - } - - @Override - public String toString() { - return "(type=LeaderAndIsRequest" + - ", controllerId=" + controllerId + - ", controllerEpoch=" + controllerEpoch + - ", brokerEpoch=" + brokerEpoch + - ", partitionStates=" + partitionStates + - ", topicIds=" + topicIds + - ", liveLeaders=(" + liveLeaders.stream().map(Node::toString).collect(Collectors.joining(", ")) + ")" + - ")"; - - } - } - - private final int controllerId; - private final int controllerEpoch; - private final long brokerEpoch; - private final List liveLeaders; - private final List topicStates; - private final Type requestType; - - public LeaderAndIsrRequest(Builder builder) { - this.controllerId = builder.controllerId; - this.controllerEpoch = builder.controllerEpoch; - this.brokerEpoch = builder.brokerEpoch; - this.requestType = builder.updateType; - this.liveLeaders = new ArrayList<>(builder.liveLeaders); - this.topicStates = new ArrayList<>(groupByTopic(builder.partitionStates, builder.topicIds).values()); - } - - private static Map groupByTopic(List partitionStates, Map topicIds) { - Map topicStates = new HashMap<>(); - for (PartitionState partition : partitionStates) { - TopicState topicState = topicStates.computeIfAbsent(partition.topicName(), t -> { - var topic = new TopicState(); - topic.topicName = partition.topicName(); - topic.topicId = topicIds.getOrDefault(partition.topicName(), Uuid.ZERO_UUID); - return topic; - }); - topicState.partitionStates().add(partition); - } - return topicStates; - } - - public int controllerId() { - return controllerId; - } - - public int controllerEpoch() { - return controllerEpoch; - } - - public long brokerEpoch() { - return brokerEpoch; - } - - public Iterable partitionStates() { - return () -> new FlattenedIterator<>(topicStates.iterator(), - topicState -> topicState.partitionStates().iterator()); - } - - public Map topicIds() { - return topicStates.stream() - .collect(Collectors.toMap(TopicState::topicName, TopicState::topicId)); - } - - public List liveLeaders() { - return Collections.unmodifiableList(liveLeaders); - } - - public Type requestType() { - return requestType; - } - - public LeaderAndIsrResponse getErrorResponse(Exception e) { - LinkedHashMap> errorsMap = new LinkedHashMap<>(); - Errors error = Errors.forException(e); - - for (TopicState topicState : 
topicStates) { - List partitions = new ArrayList<>(topicState.partitionStates().size()); - for (PartitionState partition : topicState.partitionStates()) { - partitions.add(new LeaderAndIsrResponse.PartitionError(partition.partitionIndex, error.code())); - } - errorsMap.put(topicState.topicId, partitions); - } - - return new LeaderAndIsrResponse(error, errorsMap); - - } - - public static class TopicState { - String topicName; - Uuid topicId; - List partitionStates; - - public TopicState() { - this.topicName = ""; - this.topicId = Uuid.ZERO_UUID; - this.partitionStates = new ArrayList<>(0); - } - - public String topicName() { - return this.topicName; - } - - public Uuid topicId() { - return this.topicId; - } - - public List partitionStates() { - return this.partitionStates; - } - - @Override - public boolean equals(Object o) { - if (o == null || getClass() != o.getClass()) - return false; - TopicState that = (TopicState) o; - return Objects.equals(topicName, that.topicName) && - Objects.equals(topicId, that.topicId) && - Objects.equals(partitionStates, that.partitionStates); - } - - @Override - public int hashCode() { - return Objects.hash(topicName, topicId, partitionStates); - } - - @Override - public String toString() { - return "LeaderAndIsrTopicState(" - + "topicName='" + topicName + "'" - + ", topicId=" + topicId - + ", partitionStates=" + MessageUtil.deepToString(partitionStates.iterator()) - + ")"; - } - } - - public static class PartitionState { - String topicName; - int partitionIndex; - int controllerEpoch; - int leader; - int leaderEpoch; - List isr; - int partitionEpoch; - List replicas; - List addingReplicas; - List removingReplicas; - boolean isNew; - byte leaderRecoveryState; - - public PartitionState() { - this.topicName = ""; - this.partitionIndex = 0; - this.controllerEpoch = 0; - this.leader = 0; - this.leaderEpoch = 0; - this.isr = new ArrayList<>(0); - this.partitionEpoch = 0; - this.replicas = new ArrayList<>(0); - this.addingReplicas = new ArrayList<>(0); - this.removingReplicas = new ArrayList<>(0); - this.isNew = false; - this.leaderRecoveryState = (byte) 0; - } - - @Override - public boolean equals(Object o) { - if (o == null || getClass() != o.getClass()) return false; - PartitionState that = (PartitionState) o; - return partitionIndex == that.partitionIndex && - controllerEpoch == that.controllerEpoch && - leader == that.leader && - leaderEpoch == that.leaderEpoch && - partitionEpoch == that.partitionEpoch && - isNew == that.isNew && - leaderRecoveryState == that.leaderRecoveryState && - Objects.equals(topicName, that.topicName) && - Objects.equals(isr, that.isr) && - Objects.equals(replicas, that.replicas) && - Objects.equals(addingReplicas, that.addingReplicas) && - Objects.equals(removingReplicas, that.removingReplicas); - } - - @Override - public int hashCode() { - return Objects.hash(topicName, partitionIndex, controllerEpoch, leader, leaderEpoch, isr, partitionEpoch, - replicas, addingReplicas, removingReplicas, isNew, leaderRecoveryState); - } - - @Override - public String toString() { - return "LeaderAndIsrPartitionState(" - + "topicName='" + topicName + "'" - + ", partitionIndex=" + partitionIndex - + ", controllerEpoch=" + controllerEpoch - + ", leader=" + leader - + ", leaderEpoch=" + leaderEpoch - + ", isr=" + MessageUtil.deepToString(isr.iterator()) - + ", partitionEpoch=" + partitionEpoch - + ", replicas=" + MessageUtil.deepToString(replicas.iterator()) - + ", addingReplicas=" + MessageUtil.deepToString(addingReplicas.iterator()) - + ", 
removingReplicas=" + MessageUtil.deepToString(removingReplicas.iterator()) - + ", isNew=" + (isNew ? "true" : "false") - + ", leaderRecoveryState=" + leaderRecoveryState - + ")"; - } - - public String topicName() { - return this.topicName; - } - - public int partitionIndex() { - return this.partitionIndex; - } - - public int controllerEpoch() { - return this.controllerEpoch; - } - - public int leader() { - return this.leader; - } - - public int leaderEpoch() { - return this.leaderEpoch; - } - - public List isr() { - return this.isr; - } - - public int partitionEpoch() { - return this.partitionEpoch; - } - - public List replicas() { - return this.replicas; - } - - public List addingReplicas() { - return this.addingReplicas; - } - - public List removingReplicas() { - return this.removingReplicas; - } - - public boolean isNew() { - return this.isNew; - } - - public byte leaderRecoveryState() { - return this.leaderRecoveryState; - } - - public PartitionState setTopicName(String v) { - this.topicName = v; - return this; - } - - public PartitionState setPartitionIndex(int v) { - this.partitionIndex = v; - return this; - } - - public PartitionState setControllerEpoch(int v) { - this.controllerEpoch = v; - return this; - } - - public PartitionState setLeader(int v) { - this.leader = v; - return this; - } - - public PartitionState setLeaderEpoch(int v) { - this.leaderEpoch = v; - return this; - } - - public PartitionState setIsr(List v) { - this.isr = v; - return this; - } - - public PartitionState setPartitionEpoch(int v) { - this.partitionEpoch = v; - return this; - } - - public PartitionState setReplicas(List v) { - this.replicas = v; - return this; - } - - public PartitionState setAddingReplicas(List v) { - this.addingReplicas = v; - return this; - } - - public PartitionState setRemovingReplicas(List v) { - this.removingReplicas = v; - return this; - } - - public PartitionState setIsNew(boolean v) { - this.isNew = v; - return this; - } - - public PartitionState setLeaderRecoveryState(byte v) { - this.leaderRecoveryState = v; - return this; - } - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java deleted file mode 100644 index bcdcf35b5d38d..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.protocol.Errors; - -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -public class LeaderAndIsrResponse { - - /** - * Possible error code: - * - * STALE_CONTROLLER_EPOCH (11) - * STALE_BROKER_EPOCH (77) - */ - private final Errors error; - private final LinkedHashMap> topicErrors; - - public LeaderAndIsrResponse(Errors error, LinkedHashMap> topicErrors) { - this.error = error; - this.topicErrors = topicErrors; - } - - public LinkedHashMap> topics() { - return topicErrors; - } - - public Errors error() { - return error; - } - - public Map errorCounts() { - Errors error = error(); - if (error != Errors.NONE) { - // Minor optimization since the top-level error applies to all partitions - return Collections.singletonMap(error, topics().values().stream().mapToInt(partitionErrors -> - partitionErrors.size()).sum() + 1); - } - Map errors = AbstractResponse.errorCounts(topics().values().stream().flatMap(partitionErrors -> - partitionErrors.stream()).map(p -> Errors.forCode(p.errorCode))); - AbstractResponse.updateErrorCounts(errors, Errors.NONE); - return errors; - } - - public Map partitionErrors(Map topicNames) { - Map errors = new HashMap<>(); - topics().forEach((topicId, partitionErrors) -> { - String topicName = topicNames.get(topicId); - if (topicName != null) { - partitionErrors.forEach(partition -> - errors.put(new TopicPartition(topicName, partition.partitionIndex), Errors.forCode(partition.errorCode))); - } - }); - return errors; - } - - @Override - public String toString() { - return "LeaderAndIsrResponse{" + - "error=" + error + - ", topicErrors=" + topicErrors + - '}'; - } - - public static class PartitionError { - public final int partitionIndex; - public final short errorCode; - - public PartitionError(int partitionIndex, short errorCode) { - this.partitionIndex = partitionIndex; - this.errorCode = errorCode; - } - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java index d8ee6b0336412..7cae507d4c2a7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.List; import java.util.Map; @@ -149,8 +148,8 @@ public LeaveGroupResponseData data() { return data; } - public static LeaveGroupResponse parse(ByteBuffer buffer, short version) { - return new LeaveGroupResponse(new LeaveGroupResponseData(new ByteBufferAccessor(buffer), version)); + public static LeaveGroupResponse parse(Readable readable, short version) { + return new LeaveGroupResponse(new LeaveGroupResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java 
b/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java deleted file mode 100644 index 417740d0ffa86..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.requests; - -import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; - -public class ListClientMetricsResourcesRequest extends AbstractRequest { - public static class Builder extends AbstractRequest.Builder { - public final ListClientMetricsResourcesRequestData data; - - public Builder(ListClientMetricsResourcesRequestData data) { - super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES); - this.data = data; - } - - @Override - public ListClientMetricsResourcesRequest build(short version) { - return new ListClientMetricsResourcesRequest(data, version); - } - - @Override - public String toString() { - return data.toString(); - } - } - - private final ListClientMetricsResourcesRequestData data; - - private ListClientMetricsResourcesRequest(ListClientMetricsResourcesRequestData data, short version) { - super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES, version); - this.data = data; - } - - public ListClientMetricsResourcesRequestData data() { - return data; - } - - @Override - public ListClientMetricsResourcesResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - ListClientMetricsResourcesResponseData response = new ListClientMetricsResourcesResponseData() - .setErrorCode(error.code()) - .setThrottleTimeMs(throttleTimeMs); - return new ListClientMetricsResourcesResponse(response); - } - - public static ListClientMetricsResourcesRequest parse(Readable readable, short version) { - return new ListClientMetricsResourcesRequest(new ListClientMetricsResourcesRequestData( - readable, version), version); - } - - @Override - public String toString(boolean verbose) { - return data.toString(); - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesRequest.java new file mode 100644 index 0000000000000..436d08c490973 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesRequest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.message.ListConfigResourcesRequestData; +import org.apache.kafka.common.message.ListConfigResourcesResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; + +import java.util.HashSet; +import java.util.Set; + +public class ListConfigResourcesRequest extends AbstractRequest { + public static class Builder extends AbstractRequest.Builder { + public final ListConfigResourcesRequestData data; + + public Builder(ListConfigResourcesRequestData data) { + super(ApiKeys.LIST_CONFIG_RESOURCES); + this.data = data; + } + + @Override + public ListConfigResourcesRequest build(short version) { + if (version == 0) { + // The v0 only supports CLIENT_METRICS resource type. + Set resourceTypes = new HashSet<>(data.resourceTypes()); + if (resourceTypes.size() != 1 || !resourceTypes.contains(ConfigResource.Type.CLIENT_METRICS.id())) { + throw new UnsupportedVersionException("The v0 ListConfigResources only supports CLIENT_METRICS"); + } + // The v0 request does not have resource types field, so creating a new request data. + return new ListConfigResourcesRequest(new ListConfigResourcesRequestData(), version); + } + return new ListConfigResourcesRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ListConfigResourcesRequestData data; + + private ListConfigResourcesRequest(ListConfigResourcesRequestData data, short version) { + super(ApiKeys.LIST_CONFIG_RESOURCES, version); + this.data = data; + } + + public ListConfigResourcesRequestData data() { + return data; + } + + @Override + public ListConfigResourcesResponse getErrorResponse(int throttleTimeMs, Throwable e) { + Errors error = Errors.forException(e); + ListConfigResourcesResponseData response = new ListConfigResourcesResponseData() + .setErrorCode(error.code()) + .setThrottleTimeMs(throttleTimeMs); + return new ListConfigResourcesResponse(response); + } + + public static ListConfigResourcesRequest parse(Readable readable, short version) { + return new ListConfigResourcesRequest(new ListConfigResourcesRequestData( + readable, version), version); + } + + @Override + public String toString(boolean verbose) { + return data.toString(); + } + + /** + * Return the supported config resource types in different request version. + * If there is a new config resource type, the ListConfigResourcesRequest should bump a new request version to include it. + * For v0, the supported config resource types contain CLIENT_METRICS (16). 
+ * For v1, the supported config resource types contain TOPIC (2), BROKER (4), BROKER_LOGGER (8), CLIENT_METRICS (16), and GROUP (32). + */ + public Set supportedResourceTypes() { + return version() == 0 ? + Set.of(ConfigResource.Type.CLIENT_METRICS.id()) : + Set.of( + ConfigResource.Type.TOPIC.id(), + ConfigResource.Type.BROKER.id(), + ConfigResource.Type.BROKER_LOGGER.id(), + ConfigResource.Type.CLIENT_METRICS.id(), + ConfigResource.Type.GROUP.id() + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesResponse.java similarity index 60% rename from clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java rename to clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesResponse.java index 87b25a0a90444..f9fa50d02a986 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesResponse.java @@ -16,26 +16,25 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.clients.admin.ClientMetricsResourceListing; -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.message.ListConfigResourcesResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collection; import java.util.Map; import java.util.stream.Collectors; -public class ListClientMetricsResourcesResponse extends AbstractResponse { - private final ListClientMetricsResourcesResponseData data; +public class ListConfigResourcesResponse extends AbstractResponse { + private final ListConfigResourcesResponseData data; - public ListClientMetricsResourcesResponse(ListClientMetricsResourcesResponseData data) { - super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES); + public ListConfigResourcesResponse(ListConfigResourcesResponseData data) { + super(ApiKeys.LIST_CONFIG_RESOURCES); this.data = data; } - public ListClientMetricsResourcesResponseData data() { + public ListConfigResourcesResponseData data() { return data; } @@ -48,9 +47,9 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static ListClientMetricsResourcesResponse parse(ByteBuffer buffer, short version) { - return new ListClientMetricsResourcesResponse(new ListClientMetricsResourcesResponseData( - new ByteBufferAccessor(buffer), version)); + public static ListConfigResourcesResponse parse(Readable readable, short version) { + return new ListConfigResourcesResponse(new ListConfigResourcesResponseData( + readable, version)); } @Override @@ -68,10 +67,14 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public Collection clientMetricsResources() { - return data.clientMetricsResources() + public Collection configResources() { + return data.configResources() .stream() - .map(entry -> new ClientMetricsResourceListing(entry.name())) - .collect(Collectors.toList()); + .map(entry -> + new ConfigResource( + ConfigResource.Type.forId(entry.resourceType()), + entry.resourceName() + ) + ).collect(Collectors.toList()); } } diff --git 
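A short, hedged usage sketch of the v0 gating in ListConfigResourcesRequest.Builder above: only CLIENT_METRICS is accepted at version 0, and any other resource type must negotiate v1 or newer. The setResourceTypes(...) setter is assumed from the generated message class (this patch only shows the resourceTypes() accessor), so treat the snippet as illustrative rather than definitive:

import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.ListConfigResourcesRequestData;
import org.apache.kafka.common.requests.ListConfigResourcesRequest;

import java.util.List;

public class ListConfigResourcesSketch {
    public static void main(String[] args) {
        ListConfigResourcesRequestData data = new ListConfigResourcesRequestData()
                .setResourceTypes(List.of(ConfigResource.Type.TOPIC.id())); // assumed setter
        ListConfigResourcesRequest.Builder builder = new ListConfigResourcesRequest.Builder(data);
        try {
            builder.build((short) 0); // v0 only supports CLIENT_METRICS, so this throws
        } catch (UnsupportedVersionException e) {
            // expected: retry with v1+ or drop the resource-type filter
        }
        ListConfigResourcesRequest v1Request = builder.build((short) 1); // v1 accepts TOPIC
        System.out.println(v1Request.supportedResourceTypes()); // the five v1 resource type ids
    }
}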
a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java index 6dfbcca955a7e..84f7cc2a72d69 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common.requests; +import org.apache.kafka.common.GroupType; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; @@ -24,6 +25,8 @@ import org.apache.kafka.common.protocol.Readable; import java.util.Collections; +import java.util.HashSet; +import java.util.List; /** * Possible error codes: @@ -50,8 +53,19 @@ public ListGroupsRequest build(short version) { "v" + version + ", but we need v4 or newer to request groups by states."); } if (!data.typesFilter().isEmpty() && version < 5) { - throw new UnsupportedVersionException("The broker only supports ListGroups " + - "v" + version + ", but we need v5 or newer to request groups by type."); + // Types filter is supported by brokers with version 3.8.0 or later. Older brokers only support + // classic groups, so listing consumer groups on an older broker does not need to use a types filter. + // If the types filter is only for consumer and classic, or just classic groups, it can be safely omitted. + // This allows a modern admin client to list consumer groups on older brokers in a straightforward way. + HashSet typesCopy = new HashSet<>(data.typesFilter()); + boolean containedClassic = typesCopy.remove(GroupType.CLASSIC.toString()); + boolean containedConsumer = typesCopy.remove(GroupType.CONSUMER.toString()); + if (!typesCopy.isEmpty() || (!containedClassic && containedConsumer)) { + throw new UnsupportedVersionException("The broker only supports ListGroups " + + "v" + version + ", but we need v5 or newer to request groups by type. 
" + + "Requested group types: [" + String.join(", ", data.typesFilter()) + "]."); + } + return new ListGroupsRequest(data.duplicate().setTypesFilter(List.of()), version); } return new ListGroupsRequest(data, version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java index a12f85341d6a4..fa40a4f751067 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class ListGroupsResponse extends AbstractResponse { @@ -53,8 +52,8 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static ListGroupsResponse parse(ByteBuffer buffer, short version) { - return new ListGroupsResponse(new ListGroupsResponseData(new ByteBufferAccessor(buffer), version)); + public static ListGroupsResponse parse(Readable readable, short version) { + return new ListGroupsResponse(new ListGroupsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java index 065a0b5d8a8a6..5862ebdfafc67 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java @@ -47,6 +47,8 @@ public class ListOffsetsRequest extends AbstractRequest { public static final long LATEST_TIERED_TIMESTAMP = -5L; + public static final long EARLIEST_PENDING_UPLOAD_TIMESTAMP = -6L; + public static final int CONSUMER_REPLICA_ID = -1; public static final int DEBUGGING_REPLICA_ID = -2; @@ -58,16 +60,19 @@ public static class Builder extends AbstractRequest.Builder public static Builder forConsumer(boolean requireTimestamp, IsolationLevel isolationLevel) { - return forConsumer(requireTimestamp, isolationLevel, false, false, false); + return forConsumer(requireTimestamp, isolationLevel, false, false, false, false); } public static Builder forConsumer(boolean requireTimestamp, IsolationLevel isolationLevel, boolean requireMaxTimestamp, boolean requireEarliestLocalTimestamp, - boolean requireTieredStorageTimestamp) { - short minVersion = 0; - if (requireTieredStorageTimestamp) + boolean requireTieredStorageTimestamp, + boolean requireEarliestPendingUploadTimestamp) { + short minVersion = ApiKeys.LIST_OFFSETS.oldestVersion(); + if (requireEarliestPendingUploadTimestamp) + minVersion = 11; + else if (requireTieredStorageTimestamp) minVersion = 9; else if (requireEarliestLocalTimestamp) minVersion = 8; @@ -81,7 +86,7 @@ else if (requireTimestamp) } public static Builder forReplica(short allowedVersion, int replicaId) { - return new Builder((short) 0, allowedVersion, replicaId, IsolationLevel.READ_UNCOMMITTED); + return new Builder(ApiKeys.LIST_OFFSETS.oldestVersion(), allowedVersion, replicaId, IsolationLevel.READ_UNCOMMITTED); } private Builder(short oldestAllowedVersion, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java 
b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java index 003daf55df607..cadff02033958 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java @@ -21,11 +21,10 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.EnumMap; import java.util.List; @@ -89,8 +88,8 @@ public Map errorCounts() { return errorCounts; } - public static ListOffsetsResponse parse(ByteBuffer buffer, short version) { - return new ListOffsetsResponse(new ListOffsetsResponseData(new ByteBufferAccessor(buffer), version)); + public static ListOffsetsResponse parse(Readable readable, short version) { + return new ListOffsetsResponse(new ListOffsetsResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java index cbf06d4c46624..cee49055598a9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class ListPartitionReassignmentsResponse extends AbstractResponse { @@ -33,9 +32,9 @@ public ListPartitionReassignmentsResponse(ListPartitionReassignmentsResponseData this.data = responseData; } - public static ListPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) { + public static ListPartitionReassignmentsResponse parse(Readable readable, short version) { return new ListPartitionReassignmentsResponse(new ListPartitionReassignmentsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java index 5f7ea5b157d7c..34c39625972c9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java @@ -38,6 +38,10 @@ public ListTransactionsRequest build(short version) { throw new UnsupportedVersionException("Duration filter can be set only when using API version 1 or higher." + " If client is connected to an older broker, do not specify duration filter or set duration filter to -1."); } + if (data.transactionalIdPattern() != null && version < 2) { + throw new UnsupportedVersionException("Transactional ID pattern filter can be set only when using API version 2 or higher." 
+ + " If client is connected to an older broker, do not specify the pattern filter."); + } return new ListTransactionsRequest(data, version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java index 6bebdb2cd0ec3..e563b111984b1 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.ListTransactionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -44,9 +43,9 @@ public Map errorCounts() { return errorCounts; } - public static ListTransactionsResponse parse(ByteBuffer buffer, short version) { + public static ListTransactionsResponse parse(Readable readable, short version) { return new ListTransactionsResponse(new ListTransactionsResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java index e0a213fa62ef8..d3dcabfb4f9c5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java @@ -27,7 +27,9 @@ import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; public class MetadataRequest extends AbstractRequest { @@ -49,33 +51,33 @@ public Builder(List topics, boolean allowAutoTopicCreation, short allowe public Builder(List topics, boolean allowAutoTopicCreation, short minVersion, short maxVersion) { super(ApiKeys.METADATA, minVersion, maxVersion); + this.data = requestTopicNamesOrAllTopics(topics, allowAutoTopicCreation); + } + + private MetadataRequestData requestTopicNamesOrAllTopics(List topics, boolean allowAutoTopicCreation) { MetadataRequestData data = new MetadataRequestData(); if (topics == null) data.setTopics(null); else { topics.forEach(topic -> data.topics().add(new MetadataRequestTopic().setName(topic))); } - data.setAllowAutoTopicCreation(allowAutoTopicCreation); - this.data = data; + return data; } - public Builder(List topics, boolean allowAutoTopicCreation) { - this(topics, allowAutoTopicCreation, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); - } - - public Builder(List topicIds) { - super(ApiKeys.METADATA, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); + private static MetadataRequestData requestTopicIds(Set topicIds) { MetadataRequestData data = new MetadataRequestData(); if (topicIds == null) data.setTopics(null); else { topicIds.forEach(topicId -> data.topics().add(new MetadataRequestTopic().setTopicId(topicId))); } + data.setAllowAutoTopicCreation(false); // can't auto-create without topic name + return data; + } - // It's impossible to create topic with topicId - data.setAllowAutoTopicCreation(false); - this.data = data; + public Builder(List topics, boolean allowAutoTopicCreation) { + this(topics, allowAutoTopicCreation, 
ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); } public static Builder allTopics() { @@ -84,6 +86,20 @@ public static Builder allTopics() { return new Builder(ALL_TOPICS_REQUEST_DATA); } + /** + * @return Builder for metadata request using topic names. + */ + public static Builder forTopicNames(List topicNames, boolean allowAutoTopicCreation) { + return new MetadataRequest.Builder(topicNames, allowAutoTopicCreation); + } + + /** + * @return Builder for metadata request using topic IDs. + */ + public static Builder forTopicIds(Set topicIds) { + return new MetadataRequest.Builder(requestTopicIds(new HashSet<>(topicIds))); + } + public boolean emptyTopicList() { return data.topics().isEmpty(); } @@ -92,6 +108,13 @@ public boolean isAllTopics() { return data.topics() == null; } + public List topicIds() { + return data.topics() + .stream() + .map(MetadataRequestTopic::topicId) + .collect(Collectors.toList()); + } + public List topics() { return data.topics() .stream() diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java index ade38d74d3e62..bbcebdd484618 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java @@ -26,10 +26,9 @@ import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition; import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -281,8 +280,8 @@ private static boolean hasReliableLeaderEpochs(short version) { return version >= 9; } - public static MetadataResponse parse(ByteBuffer buffer, short version) { - return new MetadataResponse(new MetadataResponseData(new ByteBufferAccessor(buffer), version), + public static MetadataResponse parse(Readable readable, short version) { + return new MetadataResponse(new MetadataResponseData(readable, version), hasReliableLeaderEpochs(version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java index 8f6ab39d1fce4..1bd9c41f66834 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic; @@ -45,20 +46,39 @@ public static class Builder extends AbstractRequest.Builder private final OffsetCommitRequestData data; - public Builder(OffsetCommitRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.OFFSET_COMMIT, enableUnstableLastVersion); + private Builder(OffsetCommitRequestData data, short oldestAllowedVersion, short latestAllowedVersion) { + super(ApiKeys.OFFSET_COMMIT, oldestAllowedVersion, 
latestAllowedVersion); this.data = data; } - public Builder(OffsetCommitRequestData data) { - this(data, false); + public static Builder forTopicIdsOrNames(OffsetCommitRequestData data, boolean enableUnstableLastVersion) { + return new Builder(data, ApiKeys.OFFSET_COMMIT.oldestVersion(), ApiKeys.OFFSET_COMMIT.latestVersion(enableUnstableLastVersion)); + } + + public static Builder forTopicNames(OffsetCommitRequestData data) { + return new Builder(data, ApiKeys.OFFSET_COMMIT.oldestVersion(), (short) 9); } @Override public OffsetCommitRequest build(short version) { if (data.groupInstanceId() != null && version < 7) { - throw new UnsupportedVersionException("The broker offset commit protocol version " + - version + " does not support usage of config group.instance.id."); + throw new UnsupportedVersionException("The broker offset commit api version " + + version + " does not support usage of config group.instance.id."); + } + if (version >= 10) { + data.topics().forEach(topic -> { + if (topic.topicId() == null || topic.topicId().equals(Uuid.ZERO_UUID)) { + throw new UnsupportedVersionException("The broker offset commit api version " + + version + " does require usage of topic ids."); + } + }); + } else { + data.topics().forEach(topic -> { + if (topic.name() == null || topic.name().isEmpty()) { + throw new UnsupportedVersionException("The broker offset commit api version " + + version + " does require usage of topic names."); + } + }); } return new OffsetCommitRequest(data, version); } @@ -97,6 +117,7 @@ public static OffsetCommitResponseData getErrorResponse( OffsetCommitResponseData response = new OffsetCommitResponseData(); request.topics().forEach(topic -> { OffsetCommitResponseTopic responseTopic = new OffsetCommitResponseTopic() + .setTopicId(topic.topicId()) .setName(topic.name()); response.topics().add(responseTopic); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java index 2b6d00b1a47f6..521ffa1c2fdc3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java @@ -17,14 +17,14 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -99,8 +99,8 @@ public Map errorCounts() { Errors.forCode(partitionResult.errorCode())))); } - public static OffsetCommitResponse parse(ByteBuffer buffer, short version) { - return new OffsetCommitResponse(new OffsetCommitResponseData(new ByteBufferAccessor(buffer), version)); + public static OffsetCommitResponse parse(Readable readable, short version) { + return new OffsetCommitResponse(new OffsetCommitResponseData(readable, version)); } @Override @@ -123,43 +123,56 @@ public boolean shouldClientThrottle(short version) { return version >= 4; } - public static class Builder { - 
OffsetCommitResponseData data = new OffsetCommitResponseData(); - HashMap byTopicName = new HashMap<>(); + public static boolean useTopicIds(short version) { + return version >= 10; + } - private OffsetCommitResponseTopic getOrCreateTopic( - String topicName - ) { - OffsetCommitResponseTopic topic = byTopicName.get(topicName); - if (topic == null) { - topic = new OffsetCommitResponseTopic().setName(topicName); - data.topics().add(topic); - byTopicName.put(topicName, topic); - } - return topic; + public static Builder newBuilder(boolean useTopicIds) { + if (useTopicIds) { + return new TopicIdBuilder(); + } else { + return new TopicNameBuilder(); } + } + + public abstract static class Builder { + protected OffsetCommitResponseData data = new OffsetCommitResponseData(); + + protected abstract void add( + OffsetCommitResponseTopic topic + ); + + protected abstract OffsetCommitResponseTopic get( + Uuid topicId, + String topicName + ); + + protected abstract OffsetCommitResponseTopic getOrCreate( + Uuid topicId, + String topicName + ); public Builder addPartition( + Uuid topicId, String topicName, int partitionIndex, Errors error ) { - final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); - + final OffsetCommitResponseTopic topicResponse = getOrCreate(topicId, topicName); topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partitionIndex) .setErrorCode(error.code())); - return this; } public
    Builder addPartitions( + Uuid topicId, String topicName, List
    partitions, Function partitionIndex, Errors error ) { - final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); + final OffsetCommitResponseTopic topicResponse = getOrCreate(topicId, topicName); partitions.forEach(partition -> topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partitionIndex.apply(partition)) @@ -177,11 +190,10 @@ public Builder merge( } else { // Otherwise, we have to merge them together. newData.topics().forEach(newTopic -> { - OffsetCommitResponseTopic existingTopic = byTopicName.get(newTopic.name()); + OffsetCommitResponseTopic existingTopic = get(newTopic.topicId(), newTopic.name()); if (existingTopic == null) { // If no topic exists, we can directly copy the new topic data. - data.topics().add(newTopic); - byTopicName.put(newTopic.name(), newTopic); + add(newTopic); } else { // Otherwise, we add the partitions to the existing one. Note we // expect non-overlapping partitions here as we don't verify @@ -190,7 +202,6 @@ public Builder merge( } }); } - return this; } @@ -198,4 +209,78 @@ public OffsetCommitResponse build() { return new OffsetCommitResponse(data); } } + + public static class TopicIdBuilder extends Builder { + private final HashMap byTopicId = new HashMap<>(); + + @Override + protected void add(OffsetCommitResponseTopic topic) { + throwIfTopicIdIsNull(topic.topicId()); + data.topics().add(topic); + byTopicId.put(topic.topicId(), topic); + } + + @Override + protected OffsetCommitResponseTopic get(Uuid topicId, String topicName) { + throwIfTopicIdIsNull(topicId); + return byTopicId.get(topicId); + } + + @Override + protected OffsetCommitResponseTopic getOrCreate(Uuid topicId, String topicName) { + throwIfTopicIdIsNull(topicId); + OffsetCommitResponseTopic topic = byTopicId.get(topicId); + if (topic == null) { + topic = new OffsetCommitResponseTopic() + .setName(topicName) + .setTopicId(topicId); + data.topics().add(topic); + byTopicId.put(topicId, topic); + } + return topic; + } + + private static void throwIfTopicIdIsNull(Uuid topicId) { + if (topicId == null) { + throw new IllegalArgumentException("TopicId cannot be null."); + } + } + } + + public static class TopicNameBuilder extends Builder { + private final HashMap byTopicName = new HashMap<>(); + + @Override + protected void add(OffsetCommitResponseTopic topic) { + throwIfTopicNameIsNull(topic.name()); + data.topics().add(topic); + byTopicName.put(topic.name(), topic); + } + + @Override + protected OffsetCommitResponseTopic get(Uuid topicId, String topicName) { + throwIfTopicNameIsNull(topicName); + return byTopicName.get(topicName); + } + + @Override + protected OffsetCommitResponseTopic getOrCreate(Uuid topicId, String topicName) { + throwIfTopicNameIsNull(topicName); + OffsetCommitResponseTopic topic = byTopicName.get(topicName); + if (topic == null) { + topic = new OffsetCommitResponseTopic() + .setName(topicName) + .setTopicId(topicId); + data.topics().add(topic); + byTopicName.put(topicName, topic); + } + return topic; + } + + private void throwIfTopicNameIsNull(String topicName) { + if (topicName == null) { + throw new IllegalArgumentException("TopicName cannot be null."); + } + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java index a419a096e7a4b..0f3655d62c67d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java +++ 
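The OffsetCommitResponse.Builder split above chooses between a topic-id keyed and a topic-name keyed builder based on the request version. A rough usage sketch is below; the version, topic id, and topic name are made-up values, and the example is only meant to show the intended call sequence.

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.OffsetCommitResponse;

public class OffsetCommitResponseBuilderSketch {
    public static void main(String[] args) {
        short version = 10; // assumed negotiated version; v10 and later identify topics by id
        OffsetCommitResponse.Builder builder =
            OffsetCommitResponse.newBuilder(OffsetCommitResponse.useTopicIds(version));
        Uuid topicId = Uuid.randomUuid(); // placeholder id for illustration
        builder.addPartition(topicId, "my-topic", 0, Errors.NONE);
        OffsetCommitResponse response = builder.build();
        System.out.println(response.data());
    }
}

For versions below 10 the same sequence works with newBuilder(false), which keys topics by name and tolerates a zero topic id.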
b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartition; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.List; import java.util.Map; @@ -150,8 +149,8 @@ public Map errorCounts() { return counts; } - public static OffsetDeleteResponse parse(ByteBuffer buffer, short version) { - return new OffsetDeleteResponse(new OffsetDeleteResponseData(new ByteBufferAccessor(buffer), version)); + public static OffsetDeleteResponse parse(Readable readable, short version) { + return new OffsetDeleteResponse(new OffsetDeleteResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java index 907cba953fb21..0d91788522916 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java @@ -17,14 +17,17 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopic; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.record.RecordBatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,113 +37,67 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; -import java.util.Optional; import java.util.stream.Collectors; public class OffsetFetchRequest extends AbstractRequest { private static final Logger log = LoggerFactory.getLogger(OffsetFetchRequest.class); + public static final short TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION = 2; + public static final short REQUIRE_STABLE_OFFSET_MIN_VERSION = 7; + public static final short BATCH_MIN_VERSION = 8; + public static final short TOPIC_ID_MIN_VERSION = 10; - private static final List ALL_TOPIC_PARTITIONS = null; - private static final List ALL_TOPIC_PARTITIONS_BATCH = null; private final OffsetFetchRequestData data; public static class Builder extends AbstractRequest.Builder { - - public final OffsetFetchRequestData data; + private final OffsetFetchRequestData data; private final boolean throwOnFetchStableOffsetsUnsupported; - public Builder(String groupId, - boolean requireStable, - List partitions, - boolean throwOnFetchStableOffsetsUnsupported) { - this( - groupId, - null, - -1, - requireStable, - partitions, - throwOnFetchStableOffsetsUnsupported + public static Builder forTopicIdsOrNames( + OffsetFetchRequestData data, + boolean 
throwOnFetchStableOffsetsUnsupported, + boolean enableUnstableLastVersion + ) { + return new Builder( + data, + throwOnFetchStableOffsetsUnsupported, + ApiKeys.OFFSET_FETCH.oldestVersion(), + ApiKeys.OFFSET_FETCH.latestVersion(enableUnstableLastVersion) ); } - public Builder(String groupId, - String memberId, - int memberEpoch, - boolean requireStable, - List partitions, - boolean throwOnFetchStableOffsetsUnsupported) { - super(ApiKeys.OFFSET_FETCH); - - OffsetFetchRequestData.OffsetFetchRequestGroup group = - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setMemberId(memberId) - .setMemberEpoch(memberEpoch); - - if (partitions != null) { - Map offsetFetchRequestTopicMap = new HashMap<>(); - for (TopicPartition topicPartition : partitions) { - String topicName = topicPartition.topic(); - OffsetFetchRequestTopics topic = offsetFetchRequestTopicMap.getOrDefault( - topicName, new OffsetFetchRequestTopics().setName(topicName)); - topic.partitionIndexes().add(topicPartition.partition()); - offsetFetchRequestTopicMap.put(topicName, topic); - } - group.setTopics(new ArrayList<>(offsetFetchRequestTopicMap.values())); - } else { - // If passed in partition list is null, it is requesting offsets for all topic partitions. - group.setTopics(ALL_TOPIC_PARTITIONS_BATCH); - } - - this.data = new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(Collections.singletonList(group)); - this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported; + public static Builder forTopicNames( + OffsetFetchRequestData data, + boolean throwOnFetchStableOffsetsUnsupported + ) { + return new Builder( + data, + throwOnFetchStableOffsetsUnsupported, + ApiKeys.OFFSET_FETCH.oldestVersion(), + (short) (TOPIC_ID_MIN_VERSION - 1) + ); } - public Builder(Map> groupIdToTopicPartitionMap, - boolean requireStable, - boolean throwOnFetchStableOffsetsUnsupported) { - super(ApiKeys.OFFSET_FETCH); - - List groups = new ArrayList<>(); - for (Entry> entry : groupIdToTopicPartitionMap.entrySet()) { - String groupName = entry.getKey(); - List tpList = entry.getValue(); - final List topics; - if (tpList != null) { - Map offsetFetchRequestTopicMap = - new HashMap<>(); - for (TopicPartition topicPartition : tpList) { - String topicName = topicPartition.topic(); - OffsetFetchRequestTopics topic = offsetFetchRequestTopicMap.getOrDefault( - topicName, new OffsetFetchRequestTopics().setName(topicName)); - topic.partitionIndexes().add(topicPartition.partition()); - offsetFetchRequestTopicMap.put(topicName, topic); - } - topics = new ArrayList<>(offsetFetchRequestTopicMap.values()); - } else { - topics = ALL_TOPIC_PARTITIONS_BATCH; - } - groups.add(new OffsetFetchRequestGroup() - .setGroupId(groupName) - .setTopics(topics)); - } - this.data = new OffsetFetchRequestData() - .setGroups(groups) - .setRequireStable(requireStable); + private Builder( + OffsetFetchRequestData data, + boolean throwOnFetchStableOffsetsUnsupported, + short oldestAllowedVersion, + short latestAllowedVersion + ) { + super(ApiKeys.OFFSET_FETCH, oldestAllowedVersion, latestAllowedVersion); + this.data = data; this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported; } - @Override - public OffsetFetchRequest build(short version) { - if (data.groups().size() > 1 && version < 8) { + private void throwIfBatchingIsUnsupported(short version) { + if (data.groups().size() > 1 && version < BATCH_MIN_VERSION) { throw new NoBatchedOffsetFetchRequestException("Broker does not support" + " 
batching groups for fetch offset request on version " + version); } - if (data.requireStable() && version < 7) { + } + + private void throwIfStableOffsetsUnsupported(short version) { + if (data.requireStable() && version < REQUIRE_STABLE_OFFSET_MIN_VERSION) { if (throwOnFetchStableOffsetsUnsupported) { throw new UnsupportedVersionException("Broker unexpectedly " + "doesn't support requireStable flag on version " + version); @@ -151,37 +108,77 @@ public OffsetFetchRequest build(short version) { data.setRequireStable(false); } } - // convert data to use the appropriate version since version 8 uses different format - if (version < 8) { - OffsetFetchRequestData normalizedData; - if (!data.groups().isEmpty()) { - OffsetFetchRequestGroup group = data.groups().get(0); - String groupName = group.groupId(); - List topics = group.topics(); - List oldFormatTopics = null; - if (topics != null) { - oldFormatTopics = topics - .stream() - .map(t -> - new OffsetFetchRequestTopic() - .setName(t.name()) - .setPartitionIndexes(t.partitionIndexes())) - .collect(Collectors.toList()); + } + + private void throwIfMissingRequiredTopicIdentifiers(short version) { + if (version < TOPIC_ID_MIN_VERSION) { + data.groups().forEach(group -> { + if (group.topics() != null) { + group.topics().forEach(topic -> { + if (topic.name() == null || topic.name().isEmpty()) { + throw new UnsupportedVersionException("The broker offset fetch api version " + + version + " does require usage of topic names."); + } + }); } - normalizedData = new OffsetFetchRequestData() - .setGroupId(groupName) - .setTopics(oldFormatTopics) - .setRequireStable(data.requireStable()); - } else { - normalizedData = data; - } - if (normalizedData.topics() == null && version < 2) { - throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " + - "v" + version + ", but we need v2 or newer to request all topic partitions."); - } - return new OffsetFetchRequest(normalizedData, version); + }); + } else { + data.groups().forEach(group -> { + if (group.topics() != null) { + group.topics().forEach(topic -> { + if (topic.topicId() == null || topic.topicId().equals(Uuid.ZERO_UUID)) { + throw new UnsupportedVersionException("The broker offset fetch api version " + + version + " does require usage of topic ids."); + } + }); + } + }); + } + } + + private void throwIfRequestingAllTopicsIsUnsupported(short version) { + if (version < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { + data.groups().forEach(group -> { + if (group.topics() == null) { + throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " + + "v" + version + ", but we need v2 or newer to request all topic partitions."); + } + }); + } + } + + private OffsetFetchRequestData maybeDowngrade(short version) { + // Convert data to use the appropriate version since version 8 + // uses different format. 
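As a rough illustration of the new OffsetFetchRequest.Builder entry points above, the sketch below builds an all-partitions offset fetch for a single group via forTopicNames. The group id, version, and boolean flags are assumptions chosen for the example.

import java.util.List;

import org.apache.kafka.common.message.OffsetFetchRequestData;
import org.apache.kafka.common.requests.OffsetFetchRequest;

public class OffsetFetchRequestBuilderSketch {
    public static void main(String[] args) {
        // A null topics list means "fetch committed offsets for all partitions of the group".
        OffsetFetchRequestData data = new OffsetFetchRequestData()
            .setRequireStable(true)
            .setGroups(List.of(new OffsetFetchRequestData.OffsetFetchRequestGroup()
                .setGroupId("my-group")
                .setTopics(null)));
        // forTopicNames caps the allowed version below TOPIC_ID_MIN_VERSION, so topic ids are never required.
        OffsetFetchRequest.Builder builder =
            OffsetFetchRequest.Builder.forTopicNames(data, false);
        OffsetFetchRequest request = builder.build((short) 9);
        System.out.println(request.groups());
    }
}

forTopicIdsOrNames is the entry point to use when the client may negotiate up to the topic-id versions of the API.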
+ if (version >= BATCH_MIN_VERSION || data.groups().isEmpty()) return data; + + OffsetFetchRequestGroup group = data.groups().get(0); + String groupName = group.groupId(); + List topics = group.topics(); + List oldFormatTopics = null; + + if (topics != null) { + oldFormatTopics = topics + .stream() + .map(t -> new OffsetFetchRequestTopic() + .setName(t.name()) + .setPartitionIndexes(t.partitionIndexes())) + .collect(Collectors.toList()); } - return new OffsetFetchRequest(data, version); + + return new OffsetFetchRequestData() + .setGroupId(groupName) + .setTopics(oldFormatTopics) + .setRequireStable(data.requireStable()); + } + + @Override + public OffsetFetchRequest build(short version) { + throwIfBatchingIsUnsupported(version); + throwIfStableOffsetsUnsupported(version); + throwIfMissingRequiredTopicIdentifiers(version); + throwIfRequestingAllTopicsIsUnsupported(version); + return new OffsetFetchRequest(maybeDowngrade(version), version); } @Override @@ -202,19 +199,6 @@ public NoBatchedOffsetFetchRequestException(String message) { } } - public List partitions() { - if (isAllPartitions()) { - return null; - } - List partitions = new ArrayList<>(); - for (OffsetFetchRequestTopic topic : data.topics()) { - for (Integer partitionIndex : topic.partitionIndexes()) { - partitions.add(new TopicPartition(topic.name(), partitionIndex)); - } - } - return partitions; - } - public String groupId() { return data.groupId(); } @@ -224,7 +208,7 @@ public boolean requireStable() { } public List groups() { - if (version() >= 8) { + if (version() >= BATCH_MIN_VERSION) { return data.groups(); } else { OffsetFetchRequestData.OffsetFetchRequestGroup group = @@ -253,7 +237,7 @@ public Map> groupIdsToPartitions() { Map> groupIdsToPartitions = new HashMap<>(); for (OffsetFetchRequestGroup group : data.groups()) { List tpList = null; - if (group.topics() != ALL_TOPIC_PARTITIONS_BATCH) { + if (group.topics() != null) { tpList = new ArrayList<>(); for (OffsetFetchRequestTopics topic : group.topics()) { for (Integer partitionIndex : topic.partitionIndexes()) { @@ -285,65 +269,61 @@ private OffsetFetchRequest(OffsetFetchRequestData data, short version) { this.data = data; } - public OffsetFetchResponse getErrorResponse(Errors error) { - return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, error); - } - - public OffsetFetchResponse getErrorResponse(int throttleTimeMs, Errors error) { - Map responsePartitions = new HashMap<>(); - if (version() < 2) { - OffsetFetchResponse.PartitionData partitionError = new OffsetFetchResponse.PartitionData( - OffsetFetchResponse.INVALID_OFFSET, - Optional.empty(), - OffsetFetchResponse.NO_METADATA, - error); - - for (OffsetFetchRequestTopic topic : this.data.topics()) { - for (int partitionIndex : topic.partitionIndexes()) { - responsePartitions.put( - new TopicPartition(topic.name(), partitionIndex), partitionError); - } - } - return new OffsetFetchResponse(error, responsePartitions); - } - if (version() == 2) { - return new OffsetFetchResponse(error, responsePartitions); - } - if (version() >= 3 && version() < 8) { - return new OffsetFetchResponse(throttleTimeMs, error, responsePartitions); - } - List groupIds = groupIds(); - Map errorsMap = new HashMap<>(groupIds.size()); - Map> partitionMap = - new HashMap<>(groupIds.size()); - for (String g : groupIds) { - errorsMap.put(g, error); - partitionMap.put(g, responsePartitions); - } - return new OffsetFetchResponse(throttleTimeMs, errorsMap, partitionMap); - } - @Override public OffsetFetchResponse getErrorResponse(int 
throttleTimeMs, Throwable e) { - return getErrorResponse(throttleTimeMs, Errors.forException(e)); + Errors error = Errors.forException(e); + + if (version() < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { + // The response does not support top level error so we return each + // partition with the error. + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setTopics(data.topics().stream().map(topic -> + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName(topic.name()) + .setPartitions(topic.partitionIndexes().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(partition) + .setErrorCode(error.code()) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())), + version() + ); + } else if (version() < BATCH_MIN_VERSION) { + // The response does not support multiple groups but it does support + // top level error. + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code()), + version() + ); + } else { + // The response does support multiple groups so we provide a top level + // error per group. + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setGroups(data.groups().stream().map(group -> + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group.groupId()) + .setErrorCode(error.code()) + ).collect(Collectors.toList())), + version() + ); + } } public static OffsetFetchRequest parse(Readable readable, short version) { return new OffsetFetchRequest(new OffsetFetchRequestData(readable, version), version); } - public boolean isAllPartitions() { - return data.topics() == ALL_TOPIC_PARTITIONS; - } - - public boolean isAllPartitionsForGroup(String groupId) { - OffsetFetchRequestGroup group = data - .groups() - .stream() - .filter(g -> g.groupId().equals(groupId)) - .collect(Collectors.toList()) - .get(0); - return group.topics() == ALL_TOPIC_PARTITIONS_BATCH; + public static boolean useTopicIds(short version) { + return version >= TOPIC_ID_MIN_VERSION; } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java index d2f9d7159b1f1..77297e96e6e6b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java @@ -16,31 +16,26 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseGroup; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartition; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartitions; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopic; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopics; import org.apache.kafka.common.protocol.ApiKeys; -import 
org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Arrays; import java.util.EnumMap; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Optional; +import java.util.function.Function; import java.util.stream.Collectors; import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH; +import static org.apache.kafka.common.requests.OffsetFetchRequest.BATCH_MIN_VERSION; +import static org.apache.kafka.common.requests.OffsetFetchRequest.TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION; /** * Possible error codes: @@ -61,221 +56,112 @@ public class OffsetFetchResponse extends AbstractResponse { public static final long INVALID_OFFSET = -1L; public static final String NO_METADATA = ""; - public static final PartitionData UNKNOWN_PARTITION = new PartitionData(INVALID_OFFSET, - Optional.empty(), - NO_METADATA, - Errors.UNKNOWN_TOPIC_OR_PARTITION); - public static final PartitionData UNAUTHORIZED_PARTITION = new PartitionData(INVALID_OFFSET, - Optional.empty(), - NO_METADATA, - Errors.TOPIC_AUTHORIZATION_FAILED); + + // We only need to track the partition errors returned in version 1. This + // is used to identify group level errors when the response is normalized. private static final List PARTITION_ERRORS = Arrays.asList( - Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.TOPIC_AUTHORIZATION_FAILED); + Errors.UNKNOWN_TOPIC_OR_PARTITION, + Errors.TOPIC_AUTHORIZATION_FAILED + ); + private final short version; private final OffsetFetchResponseData data; - private final Errors error; - private final Map groupLevelErrors = new HashMap<>(); - - public static final class PartitionData { - public final long offset; - public final String metadata; - public final Errors error; - public final Optional leaderEpoch; - - public PartitionData(long offset, - Optional leaderEpoch, - String metadata, - Errors error) { - this.offset = offset; - this.leaderEpoch = leaderEpoch; - this.metadata = metadata; - this.error = error; - } - - public boolean hasError() { - return this.error != Errors.NONE; - } + // Lazily initialized when OffsetFetchResponse#group is called. + private Map groups = null; - @Override - public boolean equals(Object other) { - if (!(other instanceof PartitionData)) - return false; - PartitionData otherPartition = (PartitionData) other; - return Objects.equals(this.offset, otherPartition.offset) - && Objects.equals(this.leaderEpoch, otherPartition.leaderEpoch) - && Objects.equals(this.metadata, otherPartition.metadata) - && Objects.equals(this.error, otherPartition.error); - } + public static class Builder { + private final List groups; - @Override - public String toString() { - return "PartitionData(" - + "offset=" + offset - + ", leaderEpoch=" + leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH) - + ", metadata=" + metadata - + ", error='" + error.toString() - + ")"; + public Builder(OffsetFetchResponseGroup group) { + this(List.of(group)); } - @Override - public int hashCode() { - return Objects.hash(offset, leaderEpoch, metadata, error); - } - } - - /** - * Constructor without throttle time. 
- * @param error Potential coordinator or group level error code (for api version 2 and later) - * @param responseData Fetched offset information grouped by topic-partition - */ - public OffsetFetchResponse(Errors error, Map responseData) { - this(DEFAULT_THROTTLE_TIME, error, responseData); - } - - /** - * Constructor with throttle time for version 0 to 7 - * @param throttleTimeMs The time in milliseconds that this response was throttled - * @param error Potential coordinator or group level error code (for api version 2 and later) - * @param responseData Fetched offset information grouped by topic-partition - */ - public OffsetFetchResponse(int throttleTimeMs, Errors error, Map responseData) { - super(ApiKeys.OFFSET_FETCH); - Map offsetFetchResponseTopicMap = new HashMap<>(); - for (Map.Entry entry : responseData.entrySet()) { - String topicName = entry.getKey().topic(); - OffsetFetchResponseTopic topic = offsetFetchResponseTopicMap.getOrDefault( - topicName, new OffsetFetchResponseTopic().setName(topicName)); - PartitionData partitionData = entry.getValue(); - topic.partitions().add(new OffsetFetchResponsePartition() - .setPartitionIndex(entry.getKey().partition()) - .setErrorCode(partitionData.error.code()) - .setCommittedOffset(partitionData.offset) - .setCommittedLeaderEpoch( - partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH)) - .setMetadata(partitionData.metadata) - ); - offsetFetchResponseTopicMap.put(topicName, topic); - } - - this.data = new OffsetFetchResponseData() - .setTopics(new ArrayList<>(offsetFetchResponseTopicMap.values())) - .setErrorCode(error.code()) - .setThrottleTimeMs(throttleTimeMs); - this.error = error; - } - - /** - * Constructor with throttle time for version 8 and above. - * @param throttleTimeMs The time in milliseconds that this response was throttled - * @param errors Potential coordinator or group level error code - * @param responseData Fetched offset information grouped by topic-partition and by group - */ - public OffsetFetchResponse(int throttleTimeMs, - Map errors, - Map> responseData) { - super(ApiKeys.OFFSET_FETCH); - List groupList = new ArrayList<>(); - for (Entry> entry : responseData.entrySet()) { - String groupName = entry.getKey(); - Map partitionDataMap = entry.getValue(); - Map offsetFetchResponseTopicsMap = new HashMap<>(); - for (Entry partitionEntry : partitionDataMap.entrySet()) { - String topicName = partitionEntry.getKey().topic(); - OffsetFetchResponseTopics topic = - offsetFetchResponseTopicsMap.getOrDefault(topicName, - new OffsetFetchResponseTopics().setName(topicName)); - PartitionData partitionData = partitionEntry.getValue(); - topic.partitions().add(new OffsetFetchResponsePartitions() - .setPartitionIndex(partitionEntry.getKey().partition()) - .setErrorCode(partitionData.error.code()) - .setCommittedOffset(partitionData.offset) - .setCommittedLeaderEpoch( - partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH)) - .setMetadata(partitionData.metadata)); - offsetFetchResponseTopicsMap.put(topicName, topic); - } - groupList.add(new OffsetFetchResponseGroup() - .setGroupId(groupName) - .setTopics(new ArrayList<>(offsetFetchResponseTopicsMap.values())) - .setErrorCode(errors.get(groupName).code())); - groupLevelErrors.put(groupName, errors.get(groupName)); + public Builder(List groups) { + this.groups = groups; } - this.data = new OffsetFetchResponseData() - .setGroups(groupList) - .setThrottleTimeMs(throttleTimeMs); - this.error = null; - } - - public OffsetFetchResponse(List groups, short version) { - 
super(ApiKeys.OFFSET_FETCH); - data = new OffsetFetchResponseData(); - - if (version >= 8) { - data.setGroups(groups); - error = null; - for (OffsetFetchResponseGroup group : data.groups()) { - this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode())); - } - } else { - if (groups.size() != 1) { - throw new UnsupportedVersionException( - "Version " + version + " of OffsetFetchResponse only supports one group." - ); - } + public OffsetFetchResponse build(short version) { + var data = new OffsetFetchResponseData(); - OffsetFetchResponseGroup group = groups.get(0); - data.setErrorCode(group.errorCode()); - error = Errors.forCode(group.errorCode()); + if (version >= BATCH_MIN_VERSION) { + data.setGroups(groups); + } else { + if (groups.size() != 1) { + throw new UnsupportedVersionException( + "Version " + version + " of OffsetFetchResponse only supports one group." + ); + } - group.topics().forEach(topic -> { - OffsetFetchResponseTopic newTopic = new OffsetFetchResponseTopic().setName(topic.name()); - data.topics().add(newTopic); + OffsetFetchResponseGroup group = groups.get(0); + data.setErrorCode(group.errorCode()); - topic.partitions().forEach(partition -> { - OffsetFetchResponsePartition newPartition; + group.topics().forEach(topic -> { + OffsetFetchResponseTopic newTopic = new OffsetFetchResponseTopic().setName(topic.name()); + data.topics().add(newTopic); - if (version < 2 && group.errorCode() != Errors.NONE.code()) { - // Versions prior to version 2 do not support a top level error. Therefore, - // we put it at the partition level. - newPartition = new OffsetFetchResponsePartition() - .setPartitionIndex(partition.partitionIndex()) - .setErrorCode(group.errorCode()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH); - } else { - newPartition = new OffsetFetchResponsePartition() + topic.partitions().forEach(partition -> { + newTopic.partitions().add(new OffsetFetchResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(partition.errorCode()) .setCommittedOffset(partition.committedOffset()) .setMetadata(partition.metadata()) - .setCommittedLeaderEpoch(partition.committedLeaderEpoch()); - } - - newTopic.partitions().add(newPartition); + .setCommittedLeaderEpoch(partition.committedLeaderEpoch())); + }); }); - }); + } + + return new OffsetFetchResponse(data, version); } } public OffsetFetchResponse(OffsetFetchResponseData data, short version) { super(ApiKeys.OFFSET_FETCH); this.data = data; - // for version 2 and later use the top-level error code (in ERROR_CODE_KEY_NAME) from the response. - // for older versions there is no top-level error in the response and all errors are partition errors, - // so if there is a group or coordinator error at the partition level use that as the top-level error. - // this way clients can depend on the top-level error regardless of the offset fetch version. - // we return the error differently starting with version 8, so we will only populate the - // error field if we are between version 2 and 7. if we are in version 8 or greater, then - // we will populate the map of group id to error codes. - if (version < 8) { - this.error = version >= 2 ? Errors.forCode(data.errorCode()) : topLevelError(data); + this.version = version; + } + + public OffsetFetchResponseData.OffsetFetchResponseGroup group(String groupId) { + if (version < BATCH_MIN_VERSION) { + // for version 2 and later use the top-level error code from the response. 
+ // for older versions there is no top-level error in the response and all errors are partition errors, + // so if there is a group or coordinator error at the partition level use that as the top-level error. + // this way clients can depend on the top-level error regardless of the offset fetch version. + // we return the error differently starting with version 8, so we will only populate the + // error field if we are between version 2 and 7. if we are in version 8 or greater, then + // we will populate the map of group id to error codes. + short topLevelError = version < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION ? topLevelError(data).code() : data.errorCode(); + if (topLevelError != Errors.NONE.code()) { + return new OffsetFetchResponseGroup() + .setGroupId(groupId) + .setErrorCode(topLevelError); + } else { + return new OffsetFetchResponseGroup() + .setGroupId(groupId) + .setTopics(data.topics().stream().map(topic -> + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topic.name()) + .setPartitions(topic.partitions().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition.partitionIndex()) + .setErrorCode(partition.errorCode()) + .setCommittedOffset(partition.committedOffset()) + .setMetadata(partition.metadata()) + .setCommittedLeaderEpoch(partition.committedLeaderEpoch()) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())); + } } else { - for (OffsetFetchResponseGroup group : data.groups()) { - this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode())); + if (groups == null) { + groups = data.groups().stream().collect(Collectors.toMap( + OffsetFetchResponseData.OffsetFetchResponseGroup::groupId, + Function.identity() + )); + } + var group = groups.get(groupId); + if (group == null) { + throw new IllegalArgumentException("Group " + groupId + " not found in the response"); } - this.error = null; + return group; } } @@ -301,98 +187,33 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public boolean hasError() { - return error != Errors.NONE; - } - - public boolean groupHasError(String groupId) { - Errors error = groupLevelErrors.get(groupId); - if (error == null) { - return this.error != null && this.error != Errors.NONE; - } - return error != Errors.NONE; - } - - public Errors error() { - return error; - } - - public Errors groupLevelError(String groupId) { - if (error != null) { - return error; - } - return groupLevelErrors.get(groupId); - } - @Override public Map errorCounts() { Map counts = new EnumMap<>(Errors.class); - if (!groupLevelErrors.isEmpty()) { - // built response with v8 or above - for (Map.Entry entry : groupLevelErrors.entrySet()) { - updateErrorCounts(counts, entry.getValue()); + if (version < BATCH_MIN_VERSION) { + if (version >= TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { + updateErrorCounts(counts, Errors.forCode(data.errorCode())); } - for (OffsetFetchResponseGroup group : data.groups()) { - group.topics().forEach(topic -> - topic.partitions().forEach(partition -> - updateErrorCounts(counts, Errors.forCode(partition.errorCode())))); - } - } else { - // built response with v0-v7 - updateErrorCounts(counts, error); data.topics().forEach(topic -> topic.partitions().forEach(partition -> - updateErrorCounts(counts, Errors.forCode(partition.errorCode())))); - } - return counts; - } - - // package-private for testing purposes - Map responseDataV0ToV7() { - Map responseData = new HashMap<>(); - for 
(OffsetFetchResponseTopic topic : data.topics()) { - for (OffsetFetchResponsePartition partition : topic.partitions()) { - responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()), - new PartitionData(partition.committedOffset(), - RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), - partition.metadata(), - Errors.forCode(partition.errorCode())) - ); - } - } - return responseData; - } - - private Map buildResponseData(String groupId) { - Map responseData = new HashMap<>(); - OffsetFetchResponseGroup group = data - .groups() - .stream() - .filter(g -> g.groupId().equals(groupId)) - .collect(Collectors.toList()) - .get(0); - for (OffsetFetchResponseTopics topic : group.topics()) { - for (OffsetFetchResponsePartitions partition : topic.partitions()) { - responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()), - new PartitionData(partition.committedOffset(), - RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), - partition.metadata(), - Errors.forCode(partition.errorCode())) + updateErrorCounts(counts, Errors.forCode(partition.errorCode())) + ) + ); + } else { + data.groups().forEach(group -> { + updateErrorCounts(counts, Errors.forCode(group.errorCode())); + group.topics().forEach(topic -> + topic.partitions().forEach(partition -> + updateErrorCounts(counts, Errors.forCode(partition.errorCode())) + ) ); - } - } - return responseData; - } - - public Map partitionDataMap(String groupId) { - if (groupLevelErrors.isEmpty()) { - return responseDataV0ToV7(); + }); } - return buildResponseData(groupId); + return counts; } - public static OffsetFetchResponse parse(ByteBuffer buffer, short version) { - return new OffsetFetchResponse(new OffsetFetchResponseData(new ByteBufferAccessor(buffer), version), version); + public static OffsetFetchResponse parse(Readable readable, short version) { + return new OffsetFetchResponse(new OffsetFetchResponseData(readable, version), version); } @Override @@ -404,4 +225,31 @@ public OffsetFetchResponseData data() { public boolean shouldClientThrottle(short version) { return version >= 4; } + + public static OffsetFetchResponseData.OffsetFetchResponseGroup groupError( + OffsetFetchRequestData.OffsetFetchRequestGroup group, + Errors error, + int version + ) { + if (version >= TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { + return new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group.groupId()) + .setErrorCode(error.code()); + } else { + return new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group.groupId()) + .setTopics(group.topics().stream().map(topic -> + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topic.name()) + .setPartitions(topic.partitionIndexes().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition) + .setErrorCode(error.code()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java index 9cdfb59460c6a..78f7e3132c85d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java @@ -36,12 +36,6 @@ public 
class OffsetsForLeaderEpochRequest extends AbstractRequest { */ public static final int CONSUMER_REPLICA_ID = -1; - /** - * Sentinel replica_id which indicates either a debug consumer or a replica which is using - * an old version of the protocol. - */ - public static final int DEBUGGING_REPLICA_ID = -2; - private final OffsetForLeaderEpochRequestData data; public static class Builder extends AbstractRequest.Builder { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java index a3f4323698d33..ff879667b2655 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -73,8 +72,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static OffsetsForLeaderEpochResponse parse(ByteBuffer buffer, short version) { - return new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData(new ByteBufferAccessor(buffer), version)); + public static OffsetsForLeaderEpochResponse parse(Readable readable, short version) { + return new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java index ef735b2465c4d..a39ea157d72fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java @@ -17,7 +17,7 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.InvalidRecordException; -import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.message.ProduceResponseData; @@ -96,7 +96,7 @@ public String toString() { // Care should be taken in methods that use this field. private volatile ProduceRequestData data; // the partitionSizes is lazily initialized since it is used by server-side in production. 
- private volatile Map partitionSizes; + private volatile Map partitionSizes; public ProduceRequest(ProduceRequestData produceRequestData, short version) { super(ApiKeys.PRODUCE, version); @@ -107,15 +107,20 @@ public ProduceRequest(ProduceRequestData produceRequestData, short version) { } // visible for testing - Map partitionSizes() { + Map partitionSizes() { if (partitionSizes == null) { // this method may be called by different thread (see the comment on data) synchronized (this) { if (partitionSizes == null) { - Map tmpPartitionSizes = new HashMap<>(); + Map tmpPartitionSizes = new HashMap<>(); data.topicData().forEach(topicData -> topicData.partitionData().forEach(partitionData -> - tmpPartitionSizes.compute(new TopicPartition(topicData.name(), partitionData.index()), + // While topic id and name might not be populated at the same time in the request all the time; + // for example on server side they will never be populated together while in produce client they will be, + // to simplify initializing `TopicIdPartition` the code will use both topic name and id. + // TopicId will be Uuid.ZERO_UUID in versions < 13 and topic name will be used as main identifier of topic partition. + // TopicName will be empty string in versions >= 13 and topic id will be used as the main identifier. + tmpPartitionSizes.compute(new TopicIdPartition(topicData.topicId(), partitionData.index(), topicData.name()), (ignored, previousValue) -> partitionData.records().sizeInBytes() + (previousValue == null ? 0 : previousValue)) ) @@ -161,14 +166,14 @@ public ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e) { if (acks == 0) return null; ApiError apiError = ApiError.fromThrowable(e); ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs); - partitionSizes().forEach((tp, ignored) -> { - ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic()); + partitionSizes().forEach((tpId, ignored) -> { + ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tpId.topic(), tpId.topicId()); if (tpr == null) { - tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()); + tpr = new ProduceResponseData.TopicProduceResponse().setName(tpId.topic()).setTopicId(tpId.topicId()); data.responses().add(tpr); } tpr.partitionResponses().add(new ProduceResponseData.PartitionProduceResponse() - .setIndex(tp.partition()) + .setIndex(tpId.partition()) .setRecordErrors(Collections.emptyList()) .setBaseOffset(INVALID_OFFSET) .setLogAppendTimeMs(RecordBatch.NO_TIMESTAMP) diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java index 573ec9e434d2f..673b91ac9ab20 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java @@ -17,15 +17,14 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.message.ProduceResponseData.LeaderIdAndEpoch; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; -import java.nio.ByteBuffer; 
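// Illustrative sketch, not part of this patch: how the partitionSizes map introduced above is now
// keyed by TopicIdPartition instead of TopicPartition. The class name, topic name and byte counts
// are made-up examples; only the TopicIdPartition(Uuid, int, String) constructor and the Uuid
// helpers are real client APIs.
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.Uuid;

import java.util.HashMap;
import java.util.Map;

public class PartitionSizesSketch {
    public static void main(String[] args) {
        Map<TopicIdPartition, Integer> sizes = new HashMap<>();
        // Versions < 13: the topic id is Uuid.ZERO_UUID, so the name identifies the partition.
        sizes.compute(new TopicIdPartition(Uuid.ZERO_UUID, 0, "payments"),
            (ignored, previous) -> 128 + (previous == null ? 0 : previous));
        // Versions >= 13: the name is empty, so the topic id identifies the partition.
        sizes.compute(new TopicIdPartition(Uuid.randomUuid(), 0, ""),
            (ignored, previous) -> 256 + (previous == null ? 0 : previous));
        sizes.forEach((tpId, bytes) -> System.out.println(tpId + " -> " + bytes + " bytes"));
    }
}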
import java.util.Collections; import java.util.EnumMap; import java.util.List; @@ -56,6 +55,7 @@ * {@link Errors#INVALID_TXN_STATE} * {@link Errors#INVALID_PRODUCER_ID_MAPPING} * {@link Errors#CONCURRENT_TRANSACTIONS} + * {@link Errors#UNKNOWN_TOPIC_ID} */ public class ProduceResponse extends AbstractResponse { public static final long INVALID_OFFSET = -1L; @@ -73,7 +73,7 @@ public ProduceResponse(ProduceResponseData produceResponseData) { * @param responses Produced data grouped by topic-partition */ @Deprecated - public ProduceResponse(Map responses) { + public ProduceResponse(Map responses) { this(responses, DEFAULT_THROTTLE_TIME, Collections.emptyList()); } @@ -84,7 +84,7 @@ public ProduceResponse(Map responses) { * @param throttleTimeMs Time in milliseconds the response was throttled */ @Deprecated - public ProduceResponse(Map responses, int throttleTimeMs) { + public ProduceResponse(Map responses, int throttleTimeMs) { this(toData(responses, throttleTimeMs, Collections.emptyList())); } @@ -97,16 +97,16 @@ public ProduceResponse(Map responses, int thr * @param nodeEndpoints List of node endpoints */ @Deprecated - public ProduceResponse(Map responses, int throttleTimeMs, List nodeEndpoints) { + public ProduceResponse(Map responses, int throttleTimeMs, List nodeEndpoints) { this(toData(responses, throttleTimeMs, nodeEndpoints)); } - private static ProduceResponseData toData(Map responses, int throttleTimeMs, List nodeEndpoints) { + private static ProduceResponseData toData(Map responses, int throttleTimeMs, List nodeEndpoints) { ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs); responses.forEach((tp, response) -> { - ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic()); + ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic(), tp.topicId()); if (tpr == null) { - tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()); + tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()).setTopicId(tp.topicId()); data.responses().add(tpr); } tpr.partitionResponses() @@ -286,8 +286,8 @@ public String toString() { } } - public static ProduceResponse parse(ByteBuffer buffer, short version) { - return new ProduceResponse(new ProduceResponseData(new ByteBufferAccessor(buffer), version)); + public static ProduceResponse parse(Readable readable, short version) { + return new ProduceResponse(new ProduceResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java index e224eed7aebd5..dcd12b42bb68c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.PushTelemetryResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -65,8 +64,8 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static PushTelemetryResponse parse(ByteBuffer buffer, short version) { + public static PushTelemetryResponse parse(Readable readable, short version) { return new PushTelemetryResponse(new 
PushTelemetryResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java index e3cc84884e64a..3637da2ca1b49 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new ReadShareGroupStateResponse(new ReadShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -82,8 +78,8 @@ public ReadShareGroupStateRequestData data() { public static ReadShareGroupStateRequest parse(Readable readable, short version) { return new ReadShareGroupStateRequest( - new ReadShareGroupStateRequestData(readable, version), - version + new ReadShareGroupStateRequestData(readable, version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java index cd6f5be9a2c74..2ab20e52e95c3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java @@ -21,12 +21,10 @@ import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumMap; import java.util.List; import java.util.Map; @@ -48,9 +46,9 @@ public ReadShareGroupStateResponseData data() { public Map errorCounts() { Map counts = new EnumMap<>(Errors.class); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -65,54 +63,54 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static ReadShareGroupStateResponse parse(ByteBuffer buffer, short version) { + 
public static ReadShareGroupStateResponse parse(Readable readable, short version) { return new ReadShareGroupStateResponse( - new ReadShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) + new ReadShareGroupStateResponseData(readable, version) ); } public static ReadShareGroupStateResponseData toResponseData( - Uuid topicId, - int partition, - long startOffset, - int stateEpoch, - List stateBatches + Uuid topicId, + int partition, + long startOffset, + int stateEpoch, + List stateBatches ) { return new ReadShareGroupStateResponseData() - .setResults(Collections.singletonList( - new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(Collections.singletonList( - new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partition) - .setStartOffset(startOffset) - .setStateEpoch(stateEpoch) - .setStateBatches(stateBatches) - )) - )); + .setResults(List.of( + new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partition) + .setStartOffset(startOffset) + .setStateEpoch(stateEpoch) + .setStateBatches(stateBatches) + )) + )); } public static ReadShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { return new ReadShareGroupStateResponseData().setResults( - Collections.singletonList(new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(Collections.singletonList(new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); + List.of(new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicId) + .setPartitions(List.of(new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage))))); } public static ReadShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult(int partitionId, Errors error, String errorMessage) { return new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static ReadShareGroupStateResponseData.ReadStateResult toResponseReadStateResult(Uuid topicId, List partitionResults) { return new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); + .setTopicId(topicId) + .setPartitions(partitionResults); } public static ReadShareGroupStateResponseData toGlobalErrorResponse(ReadShareGroupStateRequestData request, Errors error) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java index e56f0ebe3178f..803b63ac16835 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder errorCounts() { } } - public static RemoveRaftVoterResponse parse(ByteBuffer buffer, short version) { + public static RemoveRaftVoterResponse parse(Readable readable, short version) { return new RemoveRaftVoterResponse( - 
new RemoveRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); + new RemoveRaftVoterResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java index 2baf992283e90..6eb0dc0e0ed53 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.RenewDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class RenewDelegationTokenResponse extends AbstractResponse { @@ -33,9 +32,9 @@ public RenewDelegationTokenResponse(RenewDelegationTokenResponseData data) { this.data = data; } - public static RenewDelegationTokenResponse parse(ByteBuffer buffer, short version) { + public static RenewDelegationTokenResponse parse(Readable readable, short version) { return new RenewDelegationTokenResponse(new RenewDelegationTokenResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java index d6ca8c170dc45..ba0fc29a39180 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; /** @@ -77,7 +76,7 @@ public SaslAuthenticateResponseData data() { return data; } - public static SaslAuthenticateResponse parse(ByteBuffer buffer, short version) { - return new SaslAuthenticateResponse(new SaslAuthenticateResponseData(new ByteBufferAccessor(buffer), version)); + public static SaslAuthenticateResponse parse(Readable readable, short version) { + return new SaslAuthenticateResponse(new SaslAuthenticateResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java index 5097711e73787..40de2ceff30dd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.SaslHandshakeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -71,7 +70,7 @@ public List enabledMechanisms() { return data.mechanisms(); } - public static SaslHandshakeResponse parse(ByteBuffer buffer, 
short version) { - return new SaslHandshakeResponse(new SaslHandshakeResponseData(new ByteBufferAccessor(buffer), version)); + public static SaslHandshakeResponse parse(Readable readable, short version) { + return new SaslHandshakeResponse(new SaslHandshakeResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java index c6be8213c0cb7..c73df0b1d5656 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java @@ -17,15 +17,12 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareAcknowledgeRequestData; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -36,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder> ackMap = new HashMap<>(); - + ShareAcknowledgeRequestData.AcknowledgeTopicCollection ackTopics = new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(); for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { TopicIdPartition tip = acknowledgeEntry.getKey(); - Map partMap = ackMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = partMap.get(tip.partition()); + ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = ackTopics.find(tip.topicId()); + if (ackTopic == null) { + ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(tip.topicId()) + .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection()); + ackTopics.add(ackTopic); + } + ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = ackTopic.partitions().find(tip.partition()); if (ackPartition == null) { ackPartition = new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(tip.partition()); - partMap.put(tip.partition(), ackPartition); + ackTopic.partitions().add(ackPartition); } ackPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); } - // Finally, build up the data to fetch - data.setTopics(new ArrayList<>()); - ackMap.forEach((topicId, partMap) -> { - ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - data.topics().add(ackTopic); - - partMap.forEach((index, ackPartition) -> ackTopic.partitions().add(ackPartition)); - }); - - return new ShareAcknowledgeRequest.Builder(data, true); + data.setTopics(ackTopics); + return new ShareAcknowledgeRequest.Builder(data); } public ShareAcknowledgeRequestData data() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java index 994e063aa1381..d303a852b795e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java @@ -18,13 +18,11 @@ import 
org.apache.kafka.common.Node; import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumMap; import java.util.Iterator; @@ -84,9 +82,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ShareAcknowledgeResponse parse(ByteBuffer buffer, short version) { + public static ShareAcknowledgeResponse parse(Readable readable, short version) { return new ShareAcknowledgeResponse( - new ShareAcknowledgeResponseData(new ByteBufferAccessor(buffer), version) + new ShareAcknowledgeResponseData(readable, version) ); } @@ -116,22 +114,21 @@ public static ShareAcknowledgeResponse of(Errors error, public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleTimeMs, Iterator> partIterator, List nodeEndpoints) { - Map topicResponseList = new LinkedHashMap<>(); + ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection topicResponses = new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection(); while (partIterator.hasNext()) { Map.Entry entry = partIterator.next(); ShareAcknowledgeResponseData.PartitionData partitionData = entry.getValue(); // Since PartitionData alone doesn't know the partition ID, we set it here partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); // Checking if the topic is already present in the map - if (topicResponseList.containsKey(entry.getKey().topicId())) { - topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); - } else { - List partitionResponses = new ArrayList<>(); - partitionResponses.add(partitionData); - topicResponseList.put(entry.getKey().topicId(), new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse topicResponse = topicResponses.find(entry.getKey().topicId()); + if (topicResponse == null) { + topicResponse = new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() .setTopicId(entry.getKey().topicId()) - .setPartitions(partitionResponses)); + .setPartitions(new ArrayList<>()); + topicResponses.add(topicResponse); } + topicResponse.partitions().add(partitionData); } ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list @@ -143,6 +140,6 @@ public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleT .setRack(endpoint.rack()))); return data.setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) - .setResponses(new ArrayList<>(topicResponseList.values())); + .setResponses(topicResponses); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java index ea8d93f2a91f3..5ede165c2eff0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -37,11 +37,7 @@ public static class Builder extends AbstractRequest.Builder { private final ShareFetchRequestData data; public 
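// Illustrative sketch, not part of this patch: the find-or-add idiom that the ShareAcknowledge
// request and response builders above now use with the generated collections, replacing the old
// nested HashMaps. The class name and topic id are made up; the ShareAcknowledgeRequestData types
// are the generated classes referenced in this patch.
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.ShareAcknowledgeRequestData;

public class AcknowledgeTopicCollectionSketch {
    public static void main(String[] args) {
        ShareAcknowledgeRequestData.AcknowledgeTopicCollection topics =
            new ShareAcknowledgeRequestData.AcknowledgeTopicCollection();
        Uuid topicId = Uuid.randomUuid();
        // find() returns null when the topic is absent, so the topic entry is created and added
        // exactly once and then reused for every partition of that topic.
        ShareAcknowledgeRequestData.AcknowledgeTopic topic = topics.find(topicId);
        if (topic == null) {
            topic = new ShareAcknowledgeRequestData.AcknowledgeTopic()
                .setTopicId(topicId)
                .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection());
            topics.add(topic);
        }
        System.out.println("topics in collection: " + topics.size());
    }
}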
Builder(ShareFetchRequestData data) { - this(data, false); - } - - public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { - super(ApiKeys.SHARE_FETCH, enableUnstableLastVersion); + super(ApiKeys.SHARE_FETCH); this.data = data; } @@ -66,15 +62,24 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, data.setBatchSize(batchSize); // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index - Map> fetchMap = new HashMap<>(); + ShareFetchRequestData.FetchTopicCollection fetchTopics = new ShareFetchRequestData.FetchTopicCollection(); // First, start by adding the list of topic-partitions we are fetching if (!isClosingShareSession) { for (TopicIdPartition tip : send) { - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() + ShareFetchRequestData.FetchTopic fetchTopic = fetchTopics.find(tip.topicId()); + if (fetchTopic == null) { + fetchTopic = new ShareFetchRequestData.FetchTopic() + .setTopicId(tip.topicId()) + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection()); + fetchTopics.add(fetchTopic); + } + ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().find(tip.partition()); + if (fetchPartition == null) { + fetchPartition = new ShareFetchRequestData.FetchPartition() .setPartitionIndex(tip.partition()); - partMap.put(tip.partition(), fetchPartition); + fetchTopic.partitions().add(fetchPartition); + } } } @@ -82,29 +87,26 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, // topic-partitions will be a subset, but if the assignment changes, there might be new entries to add for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { TopicIdPartition tip = acknowledgeEntry.getKey(); - Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); - ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); + ShareFetchRequestData.FetchTopic fetchTopic = fetchTopics.find(tip.topicId()); + if (fetchTopic == null) { + fetchTopic = new ShareFetchRequestData.FetchTopic() + .setTopicId(tip.topicId()) + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection()); + fetchTopics.add(fetchTopic); + } + ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().find(tip.partition()); if (fetchPartition == null) { fetchPartition = new ShareFetchRequestData.FetchPartition() .setPartitionIndex(tip.partition()); - partMap.put(tip.partition(), fetchPartition); + fetchTopic.partitions().add(fetchPartition); } fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); } // Build up the data to fetch - if (!fetchMap.isEmpty()) { - data.setTopics(new ArrayList<>()); - fetchMap.forEach((topicId, partMap) -> { - ShareFetchRequestData.FetchTopic fetchTopic = new ShareFetchRequestData.FetchTopic() - .setTopicId(topicId) - .setPartitions(new ArrayList<>()); - partMap.forEach((index, fetchPartition) -> fetchTopic.partitions().add(fetchPartition)); - data.topics().add(fetchTopic); - }); - } + data.setTopics(fetchTopics); - Builder builder = new Builder(data, true); + Builder builder = new Builder(data); // And finally, forget the topic-partitions that are no longer in the session if (!forget.isEmpty()) { data.setForgottenTopicsData(new ArrayList<>()); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java 
b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java index 2bab79ead9bc0..329c5430e7e66 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java @@ -22,13 +22,12 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Records; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.EnumMap; @@ -110,9 +109,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { * *

    This method should only be used on the client side.

    */ - public static ShareFetchResponse parse(ByteBuffer buffer, short version) { + public static ShareFetchResponse parse(Readable readable, short version) { return new ShareFetchResponse( - new ShareFetchResponseData(new ByteBufferAccessor(buffer), version) + new ShareFetchResponseData(readable, version) ); } @@ -169,7 +168,7 @@ public static ShareFetchResponse of(Errors error, private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, Iterator> partIterator, List nodeEndpoints, int acquisitionLockTimeout) { - Map topicResponseList = new LinkedHashMap<>(); + ShareFetchResponseData.ShareFetchableTopicResponseCollection topicResponses = new ShareFetchResponseData.ShareFetchableTopicResponseCollection(); while (partIterator.hasNext()) { Map.Entry entry = partIterator.next(); ShareFetchResponseData.PartitionData partitionData = entry.getValue(); @@ -181,15 +180,14 @@ private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs if (partitionData.records() == null) partitionData.setRecords(MemoryRecords.EMPTY); // Checking if the topic is already present in the map - if (topicResponseList.containsKey(entry.getKey().topicId())) { - topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); - } else { - List partitionResponses = new ArrayList<>(); - partitionResponses.add(partitionData); - topicResponseList.put(entry.getKey().topicId(), new ShareFetchResponseData.ShareFetchableTopicResponse() + ShareFetchResponseData.ShareFetchableTopicResponse topicResponse = topicResponses.find(entry.getKey().topicId()); + if (topicResponse == null) { + topicResponse = new ShareFetchResponseData.ShareFetchableTopicResponse() .setTopicId(entry.getKey().topicId()) - .setPartitions(partitionResponses)); + .setPartitions(new ArrayList<>()); + topicResponses.add(topicResponse); } + topicResponse.partitions().add(partitionData); } ShareFetchResponseData data = new ShareFetchResponseData(); // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list @@ -202,14 +200,14 @@ private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs return data.setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) .setAcquisitionLockTimeoutMs(acquisitionLockTimeout) - .setResponses(new ArrayList<>(topicResponseList.values())); + .setResponses(topicResponses); } public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { return partitionResponse(topicIdPartition.topicPartition().partition(), error); } - public static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { + private static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { return new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition) .setErrorCode(error.code()) diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java index 14dd429b8a4f8..1ad411f86010f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java @@ -32,11 +32,7 @@ public static class Builder extends AbstractRequest.Builder> assignment + ) { + List topicPartitions = assignment.entrySet().stream() + .map(keyValue -> new ShareGroupHeartbeatResponseData.TopicPartitions() + 
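// Illustrative sketch, not part of this patch: callers that previously held a ByteBuffer can adapt
// to the new parse(Readable, short) overloads by wrapping the buffer in a ByteBufferAccessor,
// which implements Readable. The helper method name and the buffer itself are assumptions made
// purely for illustration.
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.requests.ShareFetchResponse;

import java.nio.ByteBuffer;

public class ParseReadableSketch {
    static ShareFetchResponse parseShareFetch(ByteBuffer buffer, short version) {
        return ShareFetchResponse.parse(new ByteBufferAccessor(buffer), version);
    }
}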
.setTopicId(keyValue.getKey()) + .setPartitions(new ArrayList<>(keyValue.getValue()))) + .collect(Collectors.toList()); + + return new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(topicPartitions); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java index cf14373d43750..efee6e521f4fc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -70,9 +69,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static StreamsGroupDescribeResponse parse(ByteBuffer buffer, short version) { + public static StreamsGroupDescribeResponse parse(Readable readable, short version) { return new StreamsGroupDescribeResponse( - new StreamsGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) + new StreamsGroupDescribeResponseData(readable, version) ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java index 760d1e33d224c..32fe55f12cdd4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -72,9 +71,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static StreamsGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { + public static StreamsGroupHeartbeatResponse parse(Readable readable, short version) { return new StreamsGroupHeartbeatResponse(new StreamsGroupHeartbeatResponseData( - new ByteBufferAccessor(buffer), version)); + readable, version)); } public enum Status { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java index 596110242902c..c31092bdbf78a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java @@ -18,10 +18,9 @@ import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Map; public class SyncGroupResponse extends 
AbstractResponse { @@ -62,8 +61,8 @@ public String toString() { return data.toString(); } - public static SyncGroupResponse parse(ByteBuffer buffer, short version) { - return new SyncGroupResponse(new SyncGroupResponseData(new ByteBufferAccessor(buffer), version)); + public static SyncGroupResponse parse(Readable readable, short version) { + return new SyncGroupResponse(new SyncGroupResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java index ce7dd9e7f1cbb..dea99cf2b0709 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java @@ -21,10 +21,9 @@ import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition; import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -191,8 +190,8 @@ public Map errors() { return errorMap; } - public static TxnOffsetCommitResponse parse(ByteBuffer buffer, short version) { - return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(new ByteBufferAccessor(buffer), version)); + public static TxnOffsetCommitResponse parse(Readable readable, short version) { + return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java index 6ede317f8f9ab..a0f71a7021f94 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.UnregisterBrokerResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -58,8 +57,8 @@ public Map errorCounts() { return errorCounts; } - public static UnregisterBrokerResponse parse(ByteBuffer buffer, short version) { - return new UnregisterBrokerResponse(new UnregisterBrokerResponseData(new ByteBufferAccessor(buffer), version)); + public static UnregisterBrokerResponse parse(Readable readable, short version) { + return new UnregisterBrokerResponse(new UnregisterBrokerResponseData(readable, version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java index 14aa2043358a8..895cfd0f85fb6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java @@ -20,10 +20,9 @@ import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResult; 
import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResultCollection; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; import java.util.Set; @@ -79,8 +78,8 @@ public UpdateFeaturesResponseData data() { return data; } - public static UpdateFeaturesResponse parse(ByteBuffer buffer, short version) { - return new UpdateFeaturesResponse(new UpdateFeaturesResponseData(new ByteBufferAccessor(buffer), version)); + public static UpdateFeaturesResponse parse(Readable readable, short version) { + return new UpdateFeaturesResponse(new UpdateFeaturesResponseData(readable, version)); } public static UpdateFeaturesResponse createWithErrors(ApiError topLevelError, Set updates, int throttleTimeMs) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java index 5c89caed2ef94..f52157234fa44 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -58,8 +57,8 @@ public Map errorCounts() { } } - public static UpdateRaftVoterResponse parse(ByteBuffer buffer, short version) { + public static UpdateRaftVoterResponse parse(Readable readable, short version) { return new UpdateRaftVoterResponse( - new UpdateRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); + new UpdateRaftVoterResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java index cb0d86c867d51..9f3797dc3c017 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java @@ -19,10 +19,9 @@ import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.EnumMap; import java.util.Map; @@ -76,7 +75,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static VoteResponse parse(ByteBuffer buffer, short version) { - return new VoteResponse(new VoteResponseData(new ByteBufferAccessor(buffer), version)); + public static VoteResponse parse(Readable readable, short version) { + return new VoteResponse(new VoteResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java index 3422eb5d94dee..35619791540d9 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java @@ -33,11 +33,7 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new WriteShareGroupStateResponse(new WriteShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -82,8 +78,8 @@ public WriteShareGroupStateRequestData data() { public static WriteShareGroupStateRequest parse(Readable readable, short version) { return new WriteShareGroupStateRequest( - new WriteShareGroupStateRequestData(readable, version), - version + new WriteShareGroupStateRequestData(readable, version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java index 29f160f8f48be..799ec80d228e6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java @@ -21,12 +21,10 @@ import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -48,9 +46,9 @@ public WriteShareGroupStateResponseData data() { public Map errorCounts() { Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -65,49 +63,49 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static WriteShareGroupStateResponse parse(ByteBuffer buffer, short version) { + public static WriteShareGroupStateResponse parse(Readable readable, short version) { return new WriteShareGroupStateResponse( - new WriteShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) + new WriteShareGroupStateResponseData(readable, version) ); } public static WriteShareGroupStateResponseData 
toResponseData(Uuid topicId, int partitionId) { return new WriteShareGroupStateResponseData() - .setResults(Collections.singletonList( - new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(Collections.singletonList( - new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId))))); + .setResults(List.of( + new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicId) + .setPartitions(List.of( + new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId))))); } public static WriteShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { WriteShareGroupStateResponseData responseData = new WriteShareGroupStateResponseData(); - responseData.setResults(Collections.singletonList(new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(Collections.singletonList(new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); + responseData.setResults(List.of(new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicId) + .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage))))); return responseData; } public static WriteShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult(int partitionId, Errors error, String errorMessage) { return new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static WriteShareGroupStateResponseData.WriteStateResult toResponseWriteStateResult(Uuid topicId, List partitionResults) { return new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); + .setTopicId(topicId) + .setPartitions(partitionResults); } public static WriteShareGroupStateResponseData.PartitionResult toResponsePartitionResult(int partitionId) { return new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId); + .setPartition(partitionId); } public static WriteShareGroupStateResponseData toGlobalErrorResponse(WriteShareGroupStateRequestData request, Errors error) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java index 2bcb00c928a47..d4269ef81cfef 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java @@ -22,10 +22,9 @@ import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerResult; import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumMap; import java.util.HashMap; @@ -126,7 +125,7 @@ public Map errorCounts() { return errorCounts; } - public static WriteTxnMarkersResponse 
parse(ByteBuffer buffer, short version) { - return new WriteTxnMarkersResponse(new WriteTxnMarkersResponseData(new ByteBufferAccessor(buffer), version)); + public static WriteTxnMarkersResponse parse(Readable readable, short version) { + return new WriteTxnMarkersResponse(new WriteTxnMarkersResponseData(readable, version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java index 029b6881fdb58..865762c5c364c 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java @@ -33,6 +33,7 @@ import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.Configuration; +import static org.apache.kafka.common.security.JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_DEFAULT; @@ -103,15 +104,37 @@ else if (contextModules.length != 1) return defaultContext(contextType, listenerContextName, globalContextName); } - private static void throwIfLoginModuleIsNotAllowed(AppConfigurationEntry appConfigurationEntry) { - Set disallowedLoginModuleList = Arrays.stream( - System.getProperty(DISALLOWED_LOGIN_MODULES_CONFIG, DISALLOWED_LOGIN_MODULES_DEFAULT).split(",")) + @SuppressWarnings("deprecation") + // Visible for testing + static void throwIfLoginModuleIsNotAllowed(AppConfigurationEntry appConfigurationEntry) { + String disallowedProperty = System.getProperty(DISALLOWED_LOGIN_MODULES_CONFIG); + if (disallowedProperty != null) { + LOG.warn("System property '{}' is deprecated and will be removed in a future release. Use '{}' instead.", + DISALLOWED_LOGIN_MODULES_CONFIG, ALLOWED_LOGIN_MODULES_CONFIG); + } + String loginModuleName = appConfigurationEntry.getLoginModuleName().trim(); + String allowedProperty = System.getProperty(ALLOWED_LOGIN_MODULES_CONFIG); + if (allowedProperty != null) { + Set allowedLoginModuleList = Arrays.stream(allowedProperty.split(",")) + .map(String::trim) + .collect(Collectors.toSet()); + if (!allowedLoginModuleList.contains(loginModuleName)) { + throw new IllegalArgumentException(loginModuleName + " is not allowed. Update System property '" + + ALLOWED_LOGIN_MODULES_CONFIG + "' to allow " + loginModuleName); + } + return; + } + if (disallowedProperty == null) { + disallowedProperty = DISALLOWED_LOGIN_MODULES_DEFAULT; + } + Set disallowedLoginModuleList = Arrays.stream(disallowedProperty.split(",")) .map(String::trim) .collect(Collectors.toSet()); - String loginModuleName = appConfigurationEntry.getLoginModuleName().trim(); if (disallowedLoginModuleList.contains(loginModuleName)) { - throw new IllegalArgumentException(loginModuleName + " is not allowed. Update System property '" - + DISALLOWED_LOGIN_MODULES_CONFIG + "' to allow " + loginModuleName); + throw new IllegalArgumentException(loginModuleName + " is not allowed. " + + "The system property '" + DISALLOWED_LOGIN_MODULES_CONFIG + "' is deprecated. " + + "Use the " + ALLOWED_LOGIN_MODULES_CONFIG + " to allow this module. 
e.g.," + + "-D" + ALLOWED_LOGIN_MODULES_CONFIG + "=" + loginModuleName); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java index cfbca0c6d6185..16c25d06c1ac6 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java @@ -18,7 +18,10 @@ public final class JaasUtils { public static final String JAVA_LOGIN_CONFIG_PARAM = "java.security.auth.login.config"; + @Deprecated(since = "4.2") public static final String DISALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.disallowed.login.modules"; + public static final String ALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.allowed.login.modules"; + @Deprecated(since = "4.2") public static final String DISALLOWED_LOGIN_MODULES_DEFAULT = "com.sun.security.auth.module.JndiLoginModule,com.sun.security.auth.module.LdapLoginModule"; public static final String SERVICE_NAME = "serviceName"; diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java index ec4317268d1d5..92be58ea2dcf0 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java @@ -23,12 +23,8 @@ * Note that the {@link org.apache.kafka.common.Configurable} and {@link java.io.Closeable} * interfaces are respected if implemented. Additionally, implementations must provide a * default no-arg constructor. - * - * Note that custom implementations of {@link KafkaPrincipalBuilder} - * must also implement {@link KafkaPrincipalSerde}, otherwise brokers will not be able to - * forward requests to the controller. */ -public interface KafkaPrincipalBuilder { +public interface KafkaPrincipalBuilder extends KafkaPrincipalSerde { /** * Build a kafka principal from the authentication context. * @param context The authentication context (either {@link SslAuthenticationContext} or diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java index fa654bcb9280d..5ba472263ddeb 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java @@ -25,7 +25,6 @@ import org.apache.kafka.common.security.auth.AuthenticationContext; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder; -import org.apache.kafka.common.security.auth.KafkaPrincipalSerde; import org.apache.kafka.common.security.auth.PlaintextAuthenticationContext; import org.apache.kafka.common.security.auth.SaslAuthenticationContext; import org.apache.kafka.common.security.auth.SslAuthenticationContext; @@ -50,7 +49,7 @@ * * NOTE: This is an internal class and can change without notice. 
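// Illustrative sketch, not part of this patch: opting in to the allow-list introduced in the
// JaasContext/JaasUtils changes above. The login module chosen here is just an example value.
import org.apache.kafka.common.security.JaasUtils;

public class AllowedLoginModulesSketch {
    public static void main(String[] args) {
        // Equivalent to passing -Dorg.apache.kafka.allowed.login.modules=... on the command line.
        // When this property is set it takes precedence; the deprecated
        // org.apache.kafka.disallowed.login.modules property is only consulted when it is absent.
        System.setProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG,
            "org.apache.kafka.common.security.plain.PlainLoginModule");
    }
}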
*/ -public class DefaultKafkaPrincipalBuilder implements KafkaPrincipalBuilder, KafkaPrincipalSerde { +public class DefaultKafkaPrincipalBuilder implements KafkaPrincipalBuilder { private final KerberosShortNamer kerberosShortNamer; private final SslPrincipalMapper sslPrincipalMapper; diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java index addacd92722c8..25653636b403d 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java @@ -690,7 +690,7 @@ public void setAuthenticationEndAndSessionReauthenticationTimes(long nowNanos) { double pctToUse = pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + RNG.nextDouble() * pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously; sessionLifetimeMsToUse = (long) (positiveSessionLifetimeMs * pctToUse); - clientSessionReauthenticationTimeNanos = authenticationEndNanos + 1000 * 1000 * sessionLifetimeMsToUse; + clientSessionReauthenticationTimeNanos = Math.addExact(authenticationEndNanos, Utils.msToNs(sessionLifetimeMsToUse)); log.debug( "Finished {} with session expiration in {} ms and session re-authentication on or after {} ms", authenticationOrReauthenticationText(), positiveSessionLifetimeMs, sessionLifetimeMsToUse); diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index e2ebaa31cd260..b84b5dc2abc94 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -318,7 +318,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); + return Optional.of(principalBuilder); } @Override @@ -681,7 +681,7 @@ else if (!maxReauthSet) else retvalSessionLifetimeMs = zeroIfNegative(Math.min(credentialExpirationMs - authenticationEndMs, connectionsMaxReauthMs)); - sessionExpirationTimeNanos = authenticationEndNanos + 1000 * 1000 * retvalSessionLifetimeMs; + sessionExpirationTimeNanos = Math.addExact(authenticationEndNanos, Utils.msToNs(retvalSessionLifetimeMs)); } if (credentialExpirationMs != null) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.java similarity index 53% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.java index c7ae8edae9d93..c69db033052a7 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidator.java @@ -15,9 +15,14 @@ * limitations under the License. 
*/ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.internals.secured.BasicOAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ClaimValidationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.SerializedJwt; +import org.apache.kafka.common.security.oauthbearer.internals.secured.VerificationKeyResolverFactory; import org.jose4j.jwt.JwtClaims; import org.jose4j.jwt.MalformedClaimException; @@ -27,32 +32,43 @@ import org.jose4j.jwt.consumer.JwtConsumer; import org.jose4j.jwt.consumer.JwtConsumerBuilder; import org.jose4j.jwt.consumer.JwtContext; -import org.jose4j.keys.resolvers.VerificationKeyResolver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collection; import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.Set; +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME; import static org.jose4j.jwa.AlgorithmConstraints.DISALLOW_NONE; /** - * ValidatorAccessTokenValidator is an implementation of {@link AccessTokenValidator} that is used + * {@code BrokerJwtValidator} is an implementation of {@link JwtValidator} that is used * by the broker to perform more extensive validation of the JWT access token that is received * from the client, but ultimately from posting the client credentials to the OAuth/OIDC provider's * token endpoint. * - * The validation steps performed (primary by the jose4j library) are: + * The validation steps performed (primarily by the jose4j library) are: * *
      *
    1. * Basic structural validation of the b64token value as defined in * RFC 6750 Section 2.1 *
    2. - *
    3. Basic conversion of the token into an in-memory data structure
    4. *
    5. - * Presence of scope, exp, subject, iss, and + * Basic conversion of the token into an in-memory data structure + *
    6. + *
    7. + * Presence of scope, exp, subject, iss, and * iat claims *
    8. *
    9. @@ -61,69 +77,51 @@ *
    10. *
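A rough, illustrative sketch of the jose4j consumer that the validation steps above boil down to; it is not lifted from the class, and the literal clock skew, audience, and issuer values stand in for the corresponding sasl.oauthbearer.* settings that configure() reads.

    import org.jose4j.jwt.consumer.JwtConsumer;
    import org.jose4j.jwt.consumer.JwtConsumerBuilder;
    import org.jose4j.jwt.consumer.JwtContext;
    import org.jose4j.keys.resolvers.VerificationKeyResolver;

    import static org.jose4j.jwa.AlgorithmConstraints.DISALLOW_NONE;

    // Illustrative only: roughly the jose4j consumer the broker-side validator assembles.
    class BrokerJwtValidationSketch {
        static JwtContext check(String serializedJwt, VerificationKeyResolver resolver) throws Exception {
            JwtConsumer consumer = new JwtConsumerBuilder()
                .setAllowedClockSkewInSeconds(30)                // sasl.oauthbearer.clock.skew.seconds
                .setExpectedAudience("my-application-audience")  // sasl.oauthbearer.expected.audience
                .setExpectedIssuer("https://example.com/oauth2") // sasl.oauthbearer.expected.issuer
                .setJwsAlgorithmConstraints(DISALLOW_NONE)       // refuse unsigned ("none") tokens
                .setVerificationKeyResolver(resolver)            // keys resolved from the provider's JWKS
                .setRequireExpirationTime()
                .build();
            return consumer.process(serializedJwt);              // throws InvalidJwtException on failure
        }
    }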
    */ +public class BrokerJwtValidator implements JwtValidator { -public class ValidatorAccessTokenValidator implements AccessTokenValidator { + private static final Logger log = LoggerFactory.getLogger(BrokerJwtValidator.class); - private static final Logger log = LoggerFactory.getLogger(ValidatorAccessTokenValidator.class); + private final Optional verificationKeyResolverOpt; - private final JwtConsumer jwtConsumer; + private JwtConsumer jwtConsumer; - private final String scopeClaimName; + private String scopeClaimName; - private final String subClaimName; + private String subClaimName; /** - * Creates a new ValidatorAccessTokenValidator that will be used by the broker for more - * thorough validation of the JWT. - * - * @param clockSkew The optional value (in seconds) to allow for differences - * between the time of the OAuth/OIDC identity provider and - * the broker. If null is provided, the broker - * and the OAUth/OIDC identity provider are assumed to have - * very close clock settings. - * @param expectedAudiences The (optional) set the broker will use to verify that - * the JWT was issued for one of the expected audiences. - * The JWT will be inspected for the standard OAuth - * aud claim and if this value is set, the - * broker will match the value from JWT's aud - * claim to see if there is an exact match. If there is no - * match, the broker will reject the JWT and authentication - * will fail. May be null to not perform any - * check to verify the JWT's aud claim matches any - * fixed set of known/expected audiences. - * @param expectedIssuer The (optional) value for the broker to use to verify that - * the JWT was created by the expected issuer. The JWT will - * be inspected for the standard OAuth iss claim - * and if this value is set, the broker will match it - * exactly against what is in the JWT's iss - * claim. If there is no match, the broker will reject the JWT - * and authentication will fail. May be null to not - * perform any check to verify the JWT's iss claim - * matches a specific issuer. - * @param verificationKeyResolver jose4j-based {@link VerificationKeyResolver} that is used - * to validate the signature matches the contents of the header - * and payload - * @param scopeClaimName Name of the scope claim to use; must be non-null - * @param subClaimName Name of the subject claim to use; must be - * non-null - * - * @see JwtConsumerBuilder - * @see JwtConsumer - * @see VerificationKeyResolver + * A public, no-args constructor is necessary for instantiation via configuration. + */ + public BrokerJwtValidator() { + this.verificationKeyResolverOpt = Optional.empty(); + } + + /* + * Package-visible for testing. 
*/ + BrokerJwtValidator(CloseableVerificationKeyResolver verificationKeyResolver) { + this.verificationKeyResolverOpt = Optional.of(verificationKeyResolver); + } + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + Set expectedAudiences = Set.copyOf(cu.get(SASL_OAUTHBEARER_EXPECTED_AUDIENCE)); + Integer clockSkew = cu.validateInteger(SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, false); + String expectedIssuer = cu.validateString(SASL_OAUTHBEARER_EXPECTED_ISSUER, false); + String scopeClaimName = cu.validateString(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME); + String subClaimName = cu.validateString(SASL_OAUTHBEARER_SUB_CLAIM_NAME); + + CloseableVerificationKeyResolver verificationKeyResolver = verificationKeyResolverOpt.orElseGet( + () -> VerificationKeyResolverFactory.get(configs, saslMechanism, jaasConfigEntries) + ); - public ValidatorAccessTokenValidator(Integer clockSkew, - Set expectedAudiences, - String expectedIssuer, - VerificationKeyResolver verificationKeyResolver, - String scopeClaimName, - String subClaimName) { final JwtConsumerBuilder jwtConsumerBuilder = new JwtConsumerBuilder(); if (clockSkew != null) jwtConsumerBuilder.setAllowedClockSkewInSeconds(clockSkew); - if (expectedAudiences != null && !expectedAudiences.isEmpty()) + if (!expectedAudiences.isEmpty()) jwtConsumerBuilder.setExpectedAudience(expectedAudiences.toArray(new String[0])); if (expectedIssuer != null) @@ -145,11 +143,11 @@ public ValidatorAccessTokenValidator(Integer clockSkew, * * @param accessToken Non-null JWT access token * @return {@link OAuthBearerToken} - * @throws ValidateException Thrown on errors performing validation of given token + * @throws JwtValidatorException Thrown on errors performing validation of given token */ @SuppressWarnings("unchecked") - public OAuthBearerToken validate(String accessToken) throws ValidateException { + public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { SerializedJwt serializedJwt = new SerializedJwt(accessToken); JwtContext jwt; @@ -157,7 +155,7 @@ public OAuthBearerToken validate(String accessToken) throws ValidateException { try { jwt = jwtConsumer.process(serializedJwt.getToken()); } catch (InvalidJwtException e) { - throw new ValidateException(String.format("Could not validate the access token: %s", e.getMessage()), e); + throw new JwtValidatorException(String.format("Could not validate the access token: %s", e.getMessage()), e); } JwtClaims claims = jwt.getJwtClaims(); @@ -190,13 +188,13 @@ else if (scopeRaw instanceof Collection) issuedAt); } - private T getClaim(ClaimSupplier supplier, String claimName) throws ValidateException { + private T getClaim(ClaimSupplier supplier, String claimName) throws JwtValidatorException { try { T value = supplier.get(); log.debug("getClaim - {}: {}", claimName, value); return value; } catch (MalformedClaimException e) { - throw new ValidateException(String.format("Could not extract the '%s' claim from the access token", claimName), e); + throw new JwtValidatorException(String.format("Could not extract the '%s' claim from the access token", claimName), e); } } @@ -205,5 +203,4 @@ public interface ClaimSupplier { T get() throws MalformedClaimException; } - } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientCredentialsJwtRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientCredentialsJwtRetriever.java new file mode 
100644 index 0000000000000..4744fd9128999 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientCredentialsJwtRetriever.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ClientCredentialsRequestFormatter; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpJwtRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpRequestFormatter; +import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils; +import org.apache.kafka.common.utils.Utils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; + +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_JAAS_CONFIG; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.SCOPE_CONFIG; + +/** + * {@code ClientCredentialsJwtRetriever} is a {@link JwtRetriever} that performs the steps to request + * a JWT from an OAuth/OIDC identity provider using the client_credentials grant type. This + * grant type is commonly used for non-interactive "service accounts" where there is no user available + * to interactively supply credentials. + * + *

    + * + * This {@code JwtRetriever} is enabled by specifying its class name in the Kafka configuration. + * For client use, specify the class name in the sasl.oauthbearer.jwt.retriever.class + * configuration like so: + * + *

    + * sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    + * 
    + * + *

    + * + * If using this {@code JwtRetriever} on the broker side (for inter-broker communication), the configuration + * should be specified with a listener-based property: + * + *

    + * listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    + * 
    + * + *

    + * + * The {@code ClientCredentialsJwtRetriever} also uses the following configuration: + * + *

+ * <ul>
+ *     <li>sasl.oauthbearer.client.credentials.client.id</li>
+ *     <li>sasl.oauthbearer.client.credentials.client.secret</li>
+ *     <li>sasl.oauthbearer.scope</li>
+ *     <li>sasl.oauthbearer.token.endpoint.url</li>
+ * </ul>
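A minimal sketch, not from the patch, of driving the retriever directly with the properties listed above; the values are placeholders, OAUTHBEARER is assumed as the SASL mechanism, and no JAAS options are passed.

    import java.util.List;
    import java.util.Map;

    import org.apache.kafka.common.config.SaslConfigs;
    import org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever;
    import org.apache.kafka.common.security.oauthbearer.JwtRetriever;

    // Illustrative only: drives the retriever outside the login module; values are placeholders.
    class ClientCredentialsRetrieverSketch {
        static String fetchJwt() throws Exception {
            Map<String, Object> configs = Map.of(
                SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "https://example.com/oauth2/v1/token",
                SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, "jdoe",
                SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, "$3cr3+",
                SaslConfigs.SASL_OAUTHBEARER_SCOPE, "my-application-scope");

            // OAUTHBEARER is the SASL mechanism; no JAAS options are supplied here.
            try (JwtRetriever retriever = new ClientCredentialsJwtRetriever()) {
                retriever.configure(configs, "OAUTHBEARER", List.of());
                return retriever.retrieve();   // serialized three-part JWT
            }
        }
    }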
+ * + * Please refer to the official Apache Kafka documentation for more information on these and related configuration. + * + *

    + * + * Previous versions of this implementation used sasl.jaas.config to specify attributes such + * as clientId, clientSecret, and scope. These will still work, but + * if the configuration for each of these is specified, it will be used instead of the JAAS option. + * + *

    + * + * Here's an example of the JAAS configuration for a Kafka client: + * + *

    + * sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;
    + *
    + * sasl.oauthbearer.client.credentials.client.id=jdoe
    + * sasl.oauthbearer.client.credentials.client.secret=$3cr3+
    + * sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
    + * sasl.oauthbearer.scope=my-application-scope
    + * sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token
    + * 
    + */ +public class ClientCredentialsJwtRetriever implements JwtRetriever { + + private static final Logger LOG = LoggerFactory.getLogger(ClientCredentialsJwtRetriever.class); + + private HttpJwtRetriever delegate; + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + JaasOptionsUtils jou = new JaasOptionsUtils(saslMechanism, jaasConfigEntries); + + ConfigOrJaas configOrJaas = new ConfigOrJaas(cu, jou); + String clientId = configOrJaas.clientId(); + String clientSecret = configOrJaas.clientSecret(); + String scope = configOrJaas.scope(); + boolean urlencodeHeader = validateUrlencodeHeader(cu); + + HttpRequestFormatter requestFormatter = new ClientCredentialsRequestFormatter( + clientId, + clientSecret, + scope, + urlencodeHeader + ); + + delegate = new HttpJwtRetriever(requestFormatter); + delegate.configure(configs, saslMechanism, jaasConfigEntries); + } + + @Override + public String retrieve() throws JwtRetrieverException { + if (delegate == null) + throw new IllegalStateException("JWT retriever delegate is null; please call configure() first"); + + return delegate.retrieve(); + } + + @Override + public void close() throws IOException { + Utils.closeQuietly(delegate, "JWT retriever delegate"); + } + + /** + * In some cases, the incoming {@link Map} doesn't contain a value for + * {@link SaslConfigs#SASL_OAUTHBEARER_HEADER_URLENCODE}. Returning {@code null} from {@link Map#get(Object)} + * will cause a {@link NullPointerException} when it is later unboxed. + * + *

    + * + * This utility method ensures that we have a non-{@code null} value to use in the + * {@link HttpJwtRetriever} constructor. + */ + static boolean validateUrlencodeHeader(ConfigurationUtils configurationUtils) { + Boolean urlencodeHeader = configurationUtils.get(SASL_OAUTHBEARER_HEADER_URLENCODE); + return Objects.requireNonNullElse(urlencodeHeader, DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE); + } + + /** + * Retrieves the values first from configuration, then falls back to JAAS, and, if required, throws an error. + */ + private static class ConfigOrJaas { + + private final ConfigurationUtils cu; + private final JaasOptionsUtils jou; + + private ConfigOrJaas(ConfigurationUtils cu, JaasOptionsUtils jou) { + this.cu = cu; + this.jou = jou; + } + + private String clientId() { + return getValue( + SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, + CLIENT_ID_CONFIG, + true, + cu::validateString, + jou::validateString + ); + } + + private String clientSecret() { + return getValue( + SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, + CLIENT_SECRET_CONFIG, + true, + cu::validatePassword, + jou::validateString + ); + } + + private String scope() { + return getValue( + SASL_OAUTHBEARER_SCOPE, + SCOPE_CONFIG, + false, + cu::validateString, + jou::validateString + ); + } + + private String getValue(String configName, + String jaasName, + boolean isRequired, + Function configValueGetter, + Function jaasValueGetter) { + boolean isPresentInConfig = cu.containsKey(configName); + boolean isPresentInJaas = jou.containsKey(jaasName); + + if (isPresentInConfig) { + if (isPresentInJaas) { + // Log if the user is using the deprecated JAAS option. + LOG.warn( + "Both the OAuth configuration {} as well as the JAAS option {} (from the {} configuration) were provided. " + + "Since the {} JAAS option is deprecated, it will be ignored and the value from the {} configuration will be used. " + + "Please update your configuration to only use {}.", + configName, + jaasName, + SASL_JAAS_CONFIG, + jaasName, + configName, + configName + ); + } + + return configValueGetter.apply(configName); + } else if (isPresentInJaas) { + String value = jaasValueGetter.apply(jaasName); + + // Log if the user is using the deprecated JAAS option. + LOG.warn( + "The OAuth JAAS option {} was configured in {}, but that JAAS option is deprecated and will be removed. " + + "Please update your configuration to use the {} configuration instead.", + jaasName, + SASL_JAAS_CONFIG, + configName + ); + + return value; + } else if (isRequired) { + throw new ConfigException(configName, null); + } else { + return null; + } + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidator.java similarity index 70% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidator.java index 773311ff0ab18..53cd88f24dd9c 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidator.java @@ -15,9 +15,12 @@ * limitations under the License. 
*/ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.internals.secured.BasicOAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ClaimValidationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.SerializedJwt; import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerIllegalTokenException; import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredJws; @@ -26,14 +29,19 @@ import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; +import javax.security.auth.login.AppConfigurationEntry; + import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME; /** - * LoginAccessTokenValidator is an implementation of {@link AccessTokenValidator} that is used + * {@code ClientJwtValidator} is an implementation of {@link JwtValidator} that is used * by the client to perform some rudimentary validation of the JWT access token that is received * as part of the response from posting the client credentials to the OAuth/OIDC provider's * token endpoint. @@ -46,33 +54,33 @@ * RFC 6750 Section 2.1 *

 *     <li>Basic conversion of the token into an in-memory map</li>
- *     <li>Presence of scope, exp, subject, and iat claims</li>
+ *     <li>Presence of scope, exp, subject, and iat claims</li>
  • * */ -public class LoginAccessTokenValidator implements AccessTokenValidator { +public class ClientJwtValidator implements JwtValidator { - private static final Logger log = LoggerFactory.getLogger(LoginAccessTokenValidator.class); + private static final Logger log = LoggerFactory.getLogger(ClientJwtValidator.class); public static final String EXPIRATION_CLAIM_NAME = "exp"; public static final String ISSUED_AT_CLAIM_NAME = "iat"; - private final String scopeClaimName; - - private final String subClaimName; - - /** - * Creates a new LoginAccessTokenValidator that will be used by the client for lightweight - * validation of the JWT. - * - * @param scopeClaimName Name of the scope claim to use; must be non-null - * @param subClaimName Name of the subject claim to use; must be non-null - */ - - public LoginAccessTokenValidator(String scopeClaimName, String subClaimName) { - this.scopeClaimName = ClaimValidationUtils.validateClaimNameOverride(DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, scopeClaimName); - this.subClaimName = ClaimValidationUtils.validateClaimNameOverride(DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, subClaimName); + private String scopeClaimName; + + private String subClaimName; + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + this.scopeClaimName = ClaimValidationUtils.validateClaimNameOverride( + DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, + cu.get(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME) + ); + this.subClaimName = ClaimValidationUtils.validateClaimNameOverride( + DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, + cu.get(SASL_OAUTHBEARER_SUB_CLAIM_NAME) + ); } /** @@ -81,18 +89,18 @@ public LoginAccessTokenValidator(String scopeClaimName, String subClaimName) { * * @param accessToken Non-null JWT access token * @return {@link OAuthBearerToken} - * @throws ValidateException Thrown on errors performing validation of given token + * @throws JwtValidatorException Thrown on errors performing validation of given token */ @SuppressWarnings("unchecked") - public OAuthBearerToken validate(String accessToken) throws ValidateException { + public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { SerializedJwt serializedJwt = new SerializedJwt(accessToken); Map payload; try { payload = OAuthBearerUnsecuredJws.toMap(serializedJwt.getPayload()); } catch (OAuthBearerIllegalTokenException e) { - throw new ValidateException(String.format("Could not validate the access token: %s", e.getMessage()), e); + throw new JwtValidatorException(String.format("Could not validate the access token: %s", e.getMessage()), e); } Object scopeRaw = getClaim(payload, scopeClaimName); diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetriever.java new file mode 100644 index 0000000000000..5044a36aba515 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetriever.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.ClientCredentialsRequestFormatter; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.utils.Utils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URL; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; + +/** + * {@code DefaultJwtRetriever} instantiates and delegates {@link JwtRetriever} API calls to an embedded implementation + * based on configuration: + * + *
+ * <ul>
+ *     <li>
+ *         If the value of sasl.oauthbearer.token.endpoint.url is set to a value that starts with the
+ *         file protocol (e.g. file:/tmp/path/to/a/static-jwt.json), an instance of
+ *         {@link FileJwtRetriever} will be used as the underlying {@link JwtRetriever}. Otherwise, the URL is
+ *         assumed to be an HTTP/HTTPS-based URL, and an instance of {@link ClientCredentialsRequestFormatter} will
+ *         be created and used.
+ *     </li>
+ * </ul>
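As an illustrative configuration sketch (values are placeholders), the same property selects which delegate is created:

    # HTTP/HTTPS endpoint: ClientCredentialsJwtRetriever is used under the covers
    sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

    # file: URL pointing at a static, serialized JWT: FileJwtRetriever is used instead
    sasl.oauthbearer.token.endpoint.url=file:/tmp/path/to/a/static-jwt.json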
    + * + * The configuration required by the individual {@code JwtRetriever} classes will likely differ. Please refer to the + * official Apache Kafka documentation for more information on these, and related configuration. + */ +public class DefaultJwtRetriever implements JwtRetriever { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultJwtRetriever.class); + + private JwtRetriever delegate; + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + URL tokenEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL); + + if (tokenEndpointUrl.getProtocol().toLowerCase(Locale.ROOT).equals("file")) + delegate = new FileJwtRetriever(); + else + delegate = new ClientCredentialsJwtRetriever(); + + LOG.debug("Created instance of {} as delegate", delegate.getClass().getName()); + delegate.configure(configs, saslMechanism, jaasConfigEntries); + } + + @Override + public String retrieve() throws JwtRetrieverException { + if (delegate == null) + throw new IllegalStateException("JWT retriever delegate is null; please call configure() first"); + + return delegate.retrieve(); + } + + @Override + public void close() throws IOException { + Utils.closeQuietly(delegate, "JWT retriever delegate"); + } + + JwtRetriever delegate() { + return delegate; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidator.java new file mode 100644 index 0000000000000..478a0fdc91612 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidator.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; +import org.apache.kafka.common.utils.Utils; + +import org.jose4j.keys.resolvers.VerificationKeyResolver; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.security.auth.login.AppConfigurationEntry; + +/** + * This {@link JwtValidator} uses the delegation approach, instantiating and delegating calls to a + * more concrete implementation. The underlying implementation is determined by the presence/absence + * of the {@link VerificationKeyResolver}: if it's present, a {@link BrokerJwtValidator} is + * created, otherwise a {@link ClientJwtValidator} is created. 
+ */ +public class DefaultJwtValidator implements JwtValidator { + + private final Optional verificationKeyResolver; + + private JwtValidator delegate; + + public DefaultJwtValidator() { + this.verificationKeyResolver = Optional.empty(); + } + + public DefaultJwtValidator(CloseableVerificationKeyResolver verificationKeyResolver) { + this.verificationKeyResolver = Optional.of(verificationKeyResolver); + } + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + if (verificationKeyResolver.isPresent()) { + delegate = new BrokerJwtValidator(verificationKeyResolver.get()); + } else { + delegate = new ClientJwtValidator(); + } + + delegate.configure(configs, saslMechanism, jaasConfigEntries); + } + + @Override + public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { + if (delegate == null) + throw new IllegalStateException("JWT validator delegate is null; please call configure() first"); + + return delegate.validate(accessToken); + } + + @Override + public void close() throws IOException { + Utils.closeQuietly(delegate, "JWT validator delegate"); + } + + JwtValidator delegate() { + return delegate; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/FileJwtRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/FileJwtRetriever.java new file mode 100644 index 0000000000000..eeaee1cfb53e3 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/FileJwtRetriever.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; + +import java.io.File; +import java.util.List; +import java.util.Map; + +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.RefreshPolicy.lastModifiedPolicy; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.STRING_JSON_VALIDATING_TRANSFORMER; + +/** + * FileJwtRetriever is an {@link JwtRetriever} that will load the contents + * of a file, interpreting them as a JWT access key in the serialized form. 
+ */ +public class FileJwtRetriever implements JwtRetriever { + + private CachedFile jwtFile; + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + File file = cu.validateFileUrl(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL); + jwtFile = new CachedFile<>(file, STRING_JSON_VALIDATING_TRANSFORMER, lastModifiedPolicy()); + } + + @Override + public String retrieve() throws JwtRetrieverException { + if (jwtFile == null) + throw new IllegalStateException("JWT is null; please call configure() first"); + + try { + return jwtFile.transformed(); + } catch (Exception e) { + throw new JwtRetrieverException(e); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetriever.java new file mode 100644 index 0000000000000..b6eb3a3937231 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetriever.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpJwtRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpRequestFormatter; +import org.apache.kafka.common.security.oauthbearer.internals.secured.JwtBearerRequestFormatter; +import org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionCreator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionJwtTemplate; +import org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.DefaultAssertionCreator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.FileAssertionCreator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.StaticAssertionJwtTemplate; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Utils; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.layeredAssertionJwtTemplate; + +/** + * {@code JwtBearerJwtRetriever} is a {@link JwtRetriever} that performs the steps to request + * a JWT from an OAuth/OIDC identity provider using the urn:ietf:params:oauth:grant-type:jwt-bearer + * grant type. This grant type is used for machine-to-machine "service accounts". + * + *

    + * + * This {@code JwtRetriever} is enabled by specifying its class name in the Kafka configuration. + * For client use, specify the class name in the sasl.oauthbearer.jwt.retriever.class + * configuration like so: + * + *

    + * sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
    + * 
    + * + *

    + * + * If using this {@code JwtRetriever} on the broker side (for inter-broker communication), the configuration + * should be specified with a listener-based property: + * + *

    + * listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
    + * 
    + * + *

    + * + * The {@code JwtBearerJwtRetriever} also uses the following configuration: + * + *

+ * <ul>
+ *     <li>sasl.oauthbearer.assertion.algorithm</li>
+ *     <li>sasl.oauthbearer.assertion.claim.aud</li>
+ *     <li>sasl.oauthbearer.assertion.claim.exp.seconds</li>
+ *     <li>sasl.oauthbearer.assertion.claim.iss</li>
+ *     <li>sasl.oauthbearer.assertion.claim.jti.include</li>
+ *     <li>sasl.oauthbearer.assertion.claim.nbf.seconds</li>
+ *     <li>sasl.oauthbearer.assertion.claim.sub</li>
+ *     <li>sasl.oauthbearer.assertion.file</li>
+ *     <li>sasl.oauthbearer.assertion.private.key.file</li>
+ *     <li>sasl.oauthbearer.assertion.private.key.passphrase</li>
+ *     <li>sasl.oauthbearer.assertion.template.file</li>
+ *     <li>sasl.oauthbearer.jwt.retriever.class</li>
+ *     <li>sasl.oauthbearer.scope</li>
+ *     <li>sasl.oauthbearer.token.endpoint.url</li>
+ * </ul>
+ * + * Please refer to the official Apache Kafka documentation for more information on these and related configuration. + * + *

    + * + * Here's an example of the JAAS configuration for a Kafka client: + * + *

    + * sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;
    + *
    + * sasl.oauthbearer.assertion.algorithm=RS256
    + * sasl.oauthbearer.assertion.claim.aud=my-application-audience
    + * sasl.oauthbearer.assertion.claim.exp.seconds=600
    + * sasl.oauthbearer.assertion.claim.iss=my-oauth-issuer
    + * sasl.oauthbearer.assertion.claim.jti.include=true
    + * sasl.oauthbearer.assertion.claim.nbf.seconds=120
    + * sasl.oauthbearer.assertion.claim.sub=kafka-app-1234
    + * sasl.oauthbearer.assertion.private.key.file=/path/to/private.key
    + * sasl.oauthbearer.assertion.private.key.passphrase=$3cr3+
    + * sasl.oauthbearer.assertion.template.file=/path/to/assertion-template.json
    + * sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
    + * sasl.oauthbearer.scope=my-application-scope
    + * sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token
    + * 
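Based on the configure() implementation below, a pre-built assertion can also be supplied from disk in place of the private-key settings; a hedged sketch with illustrative paths:

    sasl.oauthbearer.assertion.file=/path/to/prebuilt-assertion.jwt
    sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
    sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token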
    + */ +public class JwtBearerJwtRetriever implements JwtRetriever { + + private final Time time; + private HttpJwtRetriever delegate; + private AssertionJwtTemplate assertionJwtTemplate; + private AssertionCreator assertionCreator; + + public JwtBearerJwtRetriever() { + this(Time.SYSTEM); + } + + public JwtBearerJwtRetriever(Time time) { + this.time = time; + } + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + + String scope = cu.validateString(SASL_OAUTHBEARER_SCOPE, false); + + if (cu.validateString(SASL_OAUTHBEARER_ASSERTION_FILE, false) != null) { + File assertionFile = cu.validateFile(SASL_OAUTHBEARER_ASSERTION_FILE); + assertionCreator = new FileAssertionCreator(assertionFile); + assertionJwtTemplate = new StaticAssertionJwtTemplate(); + } else { + String algorithm = cu.validateString(SASL_OAUTHBEARER_ASSERTION_ALGORITHM); + File privateKeyFile = cu.validateFile(SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE); + Optional passphrase = cu.containsKey(SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE) ? + Optional.of(cu.validatePassword(SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE)) : + Optional.empty(); + + assertionCreator = new DefaultAssertionCreator(algorithm, privateKeyFile, passphrase); + assertionJwtTemplate = layeredAssertionJwtTemplate(cu, time); + } + + Supplier assertionSupplier = () -> { + try { + return assertionCreator.create(assertionJwtTemplate); + } catch (Exception e) { + throw new JwtRetrieverException(e); + } + }; + + HttpRequestFormatter requestFormatter = new JwtBearerRequestFormatter(scope, assertionSupplier); + + delegate = new HttpJwtRetriever(requestFormatter); + delegate.configure(configs, saslMechanism, jaasConfigEntries); + } + + @Override + public String retrieve() throws JwtRetrieverException { + if (delegate == null) + throw new IllegalStateException("JWT retriever delegate is null; please call configure() first"); + + return delegate.retrieve(); + } + + @Override + public void close() throws IOException { + Utils.closeQuietly(assertionCreator, "JWT assertion creator"); + Utils.closeQuietly(assertionJwtTemplate, "JWT assertion template"); + Utils.closeQuietly(delegate, "JWT retriever delegate"); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetriever.java similarity index 64% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetriever.java index 080ea4515b4dd..7510a27883c4c 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetriever.java @@ -15,26 +15,27 @@ * limitations under the License. 
*/ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpJwtRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable; -import java.io.Closeable; -import java.io.IOException; /** - * An AccessTokenRetriever is the internal API by which the login module will + * A JwtRetriever is the internal API by which the login module will * retrieve an access token for use in authorization by the broker. The implementation may * involve authentication to a remote system, or it can be as simple as loading the contents * of a file or configuration setting. * * Retrieval is a separate concern from validation, so it isn't necessary for - * the AccessTokenRetriever implementation to validate the integrity of the JWT + * the JwtRetriever implementation to validate the integrity of the JWT * access token. * - * @see HttpAccessTokenRetriever - * @see FileTokenRetriever + * @see HttpJwtRetriever + * @see FileJwtRetriever */ -public interface AccessTokenRetriever extends Initable, Closeable { +public interface JwtRetriever extends OAuthBearerConfigurable { /** * Retrieves a JWT access token in its serialized three-part form. The implementation @@ -48,21 +49,8 @@ public interface AccessTokenRetriever extends Initable, Closeable { * * @return Non-null JWT access token string * - * @throws IOException Thrown on errors related to IO during retrieval + * @throws JwtRetrieverException Thrown on errors related to IO during retrieval */ - String retrieve() throws IOException; - - /** - * Lifecycle method to perform a clean shutdown of the retriever. This must - * be performed by the caller to ensure the correct state, freeing up and releasing any - * resources performed in {@link #init()}. - * - * @throws IOException Thrown on errors related to IO during closure - */ - - default void close() throws IOException { - // This method left intentionally blank. - } - + String retrieve() throws JwtRetrieverException; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetrieverException.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetrieverException.java new file mode 100644 index 0000000000000..a83844fb4150a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtRetrieverException.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.KafkaException; + +/** + * A {@code JwtRetrieverException} is thrown in cases where the JWT cannot be retrieved. 
+ * + * @see JwtRetriever#retrieve() + */ +public class JwtRetrieverException extends KafkaException { + + public JwtRetrieverException(String message) { + super(message); + } + + public JwtRetrieverException(Throwable cause) { + super(cause); + } + + public JwtRetrieverException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidator.java similarity index 74% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidator.java index 0b107a09bc065..2d74e414913d7 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidator.java @@ -15,12 +15,13 @@ * limitations under the License. */ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerConfigurable; -import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; /** - * An instance of AccessTokenValidator acts as a function object that, given an access + * An instance of JwtValidator acts as a function object that, given an access * token in base-64 encoded JWT format, can parse the data, perform validation, and construct an * {@link OAuthBearerToken} for use by the caller. * @@ -40,13 +41,12 @@ *
  • RFC 6750, Section 2.1
  • * * - * @see LoginAccessTokenValidator A basic AccessTokenValidator used by client-side login - * authentication - * @see ValidatorAccessTokenValidator A more robust AccessTokenValidator that is used on the broker - * to validate the token's contents and verify the signature + * @see ClientJwtValidator A basic JwtValidator used by client-side login authentication + * @see BrokerJwtValidator A more robust JwtValidator that is used on the broker to validate the token's + * contents and verify the signature */ -public interface AccessTokenValidator { +public interface JwtValidator extends OAuthBearerConfigurable { /** * Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an @@ -56,9 +56,8 @@ public interface AccessTokenValidator { * * @return {@link OAuthBearerToken} * - * @throws ValidateException Thrown on errors performing validation of given token + * @throws JwtValidatorException Thrown on errors performing validation of given token */ - OAuthBearerToken validate(String accessToken) throws ValidateException; - + OAuthBearerToken validate(String accessToken) throws JwtValidatorException; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorException.java similarity index 59% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorException.java index 430b9007830cb..6aef68aaf4b40 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorException.java @@ -15,33 +15,32 @@ * limitations under the License. */ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; import org.apache.kafka.common.KafkaException; import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; /** - * ValidateException is thrown in cases where a JWT access token cannot be determined to be - * valid for one reason or another. It is intended to be used when errors arise within the - * processing of a {@link javax.security.auth.callback.CallbackHandler#handle(Callback[])}. - * This error, however, is not thrown from that method directly. + * A {@code JwtValidatorException} is thrown in cases where the validity of a JWT cannot be + * determined. It is intended to be used when errors arise within the processing of a + * {@link CallbackHandler#handle(Callback[])}. This error, however, is not thrown from that + * method directly. 
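Because JwtValidator is now a public, configurable interface (see the rename above), a third-party validator only has to implement configure(), validate(), and close(). The class below is a deliberately naive sketch and not a recommended implementation; a real validator must verify the token before building an OAuthBearerToken, and the generic parameters on configure() are assumed to follow the existing AuthenticateCallbackHandler.configure signature.

    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    import javax.security.auth.login.AppConfigurationEntry;

    import org.apache.kafka.common.security.oauthbearer.JwtValidator;
    import org.apache.kafka.common.security.oauthbearer.JwtValidatorException;
    import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;

    // Deliberately naive sketch: accepts any non-empty token without verifying it.
    public class AcceptAnyJwtValidator implements JwtValidator {

        @Override
        public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
            // Nothing to configure in this sketch.
        }

        @Override
        public OAuthBearerToken validate(String accessToken) throws JwtValidatorException {
            if (accessToken == null || accessToken.isEmpty())
                throw new JwtValidatorException("Empty access token");

            long now = System.currentTimeMillis();
            return new OAuthBearerToken() {
                @Override public String value() { return accessToken; }
                @Override public Set<String> scope() { return Set.of(); }
                @Override public long lifetimeMs() { return now + 60_000; }
                @Override public String principalName() { return "anonymous"; }
                @Override public Long startTimeMs() { return now; }
            };
        }

        @Override
        public void close() {
            // No resources to release.
        }
    }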
* - * @see AccessTokenValidator#validate(String) + * @see JwtValidator#validate(String) */ +public class JwtValidatorException extends KafkaException { -public class ValidateException extends KafkaException { - - public ValidateException(String message) { + public JwtValidatorException(String message) { super(message); } - public ValidateException(Throwable cause) { + public JwtValidatorException(Throwable cause) { super(cause); } - public ValidateException(String message, Throwable cause) { + public JwtValidatorException(String message, Throwable cause) { super(message, cause); } - } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java index fc9e689611520..6afd31df273f9 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java @@ -17,19 +17,15 @@ package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.security.auth.SaslExtensions; import org.apache.kafka.common.security.auth.SaslExtensionsCallback; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetriever; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetrieverFactory; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils; -import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException; +import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,13 +41,14 @@ import javax.security.sasl.SaslException; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils.getConfiguredInstance; /** *

    * OAuthBearerLoginCallbackHandler is an {@link AuthenticateCallbackHandler} that * accepts {@link OAuthBearerTokenCallback} and {@link SaslExtensionsCallback} callbacks to * perform the steps to request a JWT from an OAuth/OIDC provider using the - * clientcredentials. This grant type is commonly used for non-interactive + * client_credentials. This grant type is commonly used for non-interactive * "service accounts" where there is no user available to interactively supply credentials. *
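As a hedged sketch of the new pluggable wiring (not part of this patch): client or test code can point the login handler at JwtRetriever/JwtValidator implementations through the new SaslConfigs keys shown in the configure() change below. The class names, endpoint URL, and credentials here are placeholders and must be replaced with real implementations on the classpath.

// Hypothetical wiring sketch; class names, URL, and credentials are placeholders.
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;

public class ExampleLoginHandlerWiring {

    public static OAuthBearerLoginCallbackHandler newHandler() {
        Map<String, Object> configs = new HashMap<>();
        configs.put(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "https://example.invalid/oauth2/token");
        // New in this patch: the retriever and validator are resolved from these configs.
        configs.put(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, "com.example.ExampleJwtRetriever");
        configs.put(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, "com.example.ExampleJwtValidator");

        // JAAS options such as clientId/clientSecret are still supplied via the login module entry.
        Map<String, Object> jaasOptions = new HashMap<>();
        jaasOptions.put(OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG, "my-client");
        jaasOptions.put(OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG, "my-secret");
        AppConfigurationEntry entry = new AppConfigurationEntry(
            "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule",
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
            jaasOptions);

        OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler();
        handler.configure(configs, "OAUTHBEARER", List.of(entry));
        return handler;
    }
}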

    * @@ -179,55 +176,56 @@ public class OAuthBearerLoginCallbackHandler implements AuthenticateCallbackHand private Map moduleOptions; - private AccessTokenRetriever accessTokenRetriever; + private JwtRetriever jwtRetriever; - private AccessTokenValidator accessTokenValidator; - - private boolean isInitialized = false; + private JwtValidator jwtValidator; @Override public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); - AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, saslMechanism, moduleOptions); - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism); - init(accessTokenRetriever, accessTokenValidator); - } - - public void init(AccessTokenRetriever accessTokenRetriever, AccessTokenValidator accessTokenValidator) { - this.accessTokenRetriever = accessTokenRetriever; - this.accessTokenValidator = accessTokenValidator; - - try { - this.accessTokenRetriever.init(); - } catch (IOException e) { - throw new KafkaException("The OAuth login configuration encountered an error when initializing the AccessTokenRetriever", e); - } - - isInitialized = true; + jwtRetriever = getConfiguredInstance( + configs, + saslMechanism, + jaasConfigEntries, + SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, + JwtRetriever.class + ); + + jwtValidator = getConfiguredInstance( + configs, + saslMechanism, + jaasConfigEntries, + SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, + JwtValidator.class + ); } /* * Package-visible for testing. */ - - AccessTokenRetriever getAccessTokenRetriever() { - return accessTokenRetriever; + void configure(Map configs, + String saslMechanism, + List jaasConfigEntries, + JwtRetriever jwtRetriever, + JwtValidator jwtValidator) { + this.moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); + + this.jwtRetriever = jwtRetriever; + this.jwtRetriever.configure(configs, saslMechanism, jaasConfigEntries); + + this.jwtValidator = jwtValidator; + this.jwtValidator.configure(configs, saslMechanism, jaasConfigEntries); } @Override public void close() { - if (accessTokenRetriever != null) { - try { - this.accessTokenRetriever.close(); - } catch (IOException e) { - log.warn("The OAuth login configuration encountered an error when closing the AccessTokenRetriever", e); - } - } + Utils.closeQuietly(jwtRetriever, "JWT retriever"); + Utils.closeQuietly(jwtValidator, "JWT validator"); } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - checkInitialized(); + checkConfigured(); for (Callback callback : callbacks) { if (callback instanceof OAuthBearerTokenCallback) { @@ -241,20 +239,20 @@ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallback } private void handleTokenCallback(OAuthBearerTokenCallback callback) throws IOException { - checkInitialized(); - String accessToken = accessTokenRetriever.retrieve(); + checkConfigured(); + String accessToken = jwtRetriever.retrieve(); try { - OAuthBearerToken token = accessTokenValidator.validate(accessToken); + OAuthBearerToken token = jwtValidator.validate(accessToken); callback.token(token); - } catch (ValidateException e) { + } catch (JwtValidatorException e) { log.warn(e.getMessage(), e); callback.error("invalid_token", e.getMessage(), null); } } private void handleExtensionsCallback(SaslExtensionsCallback callback) { - checkInitialized(); + checkConfigured(); Map 
extensions = new HashMap<>(); @@ -286,9 +284,9 @@ private void handleExtensionsCallback(SaslExtensionsCallback callback) { callback.extensions(saslExtensions); } - private void checkInitialized() { - if (!isInitialized) - throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName())); + private void checkConfigured() { + if (moduleOptions == null || jwtRetriever == null || jwtValidator == null) + throw new IllegalStateException(String.format("To use %s, first call the configure method", getClass().getSimpleName())); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java index f9422370db18b..60fa8cdb6788a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java @@ -17,34 +17,24 @@ package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; -import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils; -import org.apache.kafka.common.security.oauthbearer.internals.secured.RefreshingHttpsJwksVerificationKeyResolver; -import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException; -import org.apache.kafka.common.security.oauthbearer.internals.secured.VerificationKeyResolverFactory; +import org.apache.kafka.common.utils.Utils; -import org.jose4j.jws.JsonWebSignature; -import org.jose4j.jwx.JsonWebStructure; -import org.jose4j.lang.UnresolvableKeyException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; -import java.security.Key; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.auth.login.AppConfigurationEntry; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils.getConfiguredInstance; + /** *

    * OAuthBearerValidatorCallbackHandler is an {@link AuthenticateCallbackHandler} that @@ -108,64 +98,45 @@ public class OAuthBearerValidatorCallbackHandler implements AuthenticateCallback private static final Logger log = LoggerFactory.getLogger(OAuthBearerValidatorCallbackHandler.class); - /** - * Because a {@link CloseableVerificationKeyResolver} instance can spawn threads and issue - * HTTP(S) calls ({@link RefreshingHttpsJwksVerificationKeyResolver}), we only want to create - * a new instance for each particular set of configuration. Because each set of configuration - * may have multiple instances, we want to reuse the single instance. - */ - - private static final Map VERIFICATION_KEY_RESOLVER_CACHE = new HashMap<>(); - private CloseableVerificationKeyResolver verificationKeyResolver; - private AccessTokenValidator accessTokenValidator; - - private boolean isInitialized = false; + private JwtValidator jwtValidator; @Override public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { - Map moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); - CloseableVerificationKeyResolver verificationKeyResolver; - - // Here's the logic which keeps our VerificationKeyResolvers down to a single instance. - synchronized (VERIFICATION_KEY_RESOLVER_CACHE) { - VerificationKeyResolverKey key = new VerificationKeyResolverKey(configs, moduleOptions); - verificationKeyResolver = VERIFICATION_KEY_RESOLVER_CACHE.computeIfAbsent(key, k -> - new RefCountingVerificationKeyResolver(VerificationKeyResolverFactory.create(configs, saslMechanism, moduleOptions))); - } - - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism, verificationKeyResolver); - init(verificationKeyResolver, accessTokenValidator); + jwtValidator = getConfiguredInstance( + configs, + saslMechanism, + jaasConfigEntries, + SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, + JwtValidator.class + ); } - public void init(CloseableVerificationKeyResolver verificationKeyResolver, AccessTokenValidator accessTokenValidator) { + /* + * Package-visible for testing. 
+ */ + void configure(Map configs, + String saslMechanism, + List jaasConfigEntries, + CloseableVerificationKeyResolver verificationKeyResolver, + JwtValidator jwtValidator) { this.verificationKeyResolver = verificationKeyResolver; - this.accessTokenValidator = accessTokenValidator; - - try { - verificationKeyResolver.init(); - } catch (Exception e) { - throw new KafkaException("The OAuth validator configuration encountered an error when initializing the VerificationKeyResolver", e); - } + this.verificationKeyResolver.configure(configs, saslMechanism, jaasConfigEntries); - isInitialized = true; + this.jwtValidator = jwtValidator; + this.jwtValidator.configure(configs, saslMechanism, jaasConfigEntries); } @Override public void close() { - if (verificationKeyResolver != null) { - try { - verificationKeyResolver.close(); - } catch (Exception e) { - log.error(e.getMessage(), e); - } - } + Utils.closeQuietly(jwtValidator, "JWT validator"); + Utils.closeQuietly(verificationKeyResolver, "JWT verification key resolver"); } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - checkInitialized(); + checkConfigured(); for (Callback callback : callbacks) { if (callback instanceof OAuthBearerValidatorCallback) { @@ -179,102 +150,27 @@ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallback } private void handleValidatorCallback(OAuthBearerValidatorCallback callback) { - checkInitialized(); + checkConfigured(); OAuthBearerToken token; try { - token = accessTokenValidator.validate(callback.tokenValue()); + token = jwtValidator.validate(callback.tokenValue()); callback.token(token); - } catch (ValidateException e) { + } catch (JwtValidatorException e) { log.warn(e.getMessage(), e); callback.error("invalid_token", null, null); } } private void handleExtensionsValidatorCallback(OAuthBearerExtensionsValidatorCallback extensionsValidatorCallback) { - checkInitialized(); + checkConfigured(); extensionsValidatorCallback.inputExtensions().map().forEach((extensionName, v) -> extensionsValidatorCallback.valid(extensionName)); } - private void checkInitialized() { - if (!isInitialized) - throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName())); + private void checkConfigured() { + if (jwtValidator == null) + throw new IllegalStateException(String.format("To use %s, first call the configure method", getClass().getSimpleName())); } - - /** - * VkrKey is a simple structure which encapsulates the criteria for different - * sets of configuration. This will allow us to use this object as a key in a {@link Map} - * to keep a single instance per key. 
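A similar illustrative sketch for the broker side (not part of the patch): the validator callback handler now resolves its JwtValidator from the same config key, and handle() reports validation failures through the standard "invalid_token" error status. The validator class name and token value are placeholders.

// Hypothetical broker-side sketch; the validator class name and token value are placeholders.
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;

public class ExampleValidatorHandlerWiring {

    public static void validate(String tokenValue) throws IOException, UnsupportedCallbackException {
        Map<String, Object> configs = new HashMap<>();
        configs.put(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, "com.example.ExampleJwtValidator");

        AppConfigurationEntry entry = new AppConfigurationEntry(
            "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule",
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
            Map.of());

        OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler();
        handler.configure(configs, "OAUTHBEARER", List.of(entry));

        OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(tokenValue);
        handler.handle(new Callback[] {callback});

        if (callback.token() == null) {
            // On JwtValidatorException the handler sets an error status instead of a token.
            throw new IllegalArgumentException("Token rejected: " + callback.errorStatus());
        }

        handler.close();
    }
}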
- */ - - private static class VerificationKeyResolverKey { - - private final Map configs; - - private final Map moduleOptions; - - public VerificationKeyResolverKey(Map configs, Map moduleOptions) { - this.configs = configs; - this.moduleOptions = moduleOptions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - VerificationKeyResolverKey that = (VerificationKeyResolverKey) o; - return configs.equals(that.configs) && moduleOptions.equals(that.moduleOptions); - } - - @Override - public int hashCode() { - return Objects.hash(configs, moduleOptions); - } - - } - - /** - * RefCountingVerificationKeyResolver allows us to share a single - * {@link CloseableVerificationKeyResolver} instance between multiple - * {@link AuthenticateCallbackHandler} instances and perform the lifecycle methods the - * appropriate number of times. - */ - - private static class RefCountingVerificationKeyResolver implements CloseableVerificationKeyResolver { - - private final CloseableVerificationKeyResolver delegate; - - private final AtomicInteger count = new AtomicInteger(0); - - public RefCountingVerificationKeyResolver(CloseableVerificationKeyResolver delegate) { - this.delegate = delegate; - } - - @Override - public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { - return delegate.resolveKey(jws, nestingContext); - } - - @Override - public void init() throws IOException { - if (count.incrementAndGet() == 1) - delegate.init(); - } - - @Override - public void close() throws IOException { - if (count.decrementAndGet() == 0) - delegate.close(); - } - - } - } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java deleted file mode 100644 index 0ed4a1a230349..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import org.apache.kafka.common.config.SaslConfigs; - -import java.net.URL; -import java.util.Locale; -import java.util.Map; - -import javax.net.ssl.SSLSocketFactory; - -import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; -import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_CONNECT_TIMEOUT_MS; -import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS; -import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS; -import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.SCOPE_CONFIG; - -public class AccessTokenRetrieverFactory { - - /** - * Create an {@link AccessTokenRetriever} from the given SASL and JAAS configuration. - * - * Note: the returned AccessTokenRetriever is not initialized - * here and must be done by the caller prior to use. - * - * @param configs SASL configuration - * @param jaasConfig JAAS configuration - * - * @return Non-null {@link AccessTokenRetriever} - */ - - public static AccessTokenRetriever create(Map configs, Map jaasConfig) { - return create(configs, null, jaasConfig); - } - - public static AccessTokenRetriever create(Map configs, - String saslMechanism, - Map jaasConfig) { - ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); - URL tokenEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL); - - if (tokenEndpointUrl.getProtocol().toLowerCase(Locale.ROOT).equals("file")) { - return new FileTokenRetriever(cu.validateFile(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL)); - } else { - JaasOptionsUtils jou = new JaasOptionsUtils(jaasConfig); - String clientId = jou.validateString(CLIENT_ID_CONFIG); - String clientSecret = jou.validateString(CLIENT_SECRET_CONFIG); - String scope = jou.validateString(SCOPE_CONFIG, false); - - SSLSocketFactory sslSocketFactory = null; - - if (jou.shouldCreateSSLSocketFactory(tokenEndpointUrl)) - sslSocketFactory = jou.createSSLSocketFactory(); - - boolean urlencodeHeader = validateUrlencodeHeader(cu); - - return new HttpAccessTokenRetriever(clientId, - clientSecret, - scope, - sslSocketFactory, - tokenEndpointUrl.toString(), - cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MS), - cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MAX_MS), - cu.validateInteger(SASL_LOGIN_CONNECT_TIMEOUT_MS, false), - cu.validateInteger(SASL_LOGIN_READ_TIMEOUT_MS, false), - urlencodeHeader); - } - } - - /** - * In some cases, the incoming {@link Map} doesn't contain a value for - * {@link SaslConfigs#SASL_OAUTHBEARER_HEADER_URLENCODE}. Returning {@code null} from {@link Map#get(Object)} - * will cause a {@link NullPointerException} when it is later unboxed. - * - *

    - * - * This utility method ensures that we have a non-{@code null} value to use in the - * {@link HttpAccessTokenRetriever} constructor. - */ - static boolean validateUrlencodeHeader(ConfigurationUtils configurationUtils) { - Boolean urlencodeHeader = configurationUtils.validateBoolean(SASL_OAUTHBEARER_HEADER_URLENCODE, false); - - if (urlencodeHeader != null) - return urlencodeHeader; - else - return DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; - } - -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java deleted file mode 100644 index e4b39e5cc53c6..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import org.jose4j.keys.resolvers.VerificationKeyResolver; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME; - -public class AccessTokenValidatorFactory { - - public static AccessTokenValidator create(Map configs) { - return create(configs, (String) null); - } - - public static AccessTokenValidator create(Map configs, String saslMechanism) { - ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); - String scopeClaimName = cu.get(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME); - String subClaimName = cu.get(SASL_OAUTHBEARER_SUB_CLAIM_NAME); - return new LoginAccessTokenValidator(scopeClaimName, subClaimName); - } - - public static AccessTokenValidator create(Map configs, - VerificationKeyResolver verificationKeyResolver) { - return create(configs, null, verificationKeyResolver); - } - - public static AccessTokenValidator create(Map configs, - String saslMechanism, - VerificationKeyResolver verificationKeyResolver) { - ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); - Set expectedAudiences = null; - List l = cu.get(SASL_OAUTHBEARER_EXPECTED_AUDIENCE); - - if (l != null) - expectedAudiences = Set.copyOf(l); - - Integer clockSkew = 
cu.validateInteger(SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, false); - String expectedIssuer = cu.validateString(SASL_OAUTHBEARER_EXPECTED_ISSUER, false); - String scopeClaimName = cu.validateString(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME); - String subClaimName = cu.validateString(SASL_OAUTHBEARER_SUB_CLAIM_NAME); - - return new ValidatorAccessTokenValidator(clockSkew, - expectedAudiences, - expectedIssuer, - verificationKeyResolver, - scopeClaimName, - subClaimName); - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFile.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFile.java new file mode 100644 index 0000000000000..11cfb19cf4998 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFile.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.security.oauthbearer.JwtValidatorException; +import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerIllegalTokenException; +import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredJws; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; + +/** + * {@code CachedFile} goes a little beyond the basic file caching mechanism by allowing the file to be "transformed" + * into an in-memory representation of the file contents for easier use by the caller. + * + * @param Type of the "transformed" file contents + */ +public class CachedFile { + + /** + * Function object that provides as arguments the file and its contents and returns the in-memory representation + * of the file contents. + */ + public interface Transformer { + + /** + * Transforms the raw contents into a (possibly) different representation. + * + * @param file File containing the source data + * @param contents Data from file; could be zero length but not {@code null} + */ + T transform(File file, String contents); + } + + /** + * Function object that provides as arguments the file and its metadata and returns a flag to determine if the + * file should be reloaded from disk. + */ + public interface RefreshPolicy { + + /** + * Given the {@link File} and its snapshot, determine if the file should be reloaded from disk. + */ + boolean shouldRefresh(File file, Snapshot snapshot); + + /** + * This cache refresh policy only loads the file once. 
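As an illustrative sketch (not part of the patch), the constructor and accessors shown in the remainder of this class can be combined with lambda Transformer and RefreshPolicy implementations as below; the file path is a placeholder.

// Hypothetical usage sketch of CachedFile; the path is a placeholder.
import org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile;

import java.io.File;

public class ExampleCachedFileUsage {

    public static void main(String[] args) {
        File tokenFile = new File("/tmp/access-token.jwt");

        // Transformer: trim the raw contents. RefreshPolicy: re-read only when no snapshot exists yet
        // (the "load once" behavior described above).
        CachedFile<String> cached = new CachedFile<>(
            tokenFile,
            (file, contents) -> contents.trim(),
            (file, snapshot) -> snapshot == null);

        System.out.println("size=" + cached.size() + " bytes, token=" + cached.transformed());
    }
}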
+ */ + static RefreshPolicy staticPolicy() { + return (file, snapshot) -> snapshot == null; + } + + /** + * This policy will refresh the cached file if the snapshot's time is older than the current timestamp. + */ + static RefreshPolicy lastModifiedPolicy() { + return (file, snapshot) -> { + if (snapshot == null) + return true; + + return file.lastModified() != snapshot.lastModified(); + }; + } + } + + /** + * No-op transformer that retains the exact file contents as a string. + */ + public static final Transformer STRING_NOOP_TRANSFORMER = (file, contents) -> contents; + + /** + * This transformer really only validates that the given file contents represent a properly-formed JWT. + * If not, a {@link OAuthBearerIllegalTokenException} or {@link JwtValidatorException} is thrown. + */ + public static final Transformer STRING_JSON_VALIDATING_TRANSFORMER = (file, contents) -> { + contents = contents.trim(); + SerializedJwt serializedJwt = new SerializedJwt(contents); + OAuthBearerUnsecuredJws.toMap(serializedJwt.getHeader()); + OAuthBearerUnsecuredJws.toMap(serializedJwt.getPayload()); + return contents; + }; + + private final File file; + private final Transformer transformer; + private final RefreshPolicy cacheRefreshPolicy; + private Snapshot snapshot; + + public CachedFile(File file, Transformer transformer, RefreshPolicy cacheRefreshPolicy) { + this.file = file; + this.transformer = transformer; + this.cacheRefreshPolicy = cacheRefreshPolicy; + this.snapshot = snapshot(); + } + + public long size() { + return snapshot().size(); + } + + public long lastModified() { + return snapshot().lastModified(); + } + + public String contents() { + return snapshot().contents(); + } + + public T transformed() { + return snapshot().transformed(); + } + + private Snapshot snapshot() { + if (cacheRefreshPolicy.shouldRefresh(file, snapshot)) { + long size = file.length(); + long lastModified = file.lastModified(); + String contents; + + try { + contents = Files.readString(file.toPath()); + } catch (IOException e) { + throw new KafkaException("Error reading the file contents of OAuth resource " + file.getPath() + " for caching"); + } + + T transformed = transformer.transform(file, contents); + snapshot = new Snapshot<>(size, lastModified, contents, transformed); + } + + return snapshot; + } + + public static class Snapshot { + + private final long size; + + private final long lastModified; + + private final String contents; + + private final T transformed; + + public Snapshot(long size, long lastModified, String contents, T transformed) { + this.size = size; + this.lastModified = lastModified; + this.contents = contents; + this.transformed = transformed; + } + + public long size() { + return size; + } + + public long lastModified() { + return lastModified; + } + + public String contents() { + return contents; + } + + public T transformed() { + return transformed; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java index 5bf5ef068ed0f..582b4e86f701b 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java @@ -17,6 +17,8 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; +import 
org.apache.kafka.common.security.oauthbearer.JwtValidatorException; + import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -47,14 +49,14 @@ public class ClaimValidationUtils { * @return Unmodifiable {@link Set} that includes the values of the original set, but with * each value trimmed * - * @throws ValidateException Thrown if the value is null, contains duplicates, or + * @throws JwtValidatorException Thrown if the value is null, contains duplicates, or * if any of the values in the set are null, empty, * or whitespace only */ - public static Set validateScopes(String scopeClaimName, Collection scopes) throws ValidateException { + public static Set validateScopes(String scopeClaimName, Collection scopes) throws JwtValidatorException { if (scopes == null) - throw new ValidateException(String.format("%s value must be non-null", scopeClaimName)); + throw new JwtValidatorException(String.format("%s value must be non-null", scopeClaimName)); Set copy = new HashSet<>(); @@ -62,7 +64,7 @@ public static Set validateScopes(String scopeClaimName, Collection validateScopes(String scopeClaimName, Collectionnull or negative + * @throws JwtValidatorException Thrown if the value is null or negative */ - public static long validateExpiration(String claimName, Long claimValue) throws ValidateException { + public static long validateExpiration(String claimName, Long claimValue) throws JwtValidatorException { if (claimValue == null) - throw new ValidateException(String.format("%s value must be non-null", claimName)); + throw new JwtValidatorException(String.format("%s value must be non-null", claimName)); if (claimValue < 0) - throw new ValidateException(String.format("%s value must be non-negative; value given was \"%s\"", claimName, claimValue)); + throw new JwtValidatorException(String.format("%s value must be non-negative; value given was \"%s\"", claimName, claimValue)); return claimValue; } @@ -112,10 +114,10 @@ public static long validateExpiration(String claimName, Long claimValue) throws * * @return Trimmed version of the claimValue parameter * - * @throws ValidateException Thrown if the value is null, empty, or whitespace only + * @throws JwtValidatorException Thrown if the value is null, empty, or whitespace only */ - public static String validateSubject(String claimName, String claimValue) throws ValidateException { + public static String validateSubject(String claimName, String claimValue) throws JwtValidatorException { return validateString(claimName, claimValue); } @@ -132,12 +134,12 @@ public static String validateSubject(String claimName, String claimValue) throws * * @return Input parameter, as provided * - * @throws ValidateException Thrown if the value is negative + * @throws JwtValidatorException Thrown if the value is negative */ - public static Long validateIssuedAt(String claimName, Long claimValue) throws ValidateException { + public static Long validateIssuedAt(String claimName, Long claimValue) throws JwtValidatorException { if (claimValue != null && claimValue < 0) - throw new ValidateException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue)); + throw new JwtValidatorException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue)); return claimValue; } @@ -157,24 +159,24 @@ public static Long validateIssuedAt(String claimName, Long claimValue) throws Va * * @return Trimmed version of the value parameter * - * @throws ValidateException Thrown if 
the value is null, empty, or whitespace only + * @throws JwtValidatorException Thrown if the value is null, empty, or whitespace only */ - public static String validateClaimNameOverride(String name, String value) throws ValidateException { + public static String validateClaimNameOverride(String name, String value) throws JwtValidatorException { return validateString(name, value); } - private static String validateString(String name, String value) throws ValidateException { + private static String validateString(String name, String value) throws JwtValidatorException { if (value == null) - throw new ValidateException(String.format("%s value must be non-null", name)); + throw new JwtValidatorException(String.format("%s value must be non-null", name)); if (value.isEmpty()) - throw new ValidateException(String.format("%s value must be non-empty", name)); + throw new JwtValidatorException(String.format("%s value must be non-empty", name)); value = value.trim(); if (value.isEmpty()) - throw new ValidateException(String.format("%s value must not contain only whitespace", name)); + throw new JwtValidatorException(String.format("%s value must not contain only whitespace", name)); return value; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatter.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatter.java new file mode 100644 index 0000000000000..f1eaf99b9aa67 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatter.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.utils.Utils; + +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET; + +public class ClientCredentialsRequestFormatter implements HttpRequestFormatter { + + public static final String GRANT_TYPE = "client_credentials"; + + private final String clientId; + + private final String clientSecret; + + private final String scope; + + public ClientCredentialsRequestFormatter(String clientId, String clientSecret, String scope, boolean urlencode) { + if (Utils.isBlank(clientId)) + throw new ConfigException(SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, clientId); + + if (Utils.isBlank(clientSecret)) + throw new ConfigException(SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, clientId); + + clientId = clientId.trim(); + clientSecret = clientSecret.trim(); + scope = Utils.isBlank(scope) ? null : scope.trim(); + + // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 + if (urlencode) { + clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8); + clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8); + + if (scope != null) + scope = URLEncoder.encode(scope, StandardCharsets.UTF_8); + } + + this.clientId = clientId; + this.clientSecret = clientSecret; + this.scope = scope; + } + + @Override + public Map formatHeaders() { + String s = String.format("%s:%s", clientId, clientSecret); + // Per RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. + String encoded = Base64.getEncoder().encodeToString(Utils.utf8(s)); + String authorizationHeader = String.format("Basic %s", encoded); + + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Authorization", authorizationHeader); + headers.put("Cache-Control", "no-cache"); + headers.put("Content-Type", "application/x-www-form-urlencoded"); + return headers; + } + + @Override + public String formatBody() { + StringBuilder requestParameters = new StringBuilder(); + requestParameters.append("grant_type=").append(GRANT_TYPE); + + if (scope != null) + requestParameters.append("&scope=").append(scope); + + return requestParameters.toString(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java index bf8ca0cb82211..d38d0708e9446 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java @@ -21,33 +21,14 @@ import org.jose4j.keys.resolvers.VerificationKeyResolver; -import java.io.Closeable; -import java.io.IOException; - /** * The {@link OAuthBearerValidatorCallbackHandler} uses a {@link VerificationKeyResolver} as * part of its validation of the incoming JWT. 
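To make the formatter above concrete, a small sketch of what it produces (credentials and scope are placeholders; not part of the patch):

// Hypothetical usage sketch of ClientCredentialsRequestFormatter; credentials and scope are placeholders.
import org.apache.kafka.common.security.oauthbearer.internals.secured.ClientCredentialsRequestFormatter;

import java.util.Map;

public class ExampleRequestFormatting {

    public static void main(String[] args) {
        ClientCredentialsRequestFormatter formatter =
            new ClientCredentialsRequestFormatter("my-client", "my-secret", "profile email", true);

        // Includes the RFC 7617 "Authorization: Basic ..." header plus Accept, Cache-Control, and Content-Type.
        Map<String, String> headers = formatter.formatHeaders();

        // With urlencode=true the scope is encoded, so the body is
        // "grant_type=client_credentials&scope=profile+email".
        String body = formatter.formatBody();

        System.out.println(headers.get("Authorization") + " / " + body);
    }
}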
Some of the VerificationKeyResolver * implementations use resources like threads, connections, etc. that should be properly closed * when no longer needed. Since the VerificationKeyResolver interface itself doesn't * define a close method, we provide a means to do that here. - * - * @see OAuthBearerValidatorCallbackHandler - * @see VerificationKeyResolver - * @see Closeable */ -public interface CloseableVerificationKeyResolver extends Initable, Closeable, VerificationKeyResolver { - - /** - * Lifecycle method to perform a clean shutdown of the {@link VerificationKeyResolver}. - * This must be performed by the caller to ensure the correct state, freeing up - * and releasing any resources performed in {@link #init()}. - * - * @throws IOException Thrown on errors related to IO during closure - */ - - default void close() throws IOException { - // This method left intentionally blank. - } +public interface CloseableVerificationKeyResolver extends OAuthBearerConfigurable, VerificationKeyResolver { } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java index 10f700826c8bd..3eebecf8fde10 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java @@ -18,19 +18,28 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.utils.Utils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.File; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; -import java.nio.file.Path; import java.util.Arrays; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT; @@ -41,6 +50,8 @@ public class ConfigurationUtils { + private static final Logger LOG = LoggerFactory.getLogger(ConfigurationUtils.class); + private final Map configs; private final String prefix; @@ -58,6 +69,10 @@ public ConfigurationUtils(Map configs, String saslMechanism) { this.prefix = null; } + public boolean containsKey(String name) { + return get(name) != null; + } + /** * Validates that, if a value is supplied, is a file that: * @@ -71,7 +86,7 @@ public ConfigurationUtils(Map configs, String saslMechanism) { * ignored. Any whitespace is trimmed off of the beginning and end. 
*/ - public Path validateFile(String name) { + public File validateFileUrl(String name) { URL url = validateUrl(name); File file; @@ -81,6 +96,35 @@ public Path validateFile(String name) { throw new ConfigException(String.format("The OAuth configuration option %s contains a URL (%s) that is malformed: %s", name, url, e.getMessage())); } + return validateFile(name, file); + } + + /** + * Validates that the file: + * + *

+     * <ul>
+     *     <li>exists</li>
+     *     <li>has read permission</li>
+     *     <li>points to a file</li>
+     * </ul>
+     */
+    public File validateFile(String name) {
+        String s = validateString(name);
+        File file = validateFile(name, new File(s).getAbsoluteFile());
+        throwIfFileIsNotAllowed(name, file.getAbsolutePath());
+        return file;
+    }
+
+    /**
+     * Validates that the file:
+     *
+     * <ul>
+     *     <li>exists</li>
+     *     <li>has read permission</li>
+     *     <li>points to a file</li>
+     * </ul>
  • + */ + private File validateFile(String name, File file) { if (!file.exists()) throw new ConfigException(String.format("The OAuth configuration option %s contains a file (%s) that doesn't exist", name, file)); @@ -90,7 +134,7 @@ public Path validateFile(String name) { if (file.isDirectory()) throw new ConfigException(String.format("The OAuth configuration option %s references a directory (%s), not a file", name, file)); - return file.toPath(); + return file; } /** @@ -110,7 +154,7 @@ public Integer validateInteger(String name, boolean isRequired) { if (value == null) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); + throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); else return null; } @@ -143,7 +187,7 @@ public Long validateLong(String name, boolean isRequired, Long min) { if (value == null) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); + throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); else return null; } @@ -187,42 +231,42 @@ public URL validateUrl(String name) { if (!(protocol.equals("http") || protocol.equals("https") || protocol.equals("file"))) throw new ConfigException(String.format("The OAuth configuration option %s contains a URL (%s) that contains an invalid protocol (%s); only \"http\", \"https\", and \"file\" protocol are supported", name, value, protocol)); - throwIfURLIsNotAllowed(value); + throwIfURLIsNotAllowed(name, value); return url; } - public String validateString(String name) throws ValidateException { + public String validatePassword(String name) { + Password value = get(name); + + if (value == null || Utils.isBlank(value.value())) + throw new ConfigException(String.format("The OAuth configuration option %s value is required", name)); + + return value.value().trim(); + } + + public String validateString(String name) { return validateString(name, true); } - public String validateString(String name, boolean isRequired) throws ValidateException { + public String validateString(String name, boolean isRequired) { String value = get(name); - if (value == null) { + if (Utils.isBlank(value)) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s value must be non-null", name)); + throw new ConfigException(String.format("The OAuth configuration option %s value is required", name)); else return null; } - value = value.trim(); - - if (value.isEmpty()) { - if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s value must not contain only whitespace", name)); - else - return null; - } - - return value; + return value.trim(); } public Boolean validateBoolean(String name, boolean isRequired) { Boolean value = get(name); if (value == null && isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); + throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); return value; } @@ -237,16 +281,137 @@ public T get(String name) { return (T) configs.get(name); } + public static T getConfiguredInstance(Map configs, + String saslMechanism, + List jaasConfigEntries, + String configName, + Class expectedClass) { + Object configValue = configs.get(configName); + Object o; + + if (configValue instanceof String) { + String implementationClassName = (String) configValue; + + try { + 
o = Utils.newInstance(implementationClassName, expectedClass); + } catch (Exception e) { + throw new ConfigException( + String.format( + "The class %s defined in the %s configuration could not be instantiated: %s", + implementationClassName, + configName, + e.getMessage() + ) + ); + } + } else if (configValue instanceof Class) { + Class implementationClass = (Class) configValue; + + try { + o = Utils.newInstance(implementationClass); + } catch (Exception e) { + throw new ConfigException( + String.format( + "The class %s defined in the %s configuration could not be instantiated: %s", + implementationClass.getName(), + configName, + e.getMessage() + ) + ); + } + } else if (configValue != null) { + throw new ConfigException( + String.format( + "The type for the %s configuration must be either %s or %s, but was %s", + configName, + String.class.getName(), + Class.class.getName(), + configValue.getClass().getName() + ) + ); + } else { + throw new ConfigException(String.format("The required configuration %s was null", configName)); + } + + if (!expectedClass.isInstance(o)) { + throw new ConfigException( + String.format( + "The configured class (%s) for the %s configuration is not an instance of %s, as is required", + o.getClass().getName(), + configName, + expectedClass.getName() + ) + ); + } + + if (o instanceof OAuthBearerConfigurable) { + try { + ((OAuthBearerConfigurable) o).configure(configs, saslMechanism, jaasConfigEntries); + } catch (Exception e) { + Utils.maybeCloseQuietly(o, "Instance of class " + o.getClass().getName() + " failed call to configure()"); + LOG.warn( + "The class {} defined in the {} configuration encountered an error on configure(): {}", + o.getClass().getName(), + configName, + e.getMessage(), + e + ); + throw new ConfigException( + String.format( + "The class %s defined in the %s configuration encountered an error on configure(): %s", + o.getClass().getName(), + configName, + e.getMessage() + ) + ); + } + } + + return expectedClass.cast(o); + } + // visible for testing // make sure the url is in the "org.apache.kafka.sasl.oauthbearer.allowed.urls" system property - void throwIfURLIsNotAllowed(String value) { - Set allowedUrls = Arrays.stream( - System.getProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT).split(",")) - .map(String::trim) - .collect(Collectors.toSet()); - if (!allowedUrls.contains(value)) { - throw new ConfigException(value + " is not allowed. 
Update system property '" - + ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG + "' to allow " + value); + void throwIfURLIsNotAllowed(String configName, String configValue) { + throwIfResourceIsNotAllowed( + "URL", + configName, + configValue, + ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, + ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT + ); + } + + // visible for testing + // make sure the file is in the "org.apache.kafka.sasl.oauthbearer.allowed.files" system property + void throwIfFileIsNotAllowed(String configName, String configValue) { + throwIfResourceIsNotAllowed( + "file", + configName, + configValue, + ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, + ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT + ); + } + + private void throwIfResourceIsNotAllowed(String resourceType, + String configName, + String configValue, + String propertyName, + String propertyDefault) { + String[] allowedArray = System.getProperty(propertyName, propertyDefault).split(","); + Set allowed = Arrays.stream(allowedArray) + .map(String::trim) + .collect(Collectors.toSet()); + + if (!allowed.contains(configValue)) { + String message = String.format( + "The %s cannot be accessed due to restrictions. Update the system property '%s' to allow the %s to be accessed.", + resourceType, + propertyName, + resourceType + ); + throw new ConfigException(configName, configValue, message); } } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java deleted file mode 100644 index c145cf7596959..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import org.apache.kafka.common.utils.Utils; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * FileTokenRetriever is an {@link AccessTokenRetriever} that will load the contents, - * interpreting them as a JWT access key in the serialized form. 
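For reference, a sketch (not part of the patch) of how the callback handlers shown earlier use the new getConfiguredInstance helper; the wrapper method and its callers are illustrative. Note also that any URL or file the configured instance ends up touching must be allow-listed via the system properties named in the comments above ("org.apache.kafka.sasl.oauthbearer.allowed.urls" and "org.apache.kafka.sasl.oauthbearer.allowed.files").

// Hypothetical helper showing getConfiguredInstance; the surrounding method is illustrative.
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.oauthbearer.JwtValidator;

import java.util.List;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;

import static org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils.getConfiguredInstance;

public class ExampleConfiguredInstance {

    public static JwtValidator createValidator(Map<String, ?> configs,
                                               String saslMechanism,
                                               List<AppConfigurationEntry> jaasConfigEntries) {
        // The config value may be a class name String or a Class object. The instance is created
        // reflectively, type-checked against JwtValidator, and, if it is OAuthBearerConfigurable,
        // configured (and closed again should configure() fail).
        return getConfiguredInstance(
            configs,
            saslMechanism,
            jaasConfigEntries,
            SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS,
            JwtValidator.class);
    }
}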
- * - * @see AccessTokenRetriever - */ - -public class FileTokenRetriever implements AccessTokenRetriever { - - private final Path accessTokenFile; - - private String accessToken; - - public FileTokenRetriever(Path accessTokenFile) { - this.accessTokenFile = accessTokenFile; - } - - @Override - public void init() throws IOException { - this.accessToken = Utils.readFileAsString(accessTokenFile.toFile().getPath()); - // always non-null; to remove any newline chars or backend will report err - this.accessToken = this.accessToken.trim(); - } - - @Override - public String retrieve() throws IOException { - if (accessToken == null) - throw new IllegalStateException("Access token is null; please call init() first"); - - return accessToken; - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetriever.java similarity index 65% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetriever.java index fdc5707278a60..4ae838e1f28ce 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetriever.java @@ -14,13 +14,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.kafka.common.security.oauthbearer.internals.secured; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.security.oauthbearer.JwtRetriever; +import org.apache.kafka.common.security.oauthbearer.JwtRetrieverException; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler; -import org.apache.kafka.common.utils.Utils; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -35,11 +35,9 @@ import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URL; -import java.net.URLEncoder; import java.nio.charset.StandardCharsets; -import java.util.Base64; -import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -47,31 +45,26 @@ import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_CONNECT_TIMEOUT_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; /** - * HttpAccessTokenRetriever is an {@link AccessTokenRetriever} that will - * communicate with an OAuth/OIDC provider directly via HTTP to post client credentials + * HttpJwtRetriever is a {@link JwtRetriever} that will communicate with an OAuth/OIDC + * provider directly via HTTP to post client credentials * ({@link OAuthBearerLoginCallbackHandler#CLIENT_ID_CONFIG}/{@link 
OAuthBearerLoginCallbackHandler#CLIENT_SECRET_CONFIG}) - * to a publicized token endpoint URL - * ({@link SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL}). - * - * @see AccessTokenRetriever - * @see OAuthBearerLoginCallbackHandler#CLIENT_ID_CONFIG - * @see OAuthBearerLoginCallbackHandler#CLIENT_SECRET_CONFIG - * @see OAuthBearerLoginCallbackHandler#SCOPE_CONFIG - * @see SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL + * to a publicized token endpoint URL ({@link SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL}). */ +public class HttpJwtRetriever implements JwtRetriever { -public class HttpAccessTokenRetriever implements AccessTokenRetriever { - - private static final Logger log = LoggerFactory.getLogger(HttpAccessTokenRetriever.class); + private static final Logger log = LoggerFactory.getLogger(HttpJwtRetriever.class); private static final Set UNRETRYABLE_HTTP_CODES; - private static final int MAX_RESPONSE_BODY_LENGTH = 1000; - - public static final String AUTHORIZATION_HEADER = "Authorization"; - static { // This does not have to be an exhaustive list. There are other HTTP codes that // are defined in different RFCs (e.g. https://datatracker.ietf.org/doc/html/rfc6585) @@ -97,46 +90,38 @@ public class HttpAccessTokenRetriever implements AccessTokenRetriever { UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_VERSION); } - private final String clientId; + private final HttpRequestFormatter requestFormatter; + + private SSLSocketFactory sslSocketFactory; - private final String clientSecret; + private URL tokenEndpointUrl; - private final String scope; + private long loginRetryBackoffMs; - private final SSLSocketFactory sslSocketFactory; + private long loginRetryBackoffMaxMs; - private final String tokenEndpointUrl; + private Integer loginConnectTimeoutMs; - private final long loginRetryBackoffMs; + private Integer loginReadTimeoutMs; - private final long loginRetryBackoffMaxMs; + public HttpJwtRetriever(HttpRequestFormatter requestFormatter) { + this.requestFormatter = Objects.requireNonNull(requestFormatter); + } - private final Integer loginConnectTimeoutMs; + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + JaasOptionsUtils jou = new JaasOptionsUtils(saslMechanism, jaasConfigEntries); - private final Integer loginReadTimeoutMs; + tokenEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL); - private final boolean urlencodeHeader; + if (jou.shouldCreateSSLSocketFactory(tokenEndpointUrl)) + sslSocketFactory = jou.createSSLSocketFactory(); - public HttpAccessTokenRetriever(String clientId, - String clientSecret, - String scope, - SSLSocketFactory sslSocketFactory, - String tokenEndpointUrl, - long loginRetryBackoffMs, - long loginRetryBackoffMaxMs, - Integer loginConnectTimeoutMs, - Integer loginReadTimeoutMs, - boolean urlencodeHeader) { - this.clientId = Objects.requireNonNull(clientId); - this.clientSecret = Objects.requireNonNull(clientSecret); - this.scope = scope; - this.sslSocketFactory = sslSocketFactory; - this.tokenEndpointUrl = Objects.requireNonNull(tokenEndpointUrl); - this.loginRetryBackoffMs = loginRetryBackoffMs; - this.loginRetryBackoffMaxMs = loginRetryBackoffMaxMs; - this.loginConnectTimeoutMs = loginConnectTimeoutMs; - this.loginReadTimeoutMs = loginReadTimeoutMs; - this.urlencodeHeader = urlencodeHeader; + this.loginRetryBackoffMs = cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MS); + this.loginRetryBackoffMaxMs = 
cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MAX_MS); + this.loginConnectTimeoutMs = cu.validateInteger(SASL_LOGIN_CONNECT_TIMEOUT_MS, false); + this.loginReadTimeoutMs = cu.validateInteger(SASL_LOGIN_READ_TIMEOUT_MS, false); } /** @@ -151,15 +136,12 @@ public HttpAccessTokenRetriever(String clientId, * * @return Non-null JWT access token string * - * @throws IOException Thrown on errors related to IO during retrieval + * @throws JwtRetrieverException Thrown on errors related to IO, parsing, etc. during retrieval */ - - @Override - public String retrieve() throws IOException { - String authorizationHeader = formatAuthorizationHeader(clientId, clientSecret, urlencodeHeader); - String requestBody = formatRequestBody(scope); + public String retrieve() throws JwtRetrieverException { + String requestBody = requestFormatter.formatBody(); Retry retry = new Retry<>(loginRetryBackoffMs, loginRetryBackoffMaxMs); - Map headers = Collections.singletonMap(AUTHORIZATION_HEADER, authorizationHeader); + Map headers = requestFormatter.formatHeaders(); String responseBody; @@ -168,7 +150,7 @@ public String retrieve() throws IOException { HttpURLConnection con = null; try { - con = (HttpURLConnection) new URL(tokenEndpointUrl).openConnection(); + con = (HttpURLConnection) tokenEndpointUrl.openConnection(); if (sslSocketFactory != null && con instanceof HttpsURLConnection) ((HttpsURLConnection) con).setSSLSocketFactory(sslSocketFactory); @@ -182,13 +164,14 @@ public String retrieve() throws IOException { } }); } catch (ExecutionException e) { - if (e.getCause() instanceof IOException) - throw (IOException) e.getCause(); + if (e.getCause() instanceof JwtRetrieverException) + throw (JwtRetrieverException) e.getCause(); else throw new KafkaException(e.getCause()); } - return parseAccessToken(responseBody); + JwtResponseParser responseParser = new JwtResponseParser(); + return responseParser.parseJwt(responseBody); } public static String post(HttpURLConnection con, @@ -330,71 +313,4 @@ static String formatErrorMessage(String errorResponseBody) { } return String.format("{%s}", errorResponseBody); } - - static String parseAccessToken(String responseBody) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - JsonNode rootNode = mapper.readTree(responseBody); - JsonNode accessTokenNode = rootNode.at("/access_token"); - - if (accessTokenNode == null) { - // Only grab the first N characters so that if the response body is huge, we don't - // blow up. - String snippet = responseBody; - - if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) { - int actualLength = responseBody.length(); - String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH); - snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength); - } - - throw new IOException(String.format("The token endpoint response did not contain an access_token value. 
Response: (%s)", snippet)); - } - - return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue()); - } - - static String formatAuthorizationHeader(String clientId, String clientSecret, boolean urlencode) { - clientId = sanitizeString("the token endpoint request client ID parameter", clientId); - clientSecret = sanitizeString("the token endpoint request client secret parameter", clientSecret); - - // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 - if (urlencode) { - clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8); - clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8); - } - - String s = String.format("%s:%s", clientId, clientSecret); - // Per RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. - String encoded = Base64.getEncoder().encodeToString(Utils.utf8(s)); - return String.format("Basic %s", encoded); - } - - static String formatRequestBody(String scope) { - StringBuilder requestParameters = new StringBuilder(); - requestParameters.append("grant_type=client_credentials"); - - if (scope != null && !scope.trim().isEmpty()) { - scope = scope.trim(); - String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8); - requestParameters.append("&scope=").append(encodedScope); - } - - return requestParameters.toString(); - } - - private static String sanitizeString(String name, String value) { - if (value == null) - throw new IllegalArgumentException(String.format("The value for %s must be non-null", name)); - - if (value.isEmpty()) - throw new IllegalArgumentException(String.format("The value for %s must be non-empty", name)); - - value = value.trim(); - - if (value.isEmpty()) - throw new IllegalArgumentException(String.format("The value for %s must not contain only whitespace", name)); - - return value; - } - } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpRequestFormatter.java similarity index 75% rename from clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java rename to clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpRequestFormatter.java index fc2e3d2a2e83a..a1a63603a6170 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpRequestFormatter.java @@ -14,14 +14,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.kafka.common.security.oauthbearer.internals.secured; -public class LoginAccessTokenValidatorTest extends AccessTokenValidatorTest { +import java.util.Map; + +public interface HttpRequestFormatter { - @Override - protected AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder builder) { - return new LoginAccessTokenValidator(builder.scopeClaimName(), builder.subjectClaimName()); - } + Map formatHeaders(); + String formatBody(); } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java index 3e49595dbc1b2..ec6d3daafe8e2 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java @@ -20,10 +20,12 @@ import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; import org.apache.kafka.common.security.ssl.DefaultSslEngineFactory; import org.apache.kafka.common.security.ssl.SslFactory; +import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,6 +54,10 @@ public JaasOptionsUtils(Map options) { this.options = options; } + public JaasOptionsUtils(String saslMechanism, List jaasConfigEntries) { + this.options = getOptions(saslMechanism, jaasConfigEntries); + } + public static Map getOptions(String saslMechanism, List jaasConfigEntries) { if (!OAuthBearerLoginModule.OAUTHBEARER_MECHANISM.equals(saslMechanism)) throw new IllegalArgumentException(String.format("Unexpected SASL mechanism: %s", saslMechanism)); @@ -62,6 +68,10 @@ public static Map getOptions(String saslMechanism, ListJwksFileVerificationKeyResolver is a {@link VerificationKeyResolver} implementation @@ -79,41 +83,46 @@ * @see org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL * @see VerificationKeyResolver */ - public class JwksFileVerificationKeyResolver implements CloseableVerificationKeyResolver { private static final Logger log = LoggerFactory.getLogger(JwksFileVerificationKeyResolver.class); - private final Path jwksFile; - - private VerificationKeyResolver delegate; - - public JwksFileVerificationKeyResolver(Path jwksFile) { - this.jwksFile = jwksFile; - } + private CachedFile delegate; @Override - public void init() throws IOException { - log.debug("Starting creation of new VerificationKeyResolver from {}", jwksFile); - String json = Utils.readFileAsString(jwksFile.toFile().getPath()); - - JsonWebKeySet jwks; - - try { - jwks = new JsonWebKeySet(json); - } catch (JoseException e) { - throw new IOException(e); - } - - delegate = new JwksVerificationKeyResolver(jwks.getJsonWebKeys()); + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + File file = cu.validateFileUrl(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); + delegate = new CachedFile<>(file, new VerificationKeyResolverTransformer(), lastModifiedPolicy()); } @Override public Key resolveKey(JsonWebSignature jws, List nestingContext) throws 
UnresolvableKeyException { if (delegate == null) - throw new UnresolvableKeyException("VerificationKeyResolver delegate is null; please call init() first"); + throw new UnresolvableKeyException("VerificationKeyResolver delegate is null; please call configure() first"); - return delegate.resolveKey(jws, nestingContext); + return delegate.transformed().resolveKey(jws, nestingContext); } + /** + * "Transforms" the raw file contents into a {@link VerificationKeyResolver} that can be used to resolve + * the keys provided in the JWT. + */ + private static class VerificationKeyResolverTransformer implements CachedFile.Transformer { + + @Override + public VerificationKeyResolver transform(File file, String contents) { + log.debug("Starting creation of new VerificationKeyResolver from {}", file.getPath()); + + JsonWebKeySet jwks; + + try { + jwks = new JsonWebKeySet(contents); + } catch (Exception e) { + throw new ConfigException(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file.getPath(), e.getMessage()); + } + + return new JwksVerificationKeyResolver(jwks.getJsonWebKeys()); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtBearerRequestFormatter.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtBearerRequestFormatter.java new file mode 100644 index 0000000000000..495d1434d98ab --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtBearerRequestFormatter.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured; + + +import org.apache.kafka.common.utils.Utils; + +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +public class JwtBearerRequestFormatter implements HttpRequestFormatter { + + public static final String GRANT_TYPE = "urn:ietf:params:oauth:grant-type:jwt-bearer"; + + private final String scope; + private final Supplier assertionSupplier; + + public JwtBearerRequestFormatter(String scope, Supplier assertionSupplier) { + this.scope = scope; + this.assertionSupplier = assertionSupplier; + } + + @Override + public String formatBody() { + String assertion = assertionSupplier.get(); + StringBuilder requestParameters = new StringBuilder(); + requestParameters.append("grant_type=").append(URLEncoder.encode(GRANT_TYPE, StandardCharsets.UTF_8)); + requestParameters.append("&assertion=").append(URLEncoder.encode(assertion, StandardCharsets.UTF_8)); + + if (!Utils.isBlank(scope)) + requestParameters.append("&scope=").append(URLEncoder.encode(scope.trim(), StandardCharsets.UTF_8)); + + return requestParameters.toString(); + } + + @Override + public Map formatHeaders() { + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Cache-Control", "no-cache"); + headers.put("Content-Type", "application/x-www-form-urlencoded"); + return headers; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParser.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParser.java new file mode 100644 index 0000000000000..bab996cd3e965 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParser.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.JwtRetrieverException; +import org.apache.kafka.common.utils.Utils; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.IOException; + +public class JwtResponseParser { + + private static final String[] JSON_PATHS = new String[] {"/access_token", "/id_token"}; + private static final int MAX_RESPONSE_BODY_LENGTH = 1000; + + public String parseJwt(String responseBody) throws JwtRetrieverException { + ObjectMapper mapper = new ObjectMapper(); + JsonNode rootNode; + + try { + rootNode = mapper.readTree(responseBody); + } catch (IOException e) { + throw new JwtRetrieverException(e); + } + + for (String jsonPath : JSON_PATHS) { + JsonNode node = rootNode.at(jsonPath); + + if (node != null && !node.isMissingNode()) { + String value = node.textValue(); + + if (!Utils.isBlank(value)) { + return value.trim(); + } + } + } + + // Only grab the first N characters so that if the response body is huge, we don't blow up. + String snippet = responseBody; + + if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) { + int actualLength = responseBody.length(); + String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH); + snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength); + } + + throw new JwtRetrieverException(String.format("The token endpoint response did not contain a valid JWT. Response: (%s)", snippet)); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerConfigurable.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerConfigurable.java new file mode 100644 index 0000000000000..4c721e17bff2a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerConfigurable.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.Configurable; +import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.login.AppConfigurationEntry; + +/** + * Analogue to {@link Configurable} for OAuth-based authentication. This interface presents a similar + * method signature as that of the {@link AuthenticateCallbackHandler} interface. 
However, this interface is + * needed because {@link AuthenticateCallbackHandler} extends the JDK's {@link CallbackHandler} interface. + * + *

+ * <p/>
+ *
+ * Note:
+ *
+ * <ol>
+ *     <li>Any class that implements this interface should initialize resources via
+ *     {@link #configure(Map, String, List)} and release them via {@link #close()}.</li>
+ *     <li>Any class that instantiates an object that implements {@code OAuthBearerConfigurable}
+ *     must properly call that object's ({@link #configure(Map, String, List)} and {@link #close()}) methods
+ *     so that the object can initialize and release resources.</li>
+ * </ol>
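For illustration, a minimal sketch of that lifecycle, assuming a hypothetical implementation class with an executor as its only resource (imports omitted for brevity):

    // Hypothetical example: the resource is acquired in configure() and released in close().
    public class ExampleOAuthBearerConfigurable implements OAuthBearerConfigurable {

        private ScheduledExecutorService executor;

        @Override
        public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
            // Callers must invoke configure() before using this object.
            executor = Executors.newSingleThreadScheduledExecutor();
        }

        @Override
        public void close() {
            // Callers must invoke close() so that anything acquired in configure() is released.
            if (executor != null)
                executor.shutdown();
        }
    }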
    + */ +public interface OAuthBearerConfigurable extends Closeable { + + /** + * Configures this object for the specified SASL mechanism. + * + * @param configs Key-value pairs containing the parsed configuration options of + * the client or broker. Note that these are the Kafka configuration options + * and not the JAAS configuration options. JAAS config options may be obtained + * from `jaasConfigEntries`. For configs that may be specified as both Kafka config + * as well as JAAS config (e.g. sasl.kerberos.service.name), the configuration + * is treated as invalid if conflicting values are provided. + * @param saslMechanism Negotiated SASL mechanism. For clients, this is the SASL + * mechanism configured for the client. For brokers, this is the mechanism + * negotiated with the client and is one of the mechanisms enabled on the broker. + * @param jaasConfigEntries JAAS configuration entries from the JAAS login context. + * This list contains a single entry for clients and may contain more than + * one entry for brokers if multiple mechanisms are enabled on a listener using + * static JAAS configuration where there is no mapping between mechanisms and + * login module entries. In this case, implementations can use the login module in + * `jaasConfigEntries` to identify the entry corresponding to `saslMechanism`. + * Alternatively, dynamic JAAS configuration option + * {@link org.apache.kafka.common.config.SaslConfigs#SASL_JAAS_CONFIG} may be + * configured on brokers with listener and mechanism prefix, in which case + * only the configuration entry corresponding to `saslMechanism` will be provided + * in `jaasConfigEntries`. + */ + default void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + + } + + /** + * Closes any resources that were initialized by {@link #configure(Map, String, List)}. + */ + default void close() throws IOException { + // Do nothing... + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java index 62261fed58df8..d8014010a7d7a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; +import org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator; import org.apache.kafka.common.utils.Time; import org.jose4j.jwk.HttpsJwks; @@ -25,7 +26,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.io.IOException; import java.util.Collections; import java.util.LinkedHashMap; @@ -49,15 +49,14 @@ * This instance is created and provided to the * {@link org.jose4j.keys.resolvers.HttpsJwksVerificationKeyResolver} that is used when using * an HTTP-/HTTPS-based {@link org.jose4j.keys.resolvers.VerificationKeyResolver}, which is then - * provided to the {@link ValidatorAccessTokenValidator} to use in validating the signature of + * provided to the {@link BrokerJwtValidator} to use in validating the signature of * a JWT. 
* * @see org.jose4j.keys.resolvers.HttpsJwksVerificationKeyResolver * @see org.jose4j.keys.resolvers.VerificationKeyResolver - * @see ValidatorAccessTokenValidator + * @see BrokerJwtValidator */ - -public final class RefreshingHttpsJwks implements Initable, Closeable { +public final class RefreshingHttpsJwks implements OAuthBearerConfigurable { private static final Logger log = LoggerFactory.getLogger(RefreshingHttpsJwks.class); @@ -171,7 +170,6 @@ public RefreshingHttpsJwks(Time time, this(time, httpsJwks, refreshMs, refreshRetryBackoffMs, refreshRetryBackoffMaxMs, Executors.newSingleThreadScheduledExecutor()); } - @Override public void init() throws IOException { try { log.debug("init started"); @@ -375,5 +373,4 @@ public boolean maybeExpediteRefresh(String keyId) { } } } - } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java index 52d0c6c39785f..d6f6a01089419 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java @@ -17,6 +17,8 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; +import org.apache.kafka.common.KafkaException; + import org.jose4j.jwk.HttpsJwks; import org.jose4j.jwk.JsonWebKey; import org.jose4j.jwk.VerificationJwkSelector; @@ -31,6 +33,9 @@ import java.io.IOException; import java.security.Key; import java.util.List; +import java.util.Map; + +import javax.security.auth.login.AppConfigurationEntry; /** * RefreshingHttpsJwksVerificationKeyResolver is a @@ -80,7 +85,6 @@ * @see RefreshingHttpsJwks * @see HttpsJwks */ - public class RefreshingHttpsJwksVerificationKeyResolver implements CloseableVerificationKeyResolver { private static final Logger log = LoggerFactory.getLogger(RefreshingHttpsJwksVerificationKeyResolver.class); @@ -97,15 +101,14 @@ public RefreshingHttpsJwksVerificationKeyResolver(RefreshingHttpsJwks refreshing } @Override - public void init() throws IOException { + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { try { - log.debug("init started"); - + log.debug("configure started"); refreshingHttpsJwks.init(); + } catch (IOException e) { + throw new KafkaException(e); } finally { isInitialized = true; - - log.debug("init completed"); } } @@ -123,7 +126,7 @@ public void close() { @Override public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { if (!isInitialized) - throw new IllegalStateException("Please call init() first"); + throw new IllegalStateException("Please call configure() first"); try { List jwks = refreshingHttpsJwks.getJsonWebKeys(); @@ -148,5 +151,4 @@ public Key resolveKey(JsonWebSignature jws, List nestingContex throw new UnresolvableKeyException(sb, e); } } - } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java index f45865fa63848..b9a500410964b 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java +++ 
b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java @@ -17,6 +17,8 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; +import org.apache.kafka.common.security.oauthbearer.JwtValidatorException; + /** * SerializedJwt provides a modicum of structure and validation around a JWT's serialized form by * splitting and making the three sections (header, payload, and signature) available to the user. @@ -39,12 +41,12 @@ public SerializedJwt(String token) { token = token.trim(); if (token.isEmpty()) - throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); String[] splits = token.split("\\."); if (splits.length != 3) - throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); this.token = token.trim(); this.header = validateSection(splits[0]); @@ -92,11 +94,11 @@ public String getSignature() { return signature; } - private String validateSection(String section) throws ValidateException { + private String validateSection(String section) throws JwtValidatorException { section = section.trim(); if (section.isEmpty()) - throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); return section; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java index 0422045fc029d..85ad53246beda 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java @@ -17,55 +17,71 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; +import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.utils.Time; import org.jose4j.http.Get; import org.jose4j.jwk.HttpsJwks; +import org.jose4j.jws.JsonWebSignature; +import org.jose4j.jwx.JsonWebStructure; +import org.jose4j.lang.UnresolvableKeyException; +import java.io.IOException; import java.net.URL; -import java.nio.file.Path; +import java.security.Key; +import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import javax.net.ssl.SSLSocketFactory; +import javax.security.auth.login.AppConfigurationEntry; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL; +/** + * Because a {@link CloseableVerificationKeyResolver} instance can spawn threads and issue + * HTTP(S) 
calls ({@link RefreshingHttpsJwksVerificationKeyResolver}), we only want to create + * a new instance for each particular set of configuration. Because each set of configuration + * may have multiple instances, we want to reuse the single instance. + */ public class VerificationKeyResolverFactory { - /** - * Create an {@link AccessTokenRetriever} from the given - * {@link org.apache.kafka.common.config.SaslConfigs}. - * - * Note: the returned CloseableVerificationKeyResolver is not - * initialized here and must be done by the caller. - * - * Primarily exposed here for unit testing. - * - * @param configs SASL configuration - * - * @return Non-null {@link CloseableVerificationKeyResolver} - */ - public static CloseableVerificationKeyResolver create(Map configs, - Map jaasConfig) { - return create(configs, null, jaasConfig); + private static final Map CACHE = new HashMap<>(); + + public static synchronized CloseableVerificationKeyResolver get(Map configs, + String saslMechanism, + List jaasConfigEntries) { + VerificationKeyResolverKey key = new VerificationKeyResolverKey(configs, saslMechanism, jaasConfigEntries); + + return CACHE.computeIfAbsent(key, k -> + new RefCountingVerificationKeyResolver( + create( + configs, + saslMechanism, + jaasConfigEntries + ) + ) + ); } - public static CloseableVerificationKeyResolver create(Map configs, - String saslMechanism, - Map jaasConfig) { + static CloseableVerificationKeyResolver create(Map configs, + String saslMechanism, + List jaasConfigEntries) { ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); URL jwksEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); + CloseableVerificationKeyResolver resolver; if (jwksEndpointUrl.getProtocol().toLowerCase(Locale.ROOT).equals("file")) { - Path p = cu.validateFile(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); - return new JwksFileVerificationKeyResolver(p); + resolver = new JwksFileVerificationKeyResolver(); } else { long refreshIntervalMs = cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS, true, 0L); - JaasOptionsUtils jou = new JaasOptionsUtils(jaasConfig); + JaasOptionsUtils jou = new JaasOptionsUtils(saslMechanism, jaasConfigEntries); SSLSocketFactory sslSocketFactory = null; if (jou.shouldCreateSSLSocketFactory(jwksEndpointUrl)) @@ -85,8 +101,87 @@ public static CloseableVerificationKeyResolver create(Map configs, refreshIntervalMs, cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS), cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS)); - return new RefreshingHttpsJwksVerificationKeyResolver(refreshingHttpsJwks); + resolver = new RefreshingHttpsJwksVerificationKeyResolver(refreshingHttpsJwks); + } + + resolver.configure(configs, saslMechanism, jaasConfigEntries); + return resolver; + } + + /** + * VkrKey is a simple structure which encapsulates the criteria for different + * sets of configuration. This will allow us to use this object as a key in a {@link Map} + * to keep a single instance per key. 
+ */ + + private static class VerificationKeyResolverKey { + + private final Map configs; + + private final String saslMechanism; + + private final Map moduleOptions; + + public VerificationKeyResolverKey(Map configs, + String saslMechanism, + List jaasConfigEntries) { + this.configs = configs; + this.saslMechanism = saslMechanism; + this.moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + VerificationKeyResolverKey that = (VerificationKeyResolverKey) o; + return configs.equals(that.configs) && saslMechanism.equals(that.saslMechanism) && moduleOptions.equals(that.moduleOptions); + } + + @Override + public int hashCode() { + return Objects.hash(configs, saslMechanism, moduleOptions); } } + /** + * RefCountingVerificationKeyResolver allows us to share a single + * {@link CloseableVerificationKeyResolver} instance between multiple + * {@link AuthenticateCallbackHandler} instances and perform the lifecycle methods the + * appropriate number of times. + */ + + private static class RefCountingVerificationKeyResolver implements CloseableVerificationKeyResolver { + + private final CloseableVerificationKeyResolver delegate; + + private final AtomicInteger count = new AtomicInteger(0); + + public RefCountingVerificationKeyResolver(CloseableVerificationKeyResolver delegate) { + this.delegate = delegate; + } + + @Override + public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { + return delegate.resolveKey(jws, nestingContext); + } + + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + if (count.incrementAndGet() == 1) + delegate.configure(configs, saslMechanism, jaasConfigEntries); + } + + @Override + public void close() throws IOException { + if (count.decrementAndGet() == 0) + delegate.close(); + } + } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionCreator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionCreator.java new file mode 100644 index 0000000000000..5c619c6369381 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionCreator.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import java.io.Closeable; +import java.io.IOException; +import java.security.GeneralSecurityException; + +/** + * {@code AssertionCreator} is used to create a client-signed OAuth assertion that can be used with different + * grant types. See RFC 7521 for specifics. + * + *

+ *
+ * The assertion creator has three main steps:
+ *
+ * <ol>
+ *     <li>Create the JWT header</li>
+ *     <li>Create the JWT payload</li>
+ *     <li>Sign</li>
+ * </ol>
+ *

+ *
+ * Step 1 is to dynamically create the JWT header. The implementation may add whatever values it needs, but
+ * the {@code alg} (algorithm), {@code kid} (key ID), and {@code typ} (type) are usually present. Here is
+ * an example of the JSON version of the JWT header:
+ *

+ * <pre>
+ * {
+ *   "kid": "9d82418e64e0541066637ca8592d459c",
+ *   "alg": "RS256",
+ *   "typ": "JWT"
+ * }
+ * </pre>
+ *

+ *
+ * Step 2 is to create the JWT payload from the claims provided to {@link #create(AssertionJwtTemplate)}. Depending on the
+ * implementation, other claims may be dynamically generated and added to the JWT payload. Or, some of the
+ * claims in the incoming map could be ignored or modified. Here's an example where the implementation has
+ * added the {@code iat} (issued at) and {@code exp} (expires) claims:
+ *

+ * <pre>
+ * {
+ *   "iat": 1741121401,
+ *   "exp": 1741125001,
+ *   "sub": "some-service-account",
+ *   "aud": "my_audience",
+ *   "iss": "https://example.com",
+ *   "...": "..."
+ * }
+ * </pre>
+ *

+ *
+ * Step 3 is to use the configured private key to sign the header and payload and serialize the result in the
+ * compact JWT format. The means by which the private key (if any) is made available for use is up to the
+ * implementation. The private key could be loaded from a file, downloaded from a trusted resource,
+ * embedded in the configuration, etc.
+ */
+public interface AssertionCreator extends Closeable {
+
+ /**
+ * Creates and signs an OAuth assertion by converting the given claims into a JWT and then signing it using
+ * the configured algorithm.
+ *

    + * + * @param template {@link AssertionJwtTemplate} with optional header and/or claims to include in the JWT + */ + String create(AssertionJwtTemplate template) throws GeneralSecurityException, IOException; + + /** + * Closes any resources used by this implementation. The default implementation of + * this method is a no op, for convenience to implementors. + */ + @Override + default void close() throws IOException { + // Do nothing... + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionJwtTemplate.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionJwtTemplate.java new file mode 100644 index 0000000000000..ce6599c1b1d66 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionJwtTemplate.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; + +/** + * {@code AssertionJwtTemplate} is used to provide values for use by {@link AssertionCreator}. + * The JWT header and/or payload used in the assertion likely requires headers and claims. Not all identity + * providers require the same set of headers and claims; some may require a given header or claim while + * other identity providers may prohibit it. In order to provide the most flexibility, the header + * values and claims that are to be included in the JWT can be added via a template. + * + *
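As a sketch only (the class below is hypothetical; the patch's StaticAssertionJwtTemplate, referenced from AssertionUtils, fills a similar role), an implementation can simply return fixed maps built from standard Java types:

    public class ExampleAssertionJwtTemplate implements AssertionJwtTemplate {

        @Override
        public Map<String, Object> header() {
            return Map.of("kid", "f829d41b06f14f9e");    // example key ID
        }

        @Override
        public Map<String, Object> payload() {
            return Map.of(
                "sub", "some-service-account",
                "aud", "my_audience",
                "iss", "https://example.com"
            );
        }
    }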

+ *
+ * Both the {@link #header()} and {@link #payload()} APIs return a map of Objects. This is because the
+ * JSON specification allows values to be one of the following "types": objects, arrays, strings, numbers,
+ * booleans, and null.
+ *
    + * + * However, because the maps must be converted into JSON, it's important that any nested types use standard + * Java type equivalents (Map, List, String, Integer, Double, and Boolean) so that the JSON library will + * know how to serialize the entire object graph. + */ +public interface AssertionJwtTemplate extends Closeable { + + /** + * Returns a map containing zero or more header values. + * + * @return Values to include in the JWT header + */ + Map header(); + + /** + * Returns a map containing zero or more JWT payload claim values. + * + * @return Values to include in the JWT payload + */ + Map payload(); + + /** + * Closes any resources used by this implementation. The default implementation of + * this method is a no op, for convenience to implementors. + */ + @Override + default void close() throws IOException { + // Do nothing... + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionUtils.java new file mode 100644 index 0000000000000..c4eed76e195be --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/AssertionUtils.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.utils.Time; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.Signature; +import java.security.spec.PKCS8EncodedKeySpec; +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.crypto.Cipher; +import javax.crypto.EncryptedPrivateKeyInfo; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE; + +/** + * Set of utilities for the OAuth JWT assertion logic. + */ +public class AssertionUtils { + + public static final String TOKEN_SIGNING_ALGORITHM_RS256 = "RS256"; + public static final String TOKEN_SIGNING_ALGORITHM_ES256 = "ES256"; + + /** + * Inspired by {@code org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.PemStore}, which is not + * visible to reuse directly. 
+ */ + public static PrivateKey privateKey(byte[] privateKeyContents, + Optional passphrase) throws GeneralSecurityException, IOException { + PKCS8EncodedKeySpec keySpec; + + if (passphrase.isPresent()) { + EncryptedPrivateKeyInfo keyInfo = new EncryptedPrivateKeyInfo(privateKeyContents); + String algorithm = keyInfo.getAlgName(); + SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(algorithm); + SecretKey pbeKey = secretKeyFactory.generateSecret(new PBEKeySpec(passphrase.get().toCharArray())); + Cipher cipher = Cipher.getInstance(algorithm); + cipher.init(Cipher.DECRYPT_MODE, pbeKey, keyInfo.getAlgParameters()); + keySpec = keyInfo.getKeySpec(cipher); + } else { + byte[] pkcs8EncodedBytes = Base64.getDecoder().decode(privateKeyContents); + keySpec = new PKCS8EncodedKeySpec(pkcs8EncodedBytes); + } + + KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + return keyFactory.generatePrivate(keySpec); + } + + public static Signature getSignature(String algorithm) throws GeneralSecurityException { + if (algorithm.equalsIgnoreCase(TOKEN_SIGNING_ALGORITHM_RS256)) { + return Signature.getInstance("SHA256withRSA"); + } else if (algorithm.equalsIgnoreCase(TOKEN_SIGNING_ALGORITHM_ES256)) { + return Signature.getInstance("SHA256withECDSA"); + } else { + throw new NoSuchAlgorithmException(String.format("Unsupported signing algorithm: %s", algorithm)); + } + } + + public static String sign(String algorithm, PrivateKey privateKey, String contentToSign) throws GeneralSecurityException { + Signature signature = getSignature(algorithm); + signature.initSign(privateKey); + signature.update(contentToSign.getBytes(StandardCharsets.UTF_8)); + byte[] signedContent = signature.sign(); + return Base64.getUrlEncoder().withoutPadding().encodeToString(signedContent); + } + + public static Optional staticAssertionJwtTemplate(ConfigurationUtils cu) { + if (cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD) || + cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS) || + cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB)) { + Map staticClaimsPayload = new HashMap<>(); + + if (cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD)) + staticClaimsPayload.put("aud", cu.validateString(SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD)); + + if (cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS)) + staticClaimsPayload.put("iss", cu.validateString(SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS)); + + if (cu.containsKey(SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB)) + staticClaimsPayload.put("sub", cu.validateString(SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB)); + + Map header = Map.of(); + return Optional.of(new StaticAssertionJwtTemplate(header, staticClaimsPayload)); + } else { + return Optional.empty(); + } + } + + public static Optional fileAssertionJwtTemplate(ConfigurationUtils cu) { + if (cu.containsKey(SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE)) { + File assertionTemplateFile = cu.validateFile(SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE); + return Optional.of(new FileAssertionJwtTemplate(assertionTemplateFile)); + } else { + return Optional.empty(); + } + } + + public static DynamicAssertionJwtTemplate dynamicAssertionJwtTemplate(ConfigurationUtils cu, Time time) { + String algorithm = cu.validateString(SASL_OAUTHBEARER_ASSERTION_ALGORITHM); + int expSeconds = cu.validateInteger(SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, true); + int nbfSeconds = cu.validateInteger(SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, true); + boolean includeJti = cu.validateBoolean(SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, true); + return new 
DynamicAssertionJwtTemplate(time, algorithm, expSeconds, nbfSeconds, includeJti); + } + + public static LayeredAssertionJwtTemplate layeredAssertionJwtTemplate(ConfigurationUtils cu, Time time) { + List templates = new ArrayList<>(); + staticAssertionJwtTemplate(cu).ifPresent(templates::add); + fileAssertionJwtTemplate(cu).ifPresent(templates::add); + templates.add(dynamicAssertionJwtTemplate(cu, time)); + return new LayeredAssertionJwtTemplate(templates); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreator.java new file mode 100644 index 0000000000000..52b9eb2fb536c --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreator.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.security.oauthbearer.JwtRetrieverException; +import org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile; +import org.apache.kafka.common.utils.Utils; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.PrivateKey; +import java.util.Base64; +import java.util.Optional; + +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.RefreshPolicy.lastModifiedPolicy; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.privateKey; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.sign; + +/** + * This is the "default" {@link AssertionCreator} in that it is the common case of using a configured signing + * algorithm, private key file, and optional passphrase to sign a JWT to dynamically create an assertion. + * + *
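A usage sketch of this class, assuming an RS256 PKCS#8 private key on disk; the path, algorithm, lifetimes, and lack of passphrase are illustrative choices rather than defaults from this patch:

    AssertionJwtTemplate template = new DynamicAssertionJwtTemplate(Time.SYSTEM, "RS256", 3600, 60, true);
    AssertionCreator creator = new DefaultAssertionCreator(
        "RS256",                                  // signing algorithm
        new File("/path/to/private-key.pem"),     // private key file (illustrative path)
        Optional.empty()                          // no passphrase
    );
    String assertion = creator.create(template);
    creator.close();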

    + * + * The provided private key file will be cached in memory but will be refreshed when the file changes. + * Note: there is not yet a facility to reload the configured passphrase. If using a private key + * passphrase, either use the same passphrase for each private key or else restart the client/application + * so that the new private key and passphrase will be used. + */ +public class DefaultAssertionCreator implements AssertionCreator { + + private static final Base64.Encoder BASE64_ENCODER = Base64.getUrlEncoder().withoutPadding(); + private final String algorithm; + private final CachedFile privateKeyFile; + + public DefaultAssertionCreator(String algorithm, File privateKeyFile, Optional passphrase) { + this.algorithm = algorithm; + + this.privateKeyFile = new CachedFile<>( + privateKeyFile, + new PrivateKeyTransformer(passphrase), + lastModifiedPolicy() + ); + } + + @Override + public String create(AssertionJwtTemplate template) throws GeneralSecurityException, IOException { + ObjectMapper mapper = new ObjectMapper(); + String header = BASE64_ENCODER.encodeToString(Utils.utf8(mapper.writeValueAsString(template.header()))); + String payload = BASE64_ENCODER.encodeToString(Utils.utf8(mapper.writeValueAsString(template.payload()))); + String content = header + "." + payload; + PrivateKey privateKey = privateKeyFile.transformed(); + String signedContent = sign(algorithm, privateKey, content); + return content + "." + signedContent; + } + + private static class PrivateKeyTransformer implements CachedFile.Transformer { + + private final Optional passphrase; + + public PrivateKeyTransformer(Optional passphrase) { + this.passphrase = passphrase; + } + + @Override + public PrivateKey transform(File file, String contents) { + try { + contents = contents.replace("-----BEGIN PRIVATE KEY-----", "") + .replace("-----END PRIVATE KEY-----", "") + .replace("\n", ""); + + return privateKey(contents.getBytes(StandardCharsets.UTF_8), passphrase); + } catch (GeneralSecurityException | IOException e) { + throw new JwtRetrieverException("An error occurred generating the OAuth assertion private key from " + file.getPath(), e); + } + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplate.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplate.java new file mode 100644 index 0000000000000..ef1f45e4d1c98 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplate.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.utils.Time; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * A "dynamic" {@link AssertionJwtTemplate} is that which will dynamically add the following values + * at runtime: + * + *

+ * <ul>
+ *     <li>{@code alg} (Algorithm) header</li>
+ *     <li>{@code typ} (Type) header</li>
+ *     <li>{@code iat} (Issued at) timestamp claim (in seconds)</li>
+ *     <li>{@code exp} (Expiration) timestamp claim (in seconds)</li>
+ *     <li>{@code nbf} (Not before) timestamp claim (in seconds)</li>
+ *     <li>(Optionally) {@code jti} (JWT ID) claim</li>
+ * </ul>
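A usage sketch; the algorithm and lifetimes below are illustrative settings rather than defaults from this patch:

    AssertionJwtTemplate template = new DynamicAssertionJwtTemplate(
        Time.SYSTEM,    // org.apache.kafka.common.utils.Time
        "RS256",        // becomes the "alg" header value
        3600,           // "exp" is this many seconds after "iat"
        60,             // "nbf" is this many seconds before "iat"
        true            // include a random "jti" claim
    );
    Map<String, Object> header = template.header();      // {"alg": "RS256", "typ": "JWT"}
    Map<String, Object> payload = template.payload();    // "iat", "exp", "nbf" (and "jti"), in seconds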
    + */ +public class DynamicAssertionJwtTemplate implements AssertionJwtTemplate { + + private final Time time; + private final String algorithm; + private final int expSeconds; + private final int nbfSeconds; + private final boolean includeJti; + + public DynamicAssertionJwtTemplate(Time time, + String algorithm, + int expSeconds, + int nbfSeconds, + boolean includeJti) { + this.time = time; + this.algorithm = algorithm; + this.expSeconds = expSeconds; + this.nbfSeconds = nbfSeconds; + this.includeJti = includeJti; + } + + @Override + public Map header() { + Map values = new HashMap<>(); + values.put("alg", algorithm); + values.put("typ", "JWT"); + return Collections.unmodifiableMap(values); + } + + @Override + public Map payload() { + long currentTimeSecs = time.milliseconds() / 1000L; + + Map values = new HashMap<>(); + values.put("iat", currentTimeSecs); + values.put("exp", currentTimeSecs + expSeconds); + values.put("nbf", currentTimeSecs - nbfSeconds); + + if (includeJti) + values.put("jti", UUID.randomUUID().toString()); + + return Collections.unmodifiableMap(values); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreator.java new file mode 100644 index 0000000000000..a6eb1eb220840 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreator.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile; + +import java.io.File; +import java.io.IOException; +import java.security.GeneralSecurityException; + +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.RefreshPolicy.lastModifiedPolicy; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.STRING_JSON_VALIDATING_TRANSFORMER; + +/** + * An {@link AssertionCreator} which takes a file from which the pre-created assertion is loaded and returned. + * If the file changes on disk, it will be reloaded in memory without needing to restart the client/application. 
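A usage sketch (the file path is illustrative): the pre-created assertion is read from disk and returned as-is:

    AssertionCreator creator = new FileAssertionCreator(new File("/path/to/assertion.jwt"));
    String assertion = creator.create(null);    // the template argument is ignored by this implementation
    creator.close();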
+ */ +public class FileAssertionCreator implements AssertionCreator { + + private final CachedFile assertionFile; + + public FileAssertionCreator(File assertionFile) { + this.assertionFile = new CachedFile<>(assertionFile, STRING_JSON_VALIDATING_TRANSFORMER, lastModifiedPolicy()); + } + + @Override + public String create(AssertionJwtTemplate ignored) throws GeneralSecurityException, IOException { + return assertionFile.transformed(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplate.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplate.java new file mode 100644 index 0000000000000..83c82feb01504 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplate.java @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.File; +import java.util.Collections; +import java.util.Map; + +import static org.apache.kafka.common.security.oauthbearer.internals.secured.CachedFile.RefreshPolicy.lastModifiedPolicy; + +/** + * {@code FileAssertionJwtTemplate} is used by the user to specify a JSON file on disk that contains static values + * that can be loaded and used to construct the assertion. The file structure is a JSON containing optionally a + * header and/or payload top-level attribute. + * + *

    + * + * Here is a minimally viable JSON structure: + * + *

    + * {
    + * }
    + * 
+ * + * Of course, a template that minimal is not worth creating; in practice the file will contain more than this. + * + *

    + * + * Here is another, slightly less minimal JSON template: + * + *

    + * {
    + *    "header": {
    + *     "foo": 1
    + *   },
    + *    "payload": {
    + *     "bar": 2
    + *   }
    + * }
    + * 
    + * + * This provides a single header value and a single payload claim. + * + *

    + * + * A more realistic example template looks like so: + * + *

    + * {
    + *   "header": {
    + *     "kid": "f829d41b06f14f9e",
    + *     "some-random-header": 123456
    + *   },
    + *   "payload": {
    + *     "sub": "some-service-account",
    + *     "aud": "my_audience",
    + *     "iss": "https://example.com",
    + *     "useSomeResource": false,
    + *     "allowedAnimals": [
    + *       "cat",
    + *       "dog",
    + *       "hamster"
    + *     ]
    + *   }
    + * }
    + * 
    + * + * The AssertionCreator would accept the AssertionJwtTemplate and augment the template header and/or payload + * with dynamic values. For example, the above header would be augmented with the {@code alg} (algorithm) and + * {@code typ} (type) values per the OAuth RFC: + * + *
    + * {
    + *   "kid": "f829d41b06f14f9e",
    + *   "some-random-header": 123456,
    + *   "alg": "RS256",
    + *   "typ": "JWT"
    + * }
    + * 
    + * + * And the payload would also be augmented to add the {@code iat} (issued at) and {@code exp} (expiration) timestamps: + * + *
    + * {
    + *   "iat": 1741121401,
    + *   "exp": 1741125001,
    + *   "sub": "some-service-account",
    + *   "aud": "my_audience",
    + *   "iss": "https://example.com",
    + *   "useSomeResource": false,
    + *   "allowedAnimals": [
    + *     "cat",
    + *     "dog",
    + *     "hamster"
    + *   ]
    + * }
    + * 
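+ * <p/>
+ *
+ * As a brief usage sketch (the file path is hypothetical), the template simply exposes whatever the
+ * JSON file currently contains, reloading it when the file's last-modified time changes:
+ *
+ * <pre>{@code
+ * File templateFile = new File("/path/to/assertion-template.json");
+ * AssertionJwtTemplate template = new FileAssertionJwtTemplate(templateFile);
+ *
+ * Map<String, Object> header = template.header();    // the "header" object from the file, or empty
+ * Map<String, Object> payload = template.payload();  // the "payload" object from the file, or empty
+ * }</pre>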
    + */ +public class FileAssertionJwtTemplate implements AssertionJwtTemplate { + + @SuppressWarnings("unchecked") + private static final CachedFile.Transformer JSON_TRANSFORMER = (file, json) -> { + try { + ObjectMapper mapper = new ObjectMapper(); + Map map = (Map) mapper.readValue(json, Map.class); + + Map header = (Map) map.computeIfAbsent("header", k -> Map.of()); + Map payload = (Map) map.computeIfAbsent("payload", k -> Map.of()); + + return new CachedJwtTemplate(header, payload); + } catch (Exception e) { + throw new KafkaException("An error occurred parsing the OAuth assertion template file from " + file.getPath(), e); + } + }; + + private final CachedFile jsonFile; + + public FileAssertionJwtTemplate(File jsonFile) { + this.jsonFile = new CachedFile<>(jsonFile, JSON_TRANSFORMER, lastModifiedPolicy()); + } + + @Override + public Map header() { + return jsonFile.transformed().header; + } + + @Override + public Map payload() { + return jsonFile.transformed().payload; + } + + /** + * Internally, the cached file is represented by the two maps for the header and payload. + */ + private static class CachedJwtTemplate { + + private final Map header; + + private final Map payload; + + private CachedJwtTemplate(Map header, Map payload) { + this.header = Collections.unmodifiableMap(header); + this.payload = Collections.unmodifiableMap(payload); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/LayeredAssertionJwtTemplate.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/LayeredAssertionJwtTemplate.java new file mode 100644 index 0000000000000..847b622f97d2a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/LayeredAssertionJwtTemplate.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.utils.Utils; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * This {@link AssertionJwtTemplate} layers multiple templates to produce an aggregated template. + * This is used, in practice, to achieve a layered approach where templates added later take precedence + * over templates that appear earlier in the list. Take for example the following list of templates, + * added in this order: + * + *
+ * <ol>
+ *     <li>Static/configuration-based JWT headers and claims via {@link StaticAssertionJwtTemplate}</li>
+ *     <li>File-based JWT headers and claims via {@link FileAssertionJwtTemplate}</li>
+ *     <li>Dynamic JWT headers and claims via {@link DynamicAssertionJwtTemplate}</li>
+ * </ol>
+ * + * The templates are specified in ascending order of precedence. That is, the template at + * list index N+1 will effectively overwrite values provided by the template at index N. + * In the above example, the {@link DynamicAssertionJwtTemplate} (index 2) will overwrite any values + * specified by the {@link FileAssertionJwtTemplate} (index 1), which will in turn overwrite any values + * from the {@link StaticAssertionJwtTemplate}. + * + *
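+ * <p/>
+ *
+ * As a sketch of the example above (the constructor arguments for the individual templates are
+ * illustrative only), the three layers could be assembled like this:
+ *
+ * <pre>{@code
+ * AssertionJwtTemplate template = new LayeredAssertionJwtTemplate(
+ *     new StaticAssertionJwtTemplate(staticHeader, staticPayload),              // lowest precedence
+ *     new FileAssertionJwtTemplate(templateFile),
+ *     new DynamicAssertionJwtTemplate(Time.SYSTEM, "RS256", 3600, 60, true));   // highest precedence
+ *
+ * // header() and payload() merge the layers in list order, so later templates win.
+ * Map<String, Object> header = template.header();
+ * Map<String, Object> payload = template.payload();
+ * }</pre>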

+ * + * In practice, there shouldn't be much in the way of overwriting. The headers and claims provided + * by each layer are mostly distinct. For example, a {@link StaticAssertionJwtTemplate} loads values + * mainly from the configuration, such as the {@code iss} (Issuer) claim + * ({@link SaslConfigs#SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS}). Because the {@code iss} claim rarely + * changes, statically configuring it is sensible. However, other values, such as + * the {@code exp} (Expiration) claim, change dynamically over time, so specifying a static expiration + * value doesn't make much sense. + * + *

    + * + * There are probably cases where it may make sense to overwrite static configuration with values that + * are more up-to-date. In that case, the {@link FileAssertionJwtTemplate} allows the user to provide + * headers and claims via a file that can be reloaded when it is modified. So, for example, if the value + * of the iss (Issuer) claim changes temporarily, the user can update the assertion + * template file ({@link SaslConfigs#SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE}) to add an + * iss claim. In so doing, the template file will be reloaded, the + * {@code FileAssertionJwtTemplate} will overwrite the claim value in the generated assertion, and the + * client/application does not need to be restarted for the new value to take effect. Likewise, when the + * iss claim needs to be changed back to its normal value, the user can either update the + * template file with the new value, or simply remove the claim from the file altogether so that the + * original, static claim value is restored. + */ +public class LayeredAssertionJwtTemplate implements AssertionJwtTemplate { + + private final List templates; + + public LayeredAssertionJwtTemplate(AssertionJwtTemplate... templates) { + this.templates = Arrays.asList(templates); + } + + public LayeredAssertionJwtTemplate(List templates) { + this.templates = Collections.unmodifiableList(templates); + } + + @Override + public Map header() { + Map header = new HashMap<>(); + + for (AssertionJwtTemplate template : templates) + header.putAll(template.header()); + + return Collections.unmodifiableMap(header); + } + + @Override + public Map payload() { + Map payload = new HashMap<>(); + + for (AssertionJwtTemplate template : templates) + payload.putAll(template.payload()); + + return Collections.unmodifiableMap(payload); + } + + @Override + public void close() { + for (AssertionJwtTemplate template : templates) { + Utils.closeQuietly(template, "JWT assertion template"); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/StaticAssertionJwtTemplate.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/StaticAssertionJwtTemplate.java new file mode 100644 index 0000000000000..6d668f6406433 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/StaticAssertionJwtTemplate.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import java.util.Collections; +import java.util.Map; + +/** + * This {@link AssertionJwtTemplate} uses a static set of headers and claims provided on initialization. 
+ * The values typically come from configuration, and it is often used in conjunction with other templates + * such as {@link LayeredAssertionJwtTemplate}. + */ +public class StaticAssertionJwtTemplate implements AssertionJwtTemplate { + + private final Map header; + + private final Map payload; + + public StaticAssertionJwtTemplate() { + this.header = Map.of(); + this.payload = Map.of(); + } + + public StaticAssertionJwtTemplate(Map header, Map payload) { + this.header = Collections.unmodifiableMap(header); + this.payload = Collections.unmodifiableMap(payload); + } + + @Override + public Map header() { + return header; + } + + @Override + public Map payload() { + return payload; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java index 6b1148e291b4c..bea463a8d145a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java @@ -30,7 +30,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -295,12 +294,11 @@ public String subject() throws OAuthBearerIllegalTokenException { public static Map toMap(String split) throws OAuthBearerIllegalTokenException { Map retval = new HashMap<>(); try { - byte[] decode = Base64.getDecoder().decode(split); + byte[] decode = Base64.getUrlDecoder().decode(split); JsonNode jsonNode = new ObjectMapper().readTree(decode); if (jsonNode == null) throw new OAuthBearerIllegalTokenException(OAuthBearerValidationResult.newFailure("malformed JSON")); - for (Iterator> iterator = jsonNode.fields(); iterator.hasNext();) { - Entry entry = iterator.next(); + for (Entry entry : jsonNode.properties()) { retval.put(entry.getKey(), convert(entry.getValue())); } return Collections.unmodifiableMap(retval); diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java index 0a3d587df9060..a5068dc83abce 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java @@ -137,14 +137,14 @@ public void configure(Map configs) { SecurityUtils.addConfiguredSecurityProviders(this.configs); List cipherSuitesList = (List) configs.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG); - if (cipherSuitesList != null && !cipherSuitesList.isEmpty()) { + if (!cipherSuitesList.isEmpty()) { this.cipherSuites = cipherSuitesList.toArray(new String[0]); } else { this.cipherSuites = null; } List enabledProtocolsList = (List) configs.get(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); - if (enabledProtocolsList != null && !enabledProtocolsList.isEmpty()) { + if (!enabledProtocolsList.isEmpty()) { this.enabledProtocols = enabledProtocolsList.toArray(new String[0]); } else { this.enabledProtocols = null; diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java index 705aafaaa70db..88bedf0dcaa23 100644 
--- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.message.GetTelemetrySubscriptionsRequestData; import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; import org.apache.kafka.common.message.PushTelemetryRequestData; @@ -41,7 +42,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collections; @@ -51,6 +51,7 @@ import java.util.Optional; import java.util.Set; import java.util.StringJoiner; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; @@ -270,6 +271,7 @@ class DefaultClientTelemetrySender implements ClientTelemetrySender { private static final double INITIAL_PUSH_JITTER_LOWER = 0.5; private static final double INITIAL_PUSH_JITTER_UPPER = 1.5; + private final Set unsupportedCompressionTypes = ConcurrentHashMap.newKeySet(); private final ReadWriteLock lock = new ReentrantReadWriteLock(); private final Condition subscriptionLoaded = lock.writeLock().newCondition(); /* @@ -527,13 +529,13 @@ public void handleResponse(PushTelemetryResponse response) { @Override public void handleFailedGetTelemetrySubscriptionsRequest(KafkaException maybeFatalException) { log.debug("The broker generated an error for the get telemetry network API request", maybeFatalException); - handleFailedRequest(maybeFatalException != null); + handleFailedRequest(isRetryable(maybeFatalException)); } @Override public void handleFailedPushTelemetryRequest(KafkaException maybeFatalException) { log.debug("The broker generated an error for the push telemetry network API request", maybeFatalException); - handleFailedRequest(maybeFatalException != null); + handleFailedRequest(isRetryable(maybeFatalException)); } @Override @@ -627,6 +629,12 @@ public void initiateClose() { } } + private boolean isRetryable(final KafkaException maybeFatalException) { + return maybeFatalException == null || + (maybeFatalException instanceof RetriableException) || + (maybeFatalException.getCause() != null && maybeFatalException.getCause() instanceof RetriableException); + } + private Optional> createSubscriptionRequest(ClientTelemetrySubscription localSubscription) { /* If we've previously retrieved a subscription, it will contain the client instance ID @@ -714,12 +722,26 @@ private Optional> createPushRequest(ClientTelemetrySubscription local return Optional.empty(); } - CompressionType compressionType = ClientTelemetryUtils.preferredCompressionType(localSubscription.acceptedCompressionTypes()); + CompressionType compressionType = ClientTelemetryUtils.preferredCompressionType(localSubscription.acceptedCompressionTypes(), unsupportedCompressionTypes); ByteBuffer compressedPayload; try { compressedPayload = ClientTelemetryUtils.compress(payload, compressionType); - } catch (IOException e) { - log.info("Failed to compress telemetry payload for compression: {}, sending uncompressed data", compressionType); + } catch (Throwable e) { + // Distinguish between recoverable errors (NoClassDefFoundError for 
missing compression libs) + // and fatal errors (OutOfMemoryError, etc.) that should terminate telemetry. + if (e instanceof Error && !(e instanceof NoClassDefFoundError) && !(e.getCause() instanceof NoClassDefFoundError)) { + lock.writeLock().lock(); + try { + state = ClientTelemetryState.TERMINATED; + } finally { + lock.writeLock().unlock(); + } + log.error("Unexpected error occurred while compressing telemetry payload for compression: {}, stopping client telemetry", compressionType, e); + throw new KafkaException("Unexpected compression error", e); + } + + log.debug("Failed to compress telemetry payload for compression: {}, sending uncompressed data", compressionType, e); + unsupportedCompressionTypes.add(compressionType); compressedPayload = ByteBuffer.wrap(payload.toByteArray()); compressionType = CompressionType.NONE; } diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java index 3c555afb3b05d..111b041946c6a 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.function.Predicate; import io.opentelemetry.proto.metrics.v1.MetricsData; @@ -181,13 +182,23 @@ public static boolean validateRequiredResourceLabels(Map metadat return validateResourceLabel(metadata, MetricsContext.NAMESPACE); } - public static CompressionType preferredCompressionType(List acceptedCompressionTypes) { - if (acceptedCompressionTypes != null && !acceptedCompressionTypes.isEmpty()) { - // Broker is providing the compression types in order of preference. Grab the - // first one. - return acceptedCompressionTypes.get(0); - } - return CompressionType.NONE; + /** + * Determines the preferred compression type from broker-accepted types, avoiding unsupported ones. + * + * @param acceptedCompressionTypes the list of compression types accepted by the broker in order + * of preference (must not be null, use empty list if no compression is accepted) + * @param unsupportedCompressionTypes the set of compression types that should be avoided due to + * missing libraries or previous failures (must not be null) + * @return the preferred compression type to use, or {@link CompressionType#NONE} if no acceptable + * compression type is available + */ + public static CompressionType preferredCompressionType(List acceptedCompressionTypes, Set unsupportedCompressionTypes) { + // Broker is providing the compression types in order of preference. Grab the + // first one that's supported. 
+ return acceptedCompressionTypes.stream() + .filter(t -> !unsupportedCompressionTypes.contains(t)) + .findFirst() + .orElse(CompressionType.NONE); } public static ByteBuffer compress(MetricsData metrics, CompressionType compressionType) throws IOException { diff --git a/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java b/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java index f9ebd82ea11cf..cc2d7b75f1e6d 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Gauge; -import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.slf4j.Logger; @@ -26,6 +25,7 @@ import java.io.InputStream; import java.lang.management.ManagementFactory; +import java.util.Map; import java.util.Properties; import javax.management.JMException; @@ -69,7 +69,7 @@ public static synchronized void registerAppInfo(String prefix, String id, Metric AppInfo mBean = new AppInfo(nowMs); server.registerMBean(mBean, name); - registerMetrics(metrics, mBean); // prefix will be added later by JmxReporter + registerMetrics(metrics, mBean, id); // prefix will be added later by JmxReporter } catch (JMException e) { log.warn("Error registering AppInfo mbean", e); } @@ -82,7 +82,7 @@ public static synchronized void unregisterAppInfo(String prefix, String id, Metr if (server.isRegistered(name)) server.unregisterMBean(name); - unregisterMetrics(metrics); + unregisterMetrics(metrics, id); } catch (JMException e) { log.warn("Error unregistering AppInfo mbean", e); } finally { @@ -90,23 +90,36 @@ public static synchronized void unregisterAppInfo(String prefix, String id, Metr } } - private static MetricName metricName(Metrics metrics, String name) { - return metrics.metricName(name, "app-info", "Metric indicating " + name); + private static MetricName metricName(Metrics metrics, String name, Map tags) { + return metrics.metricName(name, "app-info", "Metric indicating " + name, tags); } - private static void registerMetrics(Metrics metrics, AppInfo appInfo) { - if (metrics != null) { - metrics.addMetric(metricName(metrics, "version"), new ImmutableValue<>(appInfo.getVersion())); - metrics.addMetric(metricName(metrics, "commit-id"), new ImmutableValue<>(appInfo.getCommitId())); - metrics.addMetric(metricName(metrics, "start-time-ms"), new ImmutableValue<>(appInfo.getStartTimeMs())); + private static void registerMetrics(Metrics metrics, AppInfo appInfo, String clientId) { + if (metrics == null) return; + // Most Kafka clients (producer/consumer/admin) set the client-id tag in the metrics config. + // Although we don’t explicitly parse client-id here, these metrics are automatically tagged with client-id. + metrics.addMetric(metricName(metrics, "version", Map.of()), (Gauge) (config, now) -> appInfo.getVersion()); + metrics.addMetric(metricName(metrics, "commit-id", Map.of()), (Gauge) (config, now) -> appInfo.getCommitId()); + metrics.addMetric(metricName(metrics, "start-time-ms", Map.of()), (Gauge) (config, now) -> appInfo.getStartTimeMs()); + // MirrorMaker/Worker doesn't set client-id tag into the metrics config, so we need to set it here. 
+ if (!metrics.config().tags().containsKey("client-id") && clientId != null) { + metrics.addMetric(metricName(metrics, "version", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getVersion()); + metrics.addMetric(metricName(metrics, "commit-id", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getCommitId()); + metrics.addMetric(metricName(metrics, "start-time-ms", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getStartTimeMs()); } } - private static void unregisterMetrics(Metrics metrics) { - if (metrics != null) { - metrics.removeMetric(metricName(metrics, "version")); - metrics.removeMetric(metricName(metrics, "commit-id")); - metrics.removeMetric(metricName(metrics, "start-time-ms")); + private static void unregisterMetrics(Metrics metrics, String clientId) { + if (metrics == null) return; + + metrics.removeMetric(metricName(metrics, "version", Map.of())); + metrics.removeMetric(metricName(metrics, "commit-id", Map.of())); + metrics.removeMetric(metricName(metrics, "start-time-ms", Map.of())); + + if (!metrics.config().tags().containsKey("client-id") && clientId != null) { + metrics.removeMetric(metricName(metrics, "version", Map.of("client-id", clientId))); + metrics.removeMetric(metricName(metrics, "commit-id", Map.of("client-id", clientId))); + metrics.removeMetric(metricName(metrics, "start-time-ms", Map.of("client-id", clientId))); } } @@ -143,17 +156,4 @@ public Long getStartTimeMs() { } } - - static class ImmutableValue implements Gauge { - private final T value; - - public ImmutableValue(T value) { - this.value = value; - } - - @Override - public T value(MetricConfig config, long now) { - return value; - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java index a760f817b815f..ba51a8dd49114 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java @@ -40,7 +40,7 @@ public final class ByteBufferUnmapper { private static final RuntimeException UNMAP_NOT_SUPPORTED_EXCEPTION; static { - Object unmap = null; + MethodHandle unmap = null; RuntimeException exception = null; try { unmap = lookupUnmapMethodHandle(); @@ -48,7 +48,7 @@ public final class ByteBufferUnmapper { exception = e; } if (unmap != null) { - UNMAP = (MethodHandle) unmap; + UNMAP = unmap; UNMAP_NOT_SUPPORTED_EXCEPTION = null; } else { UNMAP = null; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java index 9a891e0846384..ff422bbe53826 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java @@ -48,7 +48,7 @@ public ChildFirstClassLoader(String classPath, ClassLoader parent) { private static URL[] classpathToURLs(String classPath) { ArrayList urls = new ArrayList<>(); for (String path : classPath.split(File.pathSeparator)) { - if (path == null || path.trim().isEmpty()) + if (path.trim().isEmpty()) continue; File file = new File(path); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java index 397ab623b527b..2c3702af13167 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java +++ 
b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java @@ -75,7 +75,8 @@ public static boolean getBoolean(final Map configs, final String } else if (value instanceof String) { return Boolean.parseBoolean((String) value); } else { - log.error("Invalid value (" + value + ") on configuration '" + key + "'. The default value '" + defaultValue + "' will be used instead. Please specify a true/false value."); + log.error("Invalid value ({}) on configuration '{}'. The default value '{}' will be used instead. Please specify a true/false value.", + value, key, defaultValue); return defaultValue; } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java b/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java deleted file mode 100644 index 4e28bb35c669c..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.utils; - -import java.util.Iterator; -import java.util.function.Function; - -/** - * Provides a flattened iterator over the inner elements of an outer iterator. 
- */ -public final class FlattenedIterator extends AbstractIterator { - private final Iterator outerIterator; - private final Function> innerIteratorFunction; - private Iterator innerIterator; - - public FlattenedIterator(Iterator outerIterator, Function> innerIteratorFunction) { - this.outerIterator = outerIterator; - this.innerIteratorFunction = innerIteratorFunction; - } - - @Override - protected I makeNext() { - while (innerIterator == null || !innerIterator.hasNext()) { - if (outerIterator.hasNext()) - innerIterator = innerIteratorFunction.apply(outerIterator.next()); - else - return allDone(); - } - return innerIterator.next(); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java index 86ae7bd78cda3..e0a8f89e84819 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java @@ -69,7 +69,7 @@ public void register() throws ReflectiveOperationException { for (String signal : SIGNALS) { register(signal, jvmSignalHandlers); } - log.info("Registered signal handlers for " + String.join(", ", SIGNALS)); + log.info("Registered signal handlers for {}", String.join(", ", SIGNALS)); } private Object createSignalHandler(final Map jvmSignalHandlers) { diff --git a/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java index 47a280fee5808..0c8876ebd6a6a 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java @@ -33,7 +33,7 @@ public class SecurityUtils { - private static final Logger LOGGER = LoggerFactory.getLogger(SecurityConfig.class); + private static final Logger LOGGER = LoggerFactory.getLogger(SecurityUtils.class); private static final Map NAME_TO_RESOURCE_TYPES; private static final Map NAME_TO_OPERATIONS; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index 02a62ee4524b8..dc7b0e7625a90 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -64,6 +64,7 @@ import java.util.Collections; import java.util.Date; import java.util.EnumSet; +import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -856,7 +857,7 @@ public static Properties mkObjectProperties(final Map properties public static void delete(final File rootFile) throws IOException { if (rootFile == null) return; - Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor() { + Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor<>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException) { @@ -1402,7 +1403,7 @@ public static Set from32BitField(final int intValue) { * @return new Collector, M, M> */ public static > Collector, M, M> entriesToMap(final Supplier mapSupplier) { - return new Collector, M, M>() { + return new Collector<>() { @Override public Supplier supplier() { return mapSupplier; @@ -1469,7 +1470,24 @@ public static Map filterMap(final Map map, final Predicate propsToMap(Properties properties) { - return castToStringObjectMap(properties); + // This try catch block is to 
handle the case when the Properties object has non-String keys + // when calling the propertyNames() method. This is a workaround for the lack of a method that + // returns all properties including defaults and does not attempt to convert all keys to Strings. + Enumeration enumeration; + try { + enumeration = properties.propertyNames(); + } catch (ClassCastException e) { + throw new ConfigException("One or more keys is not a string."); + } + Map map = new HashMap<>(); + while (enumeration.hasMoreElements()) { + String key = (String) enumeration.nextElement(); + // properties.get(key) returns null for defaults, but properties.getProperty(key) returns null for + // non-string values. A combination of the two methods is used to cover all cases + Object value = (properties.get(key) != null) ? properties.get(key) : properties.getProperty(key); + map.put(key, value); + } + return map; } /** @@ -1479,6 +1497,9 @@ public static Map propsToMap(Properties properties) { * @throws ConfigException if any key is not a String */ public static Map castToStringObjectMap(Map inputMap) { + if (inputMap instanceof Properties) { + return propsToMap((Properties) inputMap); + } Map map = new HashMap<>(inputMap.size()); for (Map.Entry entry : inputMap.entrySet()) { if (entry.getKey() instanceof String) { @@ -1690,6 +1711,7 @@ public static ConfigDef mergeConfigs(List configDefs) { configDefs.forEach(configDef -> configDef.configKeys().values().forEach(all::define)); return all; } + /** * A runnable that can throw checked exception. */ @@ -1697,4 +1719,17 @@ public static ConfigDef mergeConfigs(List configDefs) { public interface ThrowingRunnable { void run() throws Exception; } + + /** + * convert millisecond to nanosecond, or throw exception if overflow + * @param timeMs the time in millisecond + * @return the converted nanosecond + */ + public static long msToNs(long timeMs) { + try { + return Math.multiplyExact(1000 * 1000, timeMs); + } catch (ArithmeticException e) { + throw new IllegalArgumentException("Cannot convert " + timeMs + " millisecond to nanosecond due to arithmetic overflow", e); + } + } } diff --git a/clients/src/main/resources/common/message/AddRaftVoterRequest.json b/clients/src/main/resources/common/message/AddRaftVoterRequest.json index 74b7638ea2463..1a6e58fbbd4f6 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterRequest.json +++ b/clients/src/main/resources/common/message/AddRaftVoterRequest.json @@ -18,7 +18,8 @@ "type": "request", "listeners": ["controller", "broker"], "name": "AddRaftVoterRequest", - "validVersions": "0", + // Version 1 adds the AckWhenCommitted field. + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", @@ -37,6 +38,8 @@ "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "0+", "about": "The port." } - ]} + ]}, + { "name": "AckWhenCommitted", "type": "bool", "versions": "1+", "default": "true", + "about": "When true, return a response after the new voter set is committed. Otherwise, return after the leader writes the changes locally." 
} ] } diff --git a/clients/src/main/resources/common/message/AddRaftVoterResponse.json b/clients/src/main/resources/common/message/AddRaftVoterResponse.json index c48f9cdda4e85..d2ae5b1ddcfab 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterResponse.json +++ b/clients/src/main/resources/common/message/AddRaftVoterResponse.json @@ -17,7 +17,8 @@ "apiKey": 80, "type": "response", "name": "AddRaftVoterResponse", - "validVersions": "0", + // Version 1 is the same as version 0 + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/AlterShareGroupOffsetsRequest.json b/clients/src/main/resources/common/message/AlterShareGroupOffsetsRequest.json index 357f4083c81e2..0fc7b8f308afa 100644 --- a/clients/src/main/resources/common/message/AlterShareGroupOffsetsRequest.json +++ b/clients/src/main/resources/common/message/AlterShareGroupOffsetsRequest.json @@ -20,7 +20,6 @@ "name": "AlterShareGroupOffsetsRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier." }, diff --git a/clients/src/main/resources/common/message/AlterShareGroupOffsetsResponse.json b/clients/src/main/resources/common/message/AlterShareGroupOffsetsResponse.json index dbcd1f9c0bcea..c657d90fc72ea 100644 --- a/clients/src/main/resources/common/message/AlterShareGroupOffsetsResponse.json +++ b/clients/src/main/resources/common/message/AlterShareGroupOffsetsResponse.json @@ -7,6 +7,7 @@ "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 0+) // - NOT_COORDINATOR (version 0+) // - COORDINATOR_NOT_AVAILABLE (version 0+) // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) @@ -18,9 +19,13 @@ "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top-level error code, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, { "name": "Responses", "type": "[]AlterShareGroupOffsetsResponseTopic", "versions": "0+", "about": "The results for each topic.", "fields": [ - { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, "about": "The topic name." }, { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID." }, diff --git a/clients/src/main/resources/common/message/DeleteAclsRequest.json b/clients/src/main/resources/common/message/DeleteAclsRequest.json index db605305ae28b..d794295af567e 100644 --- a/clients/src/main/resources/common/message/DeleteAclsRequest.json +++ b/clients/src/main/resources/common/message/DeleteAclsRequest.json @@ -30,7 +30,7 @@ { "name": "ResourceTypeFilter", "type": "int8", "versions": "0+", "about": "The resource type." }, { "name": "ResourceNameFilter", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The resource name." 
}, + "about": "The resource name, or null to match any resource name." }, { "name": "PatternTypeFilter", "type": "int8", "versions": "1+", "default": "3", "ignorable": false, "about": "The pattern type." }, { "name": "PrincipalFilter", "type": "string", "versions": "0+", "nullableVersions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteRecordsRequest.json b/clients/src/main/resources/common/message/DeleteRecordsRequest.json index fc697944a02bb..969efd63e957d 100644 --- a/clients/src/main/resources/common/message/DeleteRecordsRequest.json +++ b/clients/src/main/resources/common/message/DeleteRecordsRequest.json @@ -33,7 +33,7 @@ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "Offset", "type": "int64", "versions": "0+", - "about": "The deletion offset." } + "about": "The deletion offset. -1 means that records should be truncated to the high watermark." } ]} ]}, { "name": "TimeoutMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteShareGroupOffsetsRequest.json b/clients/src/main/resources/common/message/DeleteShareGroupOffsetsRequest.json index b6a8559cdba76..54616d262577a 100644 --- a/clients/src/main/resources/common/message/DeleteShareGroupOffsetsRequest.json +++ b/clients/src/main/resources/common/message/DeleteShareGroupOffsetsRequest.json @@ -20,16 +20,13 @@ "name": "DeleteShareGroupOffsetsRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier." }, { "name": "Topics", "type": "[]DeleteShareGroupOffsetsRequestTopic", "versions": "0+", "about": "The topics to delete offsets for.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name." }, - { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partitions." } + "about": "The topic name." } ]} ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DeleteShareGroupOffsetsResponse.json b/clients/src/main/resources/common/message/DeleteShareGroupOffsetsResponse.json index f7004678b1d72..ebeb3064504f7 100644 --- a/clients/src/main/resources/common/message/DeleteShareGroupOffsetsResponse.json +++ b/clients/src/main/resources/common/message/DeleteShareGroupOffsetsResponse.json @@ -30,6 +30,7 @@ // - KAFKA_STORAGE_ERROR (version 0+) // - INVALID_REQUEST (version 0+) // - UNKNOWN_SERVER_ERROR (version 0+) + // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, @@ -43,14 +44,10 @@ "about": "The topic name." }, { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID." }, - { "name": "Partitions", "type": "[]DeleteShareGroupOffsetsResponsePartition", "versions": "0+", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The partition-level error code, or 0 if there was no error." 
}, - { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The partition-level error message, or null if there was no error." } - ]} + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The topic-level error code, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The topic-level error message, or null if there was no error." } ]} ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json b/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json index 37672b92f6561..ff5ad99735845 100644 --- a/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json @@ -20,16 +20,15 @@ "name": "DeleteShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about":"The group identifier." }, + "about": "The group identifier." }, { "name": "Topics", "type": "[]DeleteStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." } ]} diff --git a/clients/src/main/resources/common/message/DescribeClusterResponse.json b/clients/src/main/resources/common/message/DescribeClusterResponse.json index a17e427c8c3e2..1911b1ec33d6f 100644 --- a/clients/src/main/resources/common/message/DescribeClusterResponse.json +++ b/clients/src/main/resources/common/message/DescribeClusterResponse.json @@ -36,7 +36,7 @@ { "name": "ClusterId", "type": "string", "versions": "0+", "about": "The cluster ID that responding broker belongs to." }, { "name": "ControllerId", "type": "int32", "versions": "0+", "default": "-1", "entityType": "brokerId", - "about": "The ID of the controller broker." }, + "about": "The ID of the controller. When handled by a controller, returns the current voter leader ID. When handled by a broker, returns a random alive broker ID as a fallback." }, { "name": "Brokers", "type": "[]DescribeClusterBroker", "versions": "0+", "about": "Each broker in the response.", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "0+", "mapKey": true, "entityType": "brokerId", diff --git a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json index d05785fe8d862..725d1ad337b56 100644 --- a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json +++ b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json @@ -51,11 +51,11 @@ "about": "True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future." }]} ]}, { "name": "TotalBytes", "type": "int64", "versions": "4+", "ignorable": true, "default": "-1", - "about": "The total size in bytes of the volume the log directory is in." + "about": "The total size in bytes of the volume the log directory is in. 
This value does not include the size of data stored in remote storage." }, { "name": "UsableBytes", "type": "int64", "versions": "4+", "ignorable": true, "default": "-1", - "about": "The usable size in bytes of the volume the log directory is in." + "about": "The usable size in bytes of the volume the log directory is in. This value does not include the size of data stored in remote storage." } ]} ] -} +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json index e9093296bb669..f87c1fc394ce8 100644 --- a/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json +++ b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json @@ -20,7 +20,6 @@ "name": "DescribeShareGroupOffsetsRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "Groups", "type": "[]DescribeShareGroupOffsetsRequestGroup", "versions": "0+", "about": "The groups to describe offsets for.", "fields": [ diff --git a/clients/src/main/resources/common/message/FetchRequest.json b/clients/src/main/resources/common/message/FetchRequest.json index b7ad185f60b39..9ebf86ac424c9 100644 --- a/clients/src/main/resources/common/message/FetchRequest.json +++ b/clients/src/main/resources/common/message/FetchRequest.json @@ -27,7 +27,7 @@ // the request is now relevant. Partitions will be processed in the order // they appear in the request. // - // Version 4 adds IsolationLevel. Starting in version 4, the reqestor must be + // Version 4 adds IsolationLevel. Starting in version 4, the requestor must be // able to handle Kafka log message format version 2. // // Version 5 adds LogStartOffset to indicate the earliest available offset of @@ -56,7 +56,9 @@ // Version 16 is the same as version 15 (KIP-951). // // Version 17 adds directory id support from KIP-853 - "validVersions": "4-17", + // + // Version 18 adds high-watermark from KIP-1166 + "validVersions": "4-18", "flexibleVersions": "12+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "12+", "nullableVersions": "12+", "default": "null", @@ -103,7 +105,10 @@ { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", "about": "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored." }, { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "17+", "taggedVersions": "17+", "tag": 0, "ignorable": true, - "about": "The directory id of the follower fetching." } + "about": "The directory id of the follower fetching." }, + { "name": "HighWatermark", "type": "int64", "versions": "18+", "default": "9223372036854775807", "taggedVersions": "18+", + "tag": 1, "ignorable": true, + "about": "The high-watermark known by the replica. -1 if the high-watermark is not known and 9223372036854775807 if the feature is not supported." } ]} ]}, { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "7+", "ignorable": false, diff --git a/clients/src/main/resources/common/message/FetchResponse.json b/clients/src/main/resources/common/message/FetchResponse.json index dc8d35175661f..36dc05ff60ca4 100644 --- a/clients/src/main/resources/common/message/FetchResponse.json +++ b/clients/src/main/resources/common/message/FetchResponse.json @@ -48,7 +48,9 @@ // Version 16 adds the 'NodeEndpoints' field (KIP-951). // // Version 17 no changes to the response (KIP-853). 
- "validVersions": "4-17", + // + // Version 18 no changes to the response (KIP-1166) + "validVersions": "4-18", "flexibleVersions": "12+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json b/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json index 76de19b48fb0d..7512ce6e51335 100644 --- a/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json @@ -20,7 +20,6 @@ "name": "InitializeShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "about": "The group identifier." }, @@ -29,7 +28,7 @@ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/JoinGroupRequest.json b/clients/src/main/resources/common/message/JoinGroupRequest.json index 41d7c1acbaefe..31afdb1a32ae8 100644 --- a/clients/src/main/resources/common/message/JoinGroupRequest.json +++ b/clients/src/main/resources/common/message/JoinGroupRequest.json @@ -18,8 +18,6 @@ "type": "request", "listeners": ["broker"], "name": "JoinGroupRequest", - // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. - // // Version 1 adds RebalanceTimeoutMs. Version 2 and 3 are the same as version 1. // // Starting from version 4, the client needs to issue a second request to join group @@ -34,7 +32,7 @@ // Version 8 adds the Reason field (KIP-800). // // Version 9 is the same as version 8. - "validVersions": "2-9", + "validVersions": "0-9", "flexibleVersions": "6+", "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", diff --git a/clients/src/main/resources/common/message/JoinGroupResponse.json b/clients/src/main/resources/common/message/JoinGroupResponse.json index 364309596eb95..d2f016f62f66c 100644 --- a/clients/src/main/resources/common/message/JoinGroupResponse.json +++ b/clients/src/main/resources/common/message/JoinGroupResponse.json @@ -17,8 +17,6 @@ "apiKey": 11, "type": "response", "name": "JoinGroupResponse", - // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. - // // Version 1 is the same as version 0. // // Version 2 adds throttle time. @@ -37,7 +35,7 @@ // Version 8 is the same as version 7. // // Version 9 adds the SkipAssignment field. 
- "validVersions": "2-9", + "validVersions": "0-9", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json b/clients/src/main/resources/common/message/ListConfigResourcesRequest.json similarity index 64% rename from clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json rename to clients/src/main/resources/common/message/ListConfigResourcesRequest.json index b54dce6b7c749..c4b858a715043 100644 --- a/clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json +++ b/clients/src/main/resources/common/message/ListConfigResourcesRequest.json @@ -17,10 +17,15 @@ "apiKey": 74, "type": "request", "listeners": ["broker"], - "name": "ListClientMetricsResourcesRequest", - "validVersions": "0", + "name": "ListConfigResourcesRequest", + // Version 0 is used as ListClientMetricsResourcesRequest which only lists client metrics resources. + // Version 1 adds ResourceTypes field (KIP-1142). If there is no specified ResourceTypes, it should return all configuration resources. + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ + { "name": "ResourceTypes", "type": "[]int8", "versions": "1+", + "about": "The list of resource type. If the list is empty, it uses default supported config resource types." + } ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json b/clients/src/main/resources/common/message/ListConfigResourcesResponse.json similarity index 66% rename from clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json rename to clients/src/main/resources/common/message/ListConfigResourcesResponse.json index 281781c762733..8a2dbdf5a3041 100644 --- a/clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json +++ b/clients/src/main/resources/common/message/ListConfigResourcesResponse.json @@ -16,18 +16,22 @@ { "apiKey": 74, "type": "response", - "name": "ListClientMetricsResourcesResponse", - "validVersions": "0", + "name": "ListConfigResourcesResponse", + // Version 0 is used as ListClientMetricsResourcesResponse which returns all client metrics resources. + // Version 1 adds ResourceType to ConfigResources (KIP-1142). + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no error." }, - { "name": "ClientMetricsResources", "type": "[]ClientMetricsResource", "versions": "0+", - "about": "Each client metrics resource in the response.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", - "about": "The resource name." } + { "name": "ConfigResources", "type": "[]ConfigResource", "versions": "0+", + "about": "Each config resource in the response.", "fields": [ + { "name": "ResourceName", "type": "string", "versions": "0+", + "about": "The resource name." }, + { "name": "ResourceType", "type": "int8", "versions": "1+", "ignorable": true, "default": 16, + "about": "The resource type." 
} ]} ] } diff --git a/clients/src/main/resources/common/message/ListOffsetsRequest.json b/clients/src/main/resources/common/message/ListOffsetsRequest.json index 6f8ff7d6cf935..1a2de6ca30a2f 100644 --- a/clients/src/main/resources/common/message/ListOffsetsRequest.json +++ b/clients/src/main/resources/common/message/ListOffsetsRequest.json @@ -40,7 +40,9 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - "validVersions": "1-10", + // + // Version 11 enables listing offsets by earliest pending upload offset (KIP-1023) + "validVersions": "1-11", "flexibleVersions": "6+", "latestVersionUnstable": false, "fields": [ diff --git a/clients/src/main/resources/common/message/ListOffsetsResponse.json b/clients/src/main/resources/common/message/ListOffsetsResponse.json index 7f9588847b9a0..1407273bf4d8c 100644 --- a/clients/src/main/resources/common/message/ListOffsetsResponse.json +++ b/clients/src/main/resources/common/message/ListOffsetsResponse.json @@ -40,7 +40,9 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - "validVersions": "1-10", + // + // Version 11 enables listing offsets by earliest pending upload offset (KIP-1023) + "validVersions": "1-11", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListTransactionsRequest.json b/clients/src/main/resources/common/message/ListTransactionsRequest.json index 5d7c688da2213..57d42e6b99c41 100644 --- a/clients/src/main/resources/common/message/ListTransactionsRequest.json +++ b/clients/src/main/resources/common/message/ListTransactionsRequest.json @@ -19,7 +19,9 @@ "listeners": ["broker"], "name": "ListTransactionsRequest", // Version 1: adds DurationFilter to list transactions older than specified duration - "validVersions": "0-1", + + // Version 2: adds TransactionalIdPattern to list transactions matching the given pattern (KIP-1152) + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "StateFilters", "type": "[]string", "versions": "0+", @@ -30,6 +32,9 @@ }, { "name": "DurationFilter", "type": "int64", "versions": "1+", "default": -1, "about": "Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned." + }, + { "name": "TransactionalIdPattern", "type": "string", "versions": "2+", "nullableVersions": "2+", "default": "null", + "about": "The transactional ID regular expression pattern to filter by: if it is empty or null, all transactions are returned; otherwise, only the transactions matching the given regular expression will be returned." } ] } diff --git a/clients/src/main/resources/common/message/ListTransactionsResponse.json b/clients/src/main/resources/common/message/ListTransactionsResponse.json index 3872cf24a3075..0af1be699b435 100644 --- a/clients/src/main/resources/common/message/ListTransactionsResponse.json +++ b/clients/src/main/resources/common/message/ListTransactionsResponse.json @@ -18,7 +18,9 @@ "type": "response", "name": "ListTransactionsResponse", // Version 1 is the same as version 0 (KIP-994). - "validVersions": "0-1", + + // Version 2 supports filtering by TransactionalIdPattern and can return InvalidRegularExpression (KIP-1152).
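For the ListTransactions version 2 change above, here is a minimal sketch of filling in the new TransactionalIdPattern field, assuming the generated ListTransactionsRequestData class exposes a matching setter. A null or empty pattern keeps the old list-everything behaviour, and an invalid regular expression is rejected with InvalidRegularExpression.

```java
// Sketch only: assumes the generated ListTransactionsRequestData class exposes a
// setTransactionalIdPattern setter for the new version 2 field.
import org.apache.kafka.common.message.ListTransactionsRequestData;

public class ListTransactionsPatternExample {
    public static ListTransactionsRequestData paymentsOnly() {
        // Only transactional IDs matching this regular expression are returned;
        // null or empty would return all transactions.
        return new ListTransactionsRequestData()
                .setTransactionalIdPattern("payments-.*");
    }
}
```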
+ "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetCommitRequest.json b/clients/src/main/resources/common/message/OffsetCommitRequest.json index 348ed2b90c5c8..ba3c12f0e2b47 100644 --- a/clients/src/main/resources/common/message/OffsetCommitRequest.json +++ b/clients/src/main/resources/common/message/OffsetCommitRequest.json @@ -36,8 +36,11 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The // request is the same as version 8. - "validVersions": "2-9", + // + // Version 10 adds support for topic ids and removes support for topic names (KIP-848). + "validVersions": "2-10", "flexibleVersions": "8+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The unique group identifier." }, @@ -52,8 +55,10 @@ "about": "The time period in ms to retain the offset." }, { "name": "Topics", "type": "[]OffsetCommitRequestTopic", "versions": "0+", "about": "The topics to commit offsets for.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", + { "name": "Name", "type": "string", "versions": "0-9", "entityType": "topicName", "ignorable": true, "about": "The topic name." }, + { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, + "about": "The topic ID." }, { "name": "Partitions", "type": "[]OffsetCommitRequestPartition", "versions": "0+", "about": "Each partition to commit offsets for.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetCommitResponse.json b/clients/src/main/resources/common/message/OffsetCommitResponse.json index 0cccd64816c47..0228733ce6bb0 100644 --- a/clients/src/main/resources/common/message/OffsetCommitResponse.json +++ b/clients/src/main/resources/common/message/OffsetCommitResponse.json @@ -34,7 +34,9 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH when the new consumer group protocol is used and // GROUP_ID_NOT_FOUND when the group does not exist for both protocols. - "validVersions": "2-9", + // + // Version 10 adds support for topic ids and removes support for topic names (KIP-848). + "validVersions": "2-10", "flexibleVersions": "8+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -47,13 +49,16 @@ // - FENCED_MEMBER_EPOCH (version 7+) // - GROUP_ID_NOT_FOUND (version 9+) // - STALE_MEMBER_EPOCH (version 9+) + // - UNKNOWN_TOPIC_ID (version 10+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "Topics", "type": "[]OffsetCommitResponseTopic", "versions": "0+", "about": "The responses for each topic.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", + { "name": "Name", "type": "string", "versions": "0-9", "entityType": "topicName", "ignorable": true, "about": "The topic name." }, + { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, + "about": "The topic ID." 
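Since OffsetCommit version 10 switches from topic names to topic IDs, a client has to pick the addressing mode from the negotiated request version. A minimal sketch, under the assumption that the generated OffsetCommitRequestData.OffsetCommitRequestTopic class exposes setName and setTopicId as the schema above implies:

```java
// Sketch only: assumes the generated OffsetCommitRequestData.OffsetCommitRequestTopic
// nested class with setName/setTopicId setters.
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic;

public class OffsetCommitTopicExample {
    public static OffsetCommitRequestTopic topicFor(short version, String name, Uuid topicId) {
        OffsetCommitRequestTopic topic = new OffsetCommitRequestTopic();
        if (version >= 10) {
            topic.setTopicId(topicId);   // versions 10+ address topics by ID
        } else {
            topic.setName(name);         // versions 0-9 address topics by name
        }
        return topic;
    }
}
```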
}, { "name": "Partitions", "type": "[]OffsetCommitResponsePartition", "versions": "0+", "about": "The responses for each partition in the topic.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetFetchRequest.json b/clients/src/main/resources/common/message/OffsetFetchRequest.json index 88f5b568d724c..df831eba756d9 100644 --- a/clients/src/main/resources/common/message/OffsetFetchRequest.json +++ b/clients/src/main/resources/common/message/OffsetFetchRequest.json @@ -38,8 +38,11 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). It adds // the MemberId and MemberEpoch fields. Those are filled in and validated when the new consumer protocol is used. - "validVersions": "1-9", + // + // Version 10 adds support for topic ids and removes support for topic names (KIP-848). + "validVersions": "1-10", "flexibleVersions": "6+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0-7", "entityType": "groupId", "about": "The group to fetch offsets for." }, @@ -60,8 +63,10 @@ "about": "The member epoch if using the new consumer protocol (KIP-848)." }, { "name": "Topics", "type": "[]OffsetFetchRequestTopics", "versions": "8+", "nullableVersions": "8+", "about": "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.", "fields": [ - { "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName", + { "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true, "about": "The topic name."}, + { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, + "about": "The topic ID." }, { "name": "PartitionIndexes", "type": "[]int32", "versions": "8+", "about": "The partition indexes we would like to fetch offsets for." } ]} diff --git a/clients/src/main/resources/common/message/OffsetFetchResponse.json b/clients/src/main/resources/common/message/OffsetFetchResponse.json index 9f0a5157cc424..e92590e38e10c 100644 --- a/clients/src/main/resources/common/message/OffsetFetchResponse.json +++ b/clients/src/main/resources/common/message/OffsetFetchResponse.json @@ -38,7 +38,9 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH and UNKNOWN_MEMBER_ID errors when the new consumer group // protocol is used. - "validVersions": "1-9", + // + // Version 10 adds support for topic ids and removes support for topic names (KIP-848). + "validVersions": "1-10", "flexibleVersions": "6+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -49,6 +51,7 @@ // - UNSTABLE_OFFSET_COMMIT (version 7+) // - UNKNOWN_MEMBER_ID (version 9+) // - STALE_MEMBER_EPOCH (version 9+) + // - UNKNOWN_TOPIC_ID (version 10+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, @@ -78,8 +81,10 @@ "about": "The group ID." 
}, { "name": "Topics", "type": "[]OffsetFetchResponseTopics", "versions": "8+", "about": "The responses per topic.", "fields": [ - { "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName", + { "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true, "about": "The topic name." }, + { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, + "about": "The topic ID." }, { "name": "Partitions", "type": "[]OffsetFetchResponsePartitions", "versions": "8+", "about": "The responses per partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "8+", diff --git a/clients/src/main/resources/common/message/ProduceRequest.json b/clients/src/main/resources/common/message/ProduceRequest.json index 0bb29f92378dd..3b46a1ff5f468 100644 --- a/clients/src/main/resources/common/message/ProduceRequest.json +++ b/clients/src/main/resources/common/message/ProduceRequest.json @@ -46,7 +46,8 @@ // transaction V2 (KIP_890 part 2) is enabled, the produce request will also include the function for a // AddPartitionsToTxn call. If V2 is disabled, the client can't use produce request version higher than 11 within // a transaction. - "validVersions": "3-12", + // Version 13 replaces topic names with topic IDs (KIP-516). May return UNKNOWN_TOPIC_ID error code. + "validVersions": "3-13", "flexibleVersions": "9+", "fields": [ { "name": "TransactionalId", "type": "string", "versions": "3+", "nullableVersions": "3+", "default": "null", "entityType": "transactionalId", @@ -57,8 +58,9 @@ "about": "The timeout to await a response in milliseconds." }, { "name": "TopicData", "type": "[]TopicProduceData", "versions": "0+", "about": "Each topic to produce to.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, + { "name": "Name", "type": "string", "versions": "0-12", "entityType": "topicName", "mapKey": true, "ignorable": true, "about": "The topic name." }, + { "name": "TopicId", "type": "uuid", "versions": "13+", "mapKey": true, "ignorable": true, "about": "The unique topic ID" }, { "name": "PartitionData", "type": "[]PartitionProduceData", "versions": "0+", "about": "Each partition to produce to.", "fields": [ { "name": "Index", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ProduceResponse.json b/clients/src/main/resources/common/message/ProduceResponse.json index fafcd86401d40..adf08e94a35d4 100644 --- a/clients/src/main/resources/common/message/ProduceResponse.json +++ b/clients/src/main/resources/common/message/ProduceResponse.json @@ -40,13 +40,15 @@ // Version 11 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). // // Version 12 is the same as version 10 (KIP-890). - "validVersions": "3-12", + // Version 13 replaces topic names with topic IDs (KIP-516). May return UNKNOWN_TOPIC_ID error code. + "validVersions": "3-13", "flexibleVersions": "9+", "fields": [ { "name": "Responses", "type": "[]TopicProduceResponse", "versions": "0+", "about": "Each produce response.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, + { "name": "Name", "type": "string", "versions": "0-12", "entityType": "topicName", "mapKey": true, "ignorable": true, "about": "The topic name." 
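Produce version 13 (KIP-516) likewise drops topic names in favour of topic IDs, so response handling needs an ID-to-name lookup. The helper below is purely illustrative; the map stands in for whatever ID-to-name view the client keeps from its metadata and is not a specific Kafka API.

```java
// Illustrative only: resolves the TopicId carried by a version 13 produce response
// back to a topic name using a caller-supplied ID-to-name map.
import org.apache.kafka.common.Uuid;
import java.util.Map;

public class ProduceResponseTopicResolver {
    public static String resolve(short version, String name, Uuid topicId, Map<Uuid, String> topicNames) {
        if (version >= 13) {
            // Version 13 responses carry only the topic ID; an ID the broker does not
            // recognise surfaces as UNKNOWN_TOPIC_ID on the request path.
            return topicNames.getOrDefault(topicId, "<unknown topic id " + topicId + ">");
        }
        return name; // versions 3-12 still carry the topic name
    }
}
```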
}, + { "name": "TopicId", "type": "uuid", "versions": "13+", "mapKey": true, "ignorable": true, "about": "The unique topic ID" }, { "name": "PartitionResponses", "type": "[]PartitionProduceResponse", "versions": "0+", "about": "Each partition that we produced to within the topic.", "fields": [ { "name": "Index", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/README.md b/clients/src/main/resources/common/message/README.md index 8c92fd6bc9475..435635214f25e 100644 --- a/clients/src/main/resources/common/message/README.md +++ b/clients/src/main/resources/common/message/README.md @@ -34,9 +34,7 @@ specifies the versions of the protocol that our code understands. For example, specifying "0-2" indicates that we understand versions 0, 1, and 2. You must always specify the highest message version which is supported. -The only old message versions that are no longer supported are version 0 of -MetadataRequest and MetadataResponse. In general, since we adopted KIP-97, -dropping support for old message versions is no longer allowed without a KIP. +Dropping support for old message versions is no longer allowed without a KIP. Therefore, please be careful not to increase the lower end of the version support interval for any message. diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json b/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json index d32b99e0f59f0..a19913dfde847 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json @@ -20,16 +20,15 @@ "name": "ReadShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about":"The group identifier." }, + "about": "The group identifier." }, { "name": "Topics", "type": "[]ReadStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json index 7815f7b50c7d1..734a0fefdc4d3 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json @@ -24,6 +24,7 @@ // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) + // - FENCED_LEADER_EPOCH (version 0+) // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]ReadStateResult", "versions": "0+", @@ -39,17 +40,17 @@ { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The error message, or null if there was no error." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", - "about": "The state epoch for this share-partition." }, + "about": "The state epoch of the share-partition." 
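The ReadShareGroupState and WriteShareGroupState schemas encode the batch delivery state as an int8 with the meanings given in the about strings (0:Available, 2:Acked, 4:Archived). An illustrative decoder for those codes, not an existing Kafka class:

```java
// Illustrative decoder for the DeliveryState byte described in the schema above
// (0:Available, 2:Acked, 4:Archived).
public enum BatchDeliveryState {
    AVAILABLE((byte) 0), ACKED((byte) 2), ARCHIVED((byte) 4);

    private final byte id;

    BatchDeliveryState(byte id) {
        this.id = id;
    }

    public static BatchDeliveryState fromId(byte id) {
        for (BatchDeliveryState state : values()) {
            if (state.id == id) {
                return state;
            }
        }
        throw new IllegalArgumentException("Unknown delivery state: " + id);
    }
}
```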
}, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset, which can be -1 if it is not yet initialized." }, { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "about": "The state batches for this share-partition.", "fields":[ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "The base offset of this state batch." }, + "about": "The first offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0+", - "about": "The state - 0:Available,2:Acked,4:Archived." }, + "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count." } ]} diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json index 870f01f3fd494..cdbad63bfa22b 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json @@ -20,16 +20,15 @@ "name": "ReadShareGroupStateSummaryRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about":"The group identifier." }, + "about": "The group identifier." }, { "name": "Topics", "type": "[]ReadStateSummaryData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json index ddf9d7044a6a3..81e3edc554ece 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json @@ -41,6 +41,8 @@ "about": "The error message, or null if there was no error." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", "about": "The state epoch of the share-partition." }, + { "name": "LeaderEpoch", "type": "int32", "versions": "0+", + "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset." } ]} diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json index db534cb4c1c13..561f4a84d2f6d 100644 --- a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json +++ b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json @@ -18,12 +18,11 @@ "type": "request", "listeners": ["broker"], "name": "ShareAcknowledgeRequest", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932).
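The ShareAcknowledge schema expresses acknowledgements as inclusive offset ranges plus an array of acknowledge type codes (0:Gap, 1:Accept, 2:Release, 3:Reject). A sketch of building one batch, assuming the generated ShareAcknowledgeRequestData.AcknowledgementBatch class with the usual fluent setters:

```java
// Sketch only: assumes the generated ShareAcknowledgeRequestData.AcknowledgementBatch
// nested class; the acknowledge type codes come straight from the schema comment.
import org.apache.kafka.common.message.ShareAcknowledgeRequestData.AcknowledgementBatch;
import java.util.List;

public class ShareAcknowledgeExample {
    public static AcknowledgementBatch acceptAll(long firstOffset, long lastOffset) {
        return new AcknowledgementBatch()
                .setFirstOffset(firstOffset)
                .setLastOffset(lastOffset)               // inclusive
                .setAcknowledgeTypes(List.of((byte) 1)); // 1 = Accept
    }
}
```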
+ "validVersions": "1", "flexibleVersions": "0+", - // The ShareAcknowledgeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", "about": "The group identifier." }, @@ -33,19 +32,19 @@ "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, { "name": "Topics", "type": "[]AcknowledgeTopic", "versions": "0+", "about": "The topics containing records to acknowledge.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID.", "mapKey": true }, { "name": "Partitions", "type": "[]AcknowledgePartition", "versions": "0+", "about": "The partitions containing records to acknowledge.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "mapKey": true, "about": "The partition index." }, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge."}, + "about": "First offset of batch of records to acknowledge." }, { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge."}, + "about": "Last offset (inclusive) of batch of records to acknowledge." }, { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} + "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject." } ]} ]} ]} diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json index 1f726a0c7d6a4..65d0875698331 100644 --- a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json +++ b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json @@ -17,7 +17,10 @@ "apiKey": 79, "type": "response", "name": "ShareAcknowledgeResponse", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -32,16 +35,16 @@ // - INVALID_REQUEST (version 0+) // - UNKNOWN_SERVER_ERROR (version 0+) "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, + { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top level response error code."
}, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, - "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", "mapKey": true, + "about": "The unique topic ID." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json index 82ac3093db21e..d7a4abf1fbb34 100644 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ b/clients/src/main/resources/common/message/ShareFetchRequest.json @@ -18,12 +18,11 @@ "type": "request", "listeners": ["broker"], "name": "ShareFetchRequest", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", - // The ShareFetchRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", "about": "The group identifier." }, @@ -37,17 +36,19 @@ "about": "The minimum bytes to accumulate in the response." }, { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, - { "name": "MaxRecords", "type": "int32", "versions": "0+", + { "name": "MaxRecords", "type": "int32", "versions": "1+", "about": "The maximum number of records to fetch. This limit can be exceeded for alignment of batch boundaries." }, - { "name": "BatchSize", "type": "int32", "versions": "0+", + { "name": "BatchSize", "type": "int32", "versions": "1+", "about": "The optimal number of records for batches of acquired records and acknowledgements." }, { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", "about": "The topics to fetch.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID.", "mapKey": true }, { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", "about": "The partitions to fetch.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "mapKey": true, "about": "The partition index." }, + { "name": "PartitionMaxBytes", "type": "int32", "versions": "0", + "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored."
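In ShareFetch version 1, fetch sizing moves from the per-partition PartitionMaxBytes of version 0 to the request-level MaxRecords and BatchSize fields. A sketch of setting them, assuming the generated ShareFetchRequestData class with fluent setters named after the schema fields:

```java
// Sketch only: assumes the generated ShareFetchRequestData class.
import org.apache.kafka.common.message.ShareFetchRequestData;

public class ShareFetchSizingExample {
    public static ShareFetchRequestData sized(ShareFetchRequestData request) {
        // MaxRecords caps the total records returned (it may be exceeded to keep
        // batches whole) and BatchSize is the preferred acquisition batch size.
        return request
                .setMaxRecords(500)
                .setBatchSize(100);
    }
}
```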
}, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json index ed459f304af13..5d4ede78da3d3 100644 --- a/clients/src/main/resources/common/message/ShareFetchResponse.json +++ b/clients/src/main/resources/common/message/ShareFetchResponse.json @@ -17,7 +17,10 @@ "apiKey": 78, "type": "response", "name": "ShareFetchResponse", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", // Supported errors for ErrorCode and AcknowledgeErrorCode: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -39,12 +42,12 @@ "about": "The top-level response error code." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, - { "name": "AcquisitionLockTimeoutMs", "type": "int32", "versions": "0+", + { "name": "AcquisitionLockTimeoutMs", "type": "int32", "versions": "1+", "about": "The time in milliseconds for which the acquired records are locked." }, { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", "mapKey": true, + "about": "The unique topic ID." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -64,11 +67,11 @@ { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "about": "The latest known leader epoch." } ]}, - { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}, + { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0", "about": "The record data." }, { "name": "AcquiredRecords", "type": "[]AcquiredRecords", "versions": "0+", "about": "The acquired records.", "fields": [ - {"name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records."}, - {"name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records."}, - {"name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."} + { "name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records." }, + { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records." }, + { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."
} ]} ]} ]}, diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json index 5efd435939db1..897b8bc7b2a94 100644 --- a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json @@ -18,12 +18,11 @@ "type": "request", "listeners": ["broker"], "name": "ShareGroupDescribeRequest", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", - // The ShareGroupDescribeRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, "fields": [ { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", "about": "The ids of the groups to describe." }, diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json index e90e431f64e43..57595c1b51c9b 100644 --- a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json +++ b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json @@ -17,17 +17,20 @@ "apiKey": 77, "type": "response", "name": "ShareGroupDescribeResponse", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 1+) // - NOT_COORDINATOR (version 0+) // - COORDINATOR_NOT_AVAILABLE (version 0+) // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) // - INVALID_GROUP_ID (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) + // - INVALID_REQUEST (version 0+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json index e49e022d45864..37d5d04efb2a4 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json @@ -18,12 +18,11 @@ "type": "request", "listeners": ["broker"], "name": "ShareGroupHeartbeatRequest", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", - // The ShareGroupHeartbeatRequest API is added as part of KIP-932 and is still under - // development. Hence, the API is not exposed by default by brokers unless - // explicitly enabled. - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier."
}, @@ -31,7 +30,7 @@ "about": "The member id generated by the consumer. The member id must be kept during the entire lifetime of the consumer process." }, { "name": "MemberEpoch", "type": "int32", "versions": "0+", "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "topicName", "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." } diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json index 75aa62b76f4fc..c12eb3dca2021 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json @@ -17,17 +17,20 @@ "apiKey": 76, "type": "response", "name": "ShareGroupHeartbeatResponse", - "validVersions": "0", + // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apache Kafka 4.1. + // + // Version 1 is the initial stable version (KIP-932). + "validVersions": "1", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 1+) // - NOT_COORDINATOR (version 0+) // - COORDINATOR_NOT_AVAILABLE (version 0+) // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) - // - INVALID_REQUEST (version 0+) // - UNKNOWN_MEMBER_ID (version 0+) // - GROUP_MAX_SIZE_REACHED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 0+) + // - INVALID_REQUEST (version 0+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, diff --git a/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json b/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json index 5dff3d7bf4449..520a1ec4aee49 100644 --- a/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json +++ b/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json @@ -57,9 +57,9 @@ "fields": [ { "name": "SubtopologyId", "type": "string", "versions": "0+", "about": "String to uniquely identify the subtopology." }, - { "name": "SourceTopics", "type": "[]string", "versions": "0+", + { "name": "SourceTopics", "type": "[]string", "versions": "0+", "entityType": "topicName", "about": "The topics the subtopology reads from." }, - { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", + { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", "entityType": "topicName", "about": "The repartition topics the subtopology writes to." }, { "name": "StateChangelogTopics", "type": "[]TopicInfo", "versions": "0+", "about": "The set of state changelog topics associated with this subtopology. Created automatically."
}, @@ -154,7 +154,7 @@ "about": "value of the config" } ]}, { "name": "TopicInfo", "versions": "0+", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The name of the topic." }, { "name": "Partitions", "type": "int32", "versions": "0+", "about": "The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics." }, diff --git a/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json index 6af7fad4d2ba1..8b63e037fc0a1 100644 --- a/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json @@ -20,7 +20,7 @@ "name": "StreamsGroupHeartbeatRequest", "validVersions": "0", "flexibleVersions": "0+", - // The StreamsGroupDescribeRequest API is added as part of KIP-1071 and is still under + // The StreamsGroupHeartbeatRequest API is added as part of KIP-1071 and is still under // development. Hence, the API is not exposed by default by brokers unless // explicitly enabled. "latestVersionUnstable": true, @@ -31,6 +31,8 @@ "about": "The member ID generated by the streams consumer. The member ID must be kept during the entire lifetime of the streams consumer process." }, { "name": "MemberEpoch", "type": "int32", "versions": "0+", "about": "The current member epoch; 0 to join the group; -1 to leave the group; -2 to indicate that the static member will rejoin." }, + { "name": "EndpointInformationEpoch", "type": "int32", "versions": "0+", + "about": "The current endpoint epoch of this client, represents the latest endpoint epoch this client received"}, { "name": "InstanceId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "null if not provided or if it didn't change since the last heartbeat; the instance ID for static membership otherwise." }, { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", @@ -48,13 +50,13 @@ "fields": [ { "name": "SubtopologyId", "type": "string", "versions": "0+", "about": "String to uniquely identify the subtopology. Deterministically generated from the topology" }, - { "name": "SourceTopics", "type": "[]string", "versions": "0+", + { "name": "SourceTopics", "type": "[]string", "versions": "0+", "entityType": "topicName", "about": "The topics the topology reads from." }, { "name": "SourceTopicRegex", "type": "[]string", "versions": "0+", "about": "The regular expressions identifying topics the subtopology reads from." }, { "name": "StateChangelogTopics", "type": "[]TopicInfo", "versions": "0+", "about": "The set of state changelog topics associated with this subtopology. Created automatically." }, - { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", + { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", "entityType": "topicName", "about": "The repartition topics the subtopology writes to." }, { "name": "RepartitionSourceTopics", "type": "[]TopicInfo", "versions": "0+", "about": "The set of source topics that are internally created repartition topics. Created automatically." 
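The new EndpointInformationEpoch fields suggest an epoch handshake: the client reports the last endpoint epoch it received and the response carries the current one, which would let an unchanged IQ endpoint view be reused. The sketch below is one plausible client-side reading of that contract; the class and method names are illustrative and are not Kafka Streams APIs.

```java
// Illustrative only: a client-side tracker for the assumed EndpointInformationEpoch
// contract described above.
public class EndpointEpochTracker {
    private int lastSeenEpoch = -1;

    public int epochForNextHeartbeat() {
        return lastSeenEpoch; // echoed back in the next StreamsGroupHeartbeatRequest
    }

    public boolean shouldRefresh(int responseEpoch) {
        if (responseEpoch == lastSeenEpoch) {
            return false;      // endpoint information unchanged since last heartbeat
        }
        lastSeenEpoch = responseEpoch;
        return true;           // rebuild the IQ endpoint-to-partition view
    }
}
```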
}, @@ -102,7 +104,7 @@ "about": "value of the config" } ]}, { "name": "TopicInfo", "versions": "0+", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The name of the topic." }, { "name": "Partitions", "type": "int32", "versions": "0+", "about": "The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics." }, @@ -133,4 +135,4 @@ "about": "The partitions of the input topics processed by this member." } ]} ] -} \ No newline at end of file +} diff --git a/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json index a5f3a99f9deb5..efeaf4525716f 100644 --- a/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json +++ b/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json @@ -65,13 +65,17 @@ "about": "Assigned warm-up tasks for this client. Null if unchanged since last heartbeat." }, // IQ-related information + { "name": "EndpointInformationEpoch", "type": "int32", "versions": "0+", + "about": "The endpoint epoch set in the response"}, { "name": "PartitionsByUserEndpoint", "type": "[]EndpointToPartitions", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "Global assignment information used for IQ. Null if unchanged since last heartbeat." , "fields": [ { "name": "UserEndpoint", "type": "Endpoint", "versions": "0+", "about": "User-defined endpoint to connect to the node" }, - { "name": "Partitions", "type": "[]TopicPartition", "versions": "0+", - "about": "All partitions available on the node" } + { "name": "ActivePartitions", "type": "[]TopicPartition", "versions": "0+", + "about": "All topic partitions materialized by active tasks on the node" }, + { "name": "StandbyPartitions", "type": "[]TopicPartition", "versions": "0+", + "about": "All topic partitions materialized by standby tasks on the node" } ] } ], @@ -96,7 +100,7 @@ "about": "A string representation of the status." } ]}, { "name": "TopicPartition", "versions": "0+", "fields": [ - { "name": "Topic", "type": "string", "versions": "0+", + { "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName", "about": "topic name" }, { "name": "Partitions", "type": "[]int32", "versions": "0+", "about": "partitions" } @@ -114,4 +118,4 @@ "about": "port of the endpoint" } ]} ] -} \ No newline at end of file +} diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json index c0584542739ea..9ebe169c8d6ac 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json @@ -20,20 +20,19 @@ "name": "WriteShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about":"The group identifier." }, + "about": "The group identifier." }, { "name": "Topics", "type": "[]WriteStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." 
}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", - "about": "The state epoch for this share-partition." }, + "about": "The state epoch of the share-partition." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", @@ -41,11 +40,11 @@ { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "about": "The state batches for the share-partition.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "The base offset of this state batch." }, + "about": "The first offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0+", - "about": "The state - 0:Available,2:Acked,4:Archived." }, + "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count." } ]} diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json index e529126c44b77..8d4050476519c 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json @@ -24,6 +24,7 @@ // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) + // - FENCED_LEADER_EPOCH (version 0+) // - FENCED_STATE_EPOCH (version 0+) // - INVALID_REQUEST (version 0+) "fields": [ diff --git a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java index f647d95445f00..9812f490ddd7f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java @@ -186,7 +186,7 @@ public void testAuthorizationFailed() { connectionStates.authenticationFailed(nodeId1, time.milliseconds(), new AuthenticationException("No path to CA for certificate!")); time.sleep(1000); - assertEquals(connectionStates.connectionState(nodeId1), ConnectionState.AUTHENTICATION_FAILED); + assertEquals(ConnectionState.AUTHENTICATION_FAILED, connectionStates.connectionState(nodeId1)); assertNotNull(connectionStates.authenticationException(nodeId1)); assertFalse(connectionStates.hasReadyNodes(time.milliseconds())); assertFalse(connectionStates.canConnect(nodeId1, time.milliseconds())); @@ -210,7 +210,7 @@ public void testRemoveNode() { connectionStates.remove(nodeId1); assertTrue(connectionStates.canConnect(nodeId1, time.milliseconds())); assertFalse(connectionStates.isBlackedOut(nodeId1, time.milliseconds())); - assertEquals(connectionStates.connectionDelay(nodeId1, time.milliseconds()), 0L); + assertEquals(0L, connectionStates.connectionDelay(nodeId1, time.milliseconds())); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java 
b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java index 3166206f0a41f..1c50666c6afd0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java @@ -217,7 +217,8 @@ public void testSessionless() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 0, 0), - new RespEntry("foo", 1, fooId, 0, 0))); + new RespEntry("foo", 1, fooId, 0, 0)), + List.of()); handler.handleResponse(resp, version); FetchSessionHandler.Builder builder2 = handler.newBuilder(); @@ -258,7 +259,8 @@ public void testIncrementals() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), - new RespEntry("foo", 1, fooId, 10, 20))); + new RespEntry("foo", 1, fooId, 10, 20)), + List.of()); handler.handleResponse(resp, version); // Test an incremental fetch request which adds one partition and modifies another. @@ -280,13 +282,15 @@ public void testIncrementals() { data2.toSend()); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 1, fooId, 20, 20))); + respMap(new RespEntry("foo", 1, fooId, 20, 20)), + List.of()); handler.handleResponse(resp2, version); // Skip building a new request. Test that handling an invalid fetch session epoch response results // in a request which closes the session. FetchResponse resp3 = FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, INVALID_SESSION_ID, - respMap()); + respMap(), + List.of()); handler.handleResponse(resp3, version); FetchSessionHandler.Builder builder4 = handler.newBuilder(); @@ -346,7 +350,8 @@ public void testIncrementalPartitionRemoval() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20))); + new RespEntry("bar", 0, barId, 10, 20)), + List.of()); handler.handleResponse(resp, version); // Test an incremental fetch request which removes two partitions. @@ -366,8 +371,9 @@ public void testIncrementalPartitionRemoval() { // A FETCH_SESSION_ID_NOT_FOUND response triggers us to close the session. // The next request is a session establishing FULL request. - FetchResponse resp2 = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, - respMap()); + FetchResponse resp2 = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, + respMap(), + List.of()); handler.handleResponse(resp2, version); FetchSessionHandler.Builder builder3 = handler.newBuilder(); @@ -399,7 +405,8 @@ public void testTopicIdUsageGrantedOnIdUpgrade() { assertFalse(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, Uuid.ZERO_UUID, 10, 20))); + respMap(new RespEntry("foo", 0, Uuid.ZERO_UUID, 10, 20)), + List.of()); handler.handleResponse(resp, (short) 12); // Try to add a topic ID to an already existing topic partition (0) or a new partition (1) in the session. 
@@ -436,7 +443,8 @@ public void testIdUsageRevokedOnIdDowngrade() { assertTrue(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, fooId, 10, 20))); + respMap(new RespEntry("foo", 0, fooId, 10, 20)), + List.of()); handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Try to remove a topic ID from an existing topic partition (0) or add a new topic partition (1) without an ID. @@ -475,7 +483,7 @@ public void testTopicIdReplaced(boolean startsWithTopicIds, boolean endsWithTopi assertTrue(data.metadata().isFull()); assertEquals(startsWithTopicIds, data.canUseTopicIds()); - FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicId1, 10, 20))); + FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicId1, 10, 20)), List.of()); short version = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12; handler.handleResponse(resp, version); @@ -548,7 +556,7 @@ public void testSessionEpochWhenMixedUsageOfTopicIDs(boolean startsWithTopicIds) assertEquals(startsWithTopicIds, data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, fooId, 10, 20))); + respMap(new RespEntry("foo", 0, fooId, 10, 20)), List.of()); handler.handleResponse(resp, responseVersion); // Re-add the first partition. Then add a partition with opposite ID usage. @@ -583,7 +591,8 @@ public void testIdUsageWithAllForgottenPartitions(boolean useTopicIds) { assertEquals(useTopicIds, data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, topicId, 10, 20))); + respMap(new RespEntry("foo", 0, topicId, 10, 20)), + List.of()); handler.handleResponse(resp, responseVersion); // Remove the topic from the session @@ -610,7 +619,8 @@ public void testOkToAddNewIdAfterTopicRemovedFromSession() { assertTrue(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, topicId, 10, 20))); + respMap(new RespEntry("foo", 0, topicId, 10, 20)), + List.of()); handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Remove the partition from the session. Return a session ID as though the session is still open. @@ -619,7 +629,8 @@ public void testOkToAddNewIdAfterTopicRemovedFromSession() { assertMapsEqual(new LinkedHashMap<>(), data2.toSend(), data2.sessionPartitions()); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, - new LinkedHashMap<>()); + new LinkedHashMap<>(), + List.of()); handler.handleResponse(resp2, ApiKeys.FETCH.latestVersion()); // After the topic is removed, add a recreated topic with a new ID. 
@@ -651,7 +662,8 @@ public void testVerifyFullFetchResponsePartitions() { FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20))); + new RespEntry("bar", 0, barId, 10, 20)), + List.of()); String issue = handler.verifyFullFetchResponsePartitions(resp1.responseData(topicNames, version).keySet(), resp1.topicIds(), version); assertTrue(issue.contains("extraPartitions=")); @@ -664,13 +676,15 @@ public void testVerifyFullFetchResponsePartitions() { FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20))); + new RespEntry("bar", 0, barId, 10, 20)), + List.of()); String issue2 = handler.verifyFullFetchResponsePartitions(resp2.responseData(topicNames, version).keySet(), resp2.topicIds(), version); assertNull(issue2); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), - new RespEntry("foo", 1, fooId, 10, 20))); + new RespEntry("foo", 1, fooId, 10, 20)), + List.of()); String issue3 = handler.verifyFullFetchResponsePartitions(resp3.responseData(topicNames, version).keySet(), resp3.topicIds(), version); assertFalse(issue3.contains("extraPartitions=")); @@ -689,7 +703,8 @@ public void testVerifyFullFetchResponsePartitionsWithTopicIds() { FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), new RespEntry("extra2", 1, topicIds.get("extra2"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), + List.of()); String issue = handler.verifyFullFetchResponsePartitions(resp1.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp1.topicIds(), ApiKeys.FETCH.latestVersion()); assertTrue(issue.contains("extraPartitions=")); @@ -703,14 +718,16 @@ public void testVerifyFullFetchResponsePartitionsWithTopicIds() { FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), new RespEntry("extra2", 1, topicIds.get("extra2"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), + List.of()); String issue2 = handler.verifyFullFetchResponsePartitions(resp2.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp2.topicIds(), ApiKeys.FETCH.latestVersion()); assertTrue(issue2.contains("extraPartitions=")); assertFalse(issue2.contains("omittedPartitions=")); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), + List.of()); String issue3 = handler.verifyFullFetchResponsePartitions(resp3.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp3.topicIds(), ApiKeys.FETCH.latestVersion()); assertNull(issue3); @@ -734,7 +751,8 @@ public void testTopLevelErrorResetsMetadata() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), - new RespEntry("foo", 1, topicIds.get("foo"), 10, 20))); + new RespEntry("foo", 1, topicIds.get("foo"), 10, 20)), + List.of()); 
handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Test an incremental fetch request which adds an ID unknown to the broker. @@ -749,7 +767,8 @@ public void testTopLevelErrorResetsMetadata() { // Return and handle a response with a top level error FetchResponse resp2 = FetchResponse.of(Errors.UNKNOWN_TOPIC_ID, 0, 123, - respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID))); + respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID)), + List.of()); assertFalse(handler.handleResponse(resp2, ApiKeys.FETCH.latestVersion())); // Ensure we start with a new epoch. This will close the session in the next request. diff --git a/clients/src/test/java/org/apache/kafka/clients/GroupRebalanceConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/GroupRebalanceConfigTest.java new file mode 100644 index 0000000000000..41bd0cff42bf1 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/GroupRebalanceConfigTest.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients; + +import org.apache.kafka.clients.consumer.ConsumerConfig; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class GroupRebalanceConfigTest { + + @ParameterizedTest + @EnumSource(value = GroupRebalanceConfig.ProtocolType.class, names = {"CONSUMER", "SHARE"}) + void testRackIdIsEmptyIfNoDefined(GroupRebalanceConfig.ProtocolType protocolType) { + GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( + new ConsumerConfig(Map.of( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + )), + protocolType + ); + assertTrue(groupRebalanceConfig.rackId.isEmpty()); + } + + @ParameterizedTest + @EnumSource(value = GroupRebalanceConfig.ProtocolType.class, names = {"CONSUMER", "SHARE"}) + void testRackIdIsEmptyIfValueIsEmptyString(GroupRebalanceConfig.ProtocolType protocolType) { + GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( + new ConsumerConfig(Map.of( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.CLIENT_RACK_CONFIG, "", + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + )), + protocolType + ); + assertTrue(groupRebalanceConfig.rackId.isEmpty()); + } + + @ParameterizedTest + @EnumSource(value = GroupRebalanceConfig.ProtocolType.class, names = {"CONSUMER", "SHARE"}) + void testRackIdIsNotEmptyIfDefined(GroupRebalanceConfig.ProtocolType protocolType) { + GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( + new ConsumerConfig(Map.of( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer", + ConsumerConfig.CLIENT_RACK_CONFIG, "rack1", + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + )), + protocolType + ); + assertTrue(groupRebalanceConfig.rackId.isPresent()); + assertEquals("rack1", groupRebalanceConfig.rackId.get()); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java index 006de9d06d987..b60efe8950ff8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java @@ -102,7 +102,7 @@ public void testCompleteNext() { } @Test - public void testCompleteNextThrowsIfNoInflights() { + public void testCompleteNextThrowsIfNoInFlights() { assertThrows(IllegalStateException.class, () -> inFlightRequests.completeNext(dest)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java index cb70207e3d97b..9ac7519100465 100644 --- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java @@ -32,6 +32,7 @@ import 
org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; @@ -43,7 +44,6 @@ import org.junit.jupiter.api.Test; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -209,8 +209,8 @@ public void testIgnoreLeaderEpochInOlderMetadataResponse() { .setBrokers(new MetadataResponseBrokerCollection()); for (short version = ApiKeys.METADATA.oldestVersion(); version < 9; version++) { - ByteBuffer buffer = MessageUtil.toByteBufferAccessor(data, version).buffer(); - MetadataResponse response = MetadataResponse.parse(buffer, version); + Readable readable = MessageUtil.toByteBufferAccessor(data, version); + MetadataResponse response = MetadataResponse.parse(readable, version); assertFalse(response.hasReliableLeaderEpochs()); metadata.updateWithCurrentRequestVersion(response, false, 100); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); @@ -219,8 +219,8 @@ public void testIgnoreLeaderEpochInOlderMetadataResponse() { } for (short version = 9; version <= ApiKeys.METADATA.latestVersion(); version++) { - ByteBuffer buffer = MessageUtil.toByteBufferAccessor(data, version).buffer(); - MetadataResponse response = MetadataResponse.parse(buffer, version); + Readable readable = MessageUtil.toByteBufferAccessor(data, version); + MetadataResponse response = MetadataResponse.parse(readable, version); assertTrue(response.hasReliableLeaderEpochs()); metadata.updateWithCurrentRequestVersion(response, false, 100); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); @@ -361,28 +361,28 @@ public void testUpdateLastEpoch() { // Metadata with newer epoch is handled metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); // Don't update to an older one assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 1)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); // Don't cause update if it's the same one assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 10)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); // Update if we see newer epoch assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 12)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 12); 
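Most of the MetadataTest hunks in this file simply reorder assertEquals arguments. A brief illustration (variable name is hypothetical, used only for the example): JUnit 5's assertEquals takes the expected value first, so a reversed call passes or fails identically but reports a misleading failure message, which is what these changes clean up.

import static org.junit.jupiter.api.Assertions.assertEquals;

// assertEquals(expected, actual): with the arguments reversed, a failure is reported as
// "expected: <actual> but was: <expected>", which is confusing to read in CI output.
int lastSeenEpoch = 10;              // hypothetical "actual" value for illustration
assertEquals(10, lastSeenEpoch);     // preferred: expected value first (the style adopted in this diff)
// assertEquals(lastSeenEpoch, 10);  // old style being replaced throughout these hunks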
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); // Don't overwrite metadata with older epoch metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 11); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); } @Test @@ -465,7 +465,7 @@ public void testRejectOldMetadata() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNotNull(metadata.fetch().partition(tp)); assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent()); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); } // Fake an empty ISR, but with an older epoch, should reject it @@ -475,8 +475,8 @@ public void testRejectOldMetadata() { new MetadataResponse.PartitionMetadata(error, partition, leader, leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); - assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 1); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(1, metadata.fetch().partition(tp).inSyncReplicas().length); + assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); } // Fake an empty ISR, with same epoch, accept it @@ -486,8 +486,8 @@ public void testRejectOldMetadata() { new MetadataResponse.PartitionMetadata(error, partition, leader, leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); - assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 0); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(0, metadata.fetch().partition(tp).inSyncReplicas().length); + assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); } // Empty metadata response, should not keep old partition but should keep the last-seen epoch @@ -495,7 +495,7 @@ public void testRejectOldMetadata() { MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); assertNull(metadata.fetch().partition(tp)); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); } // Back in the metadata, with old epoch, should not get added @@ -503,7 +503,7 @@ public void testRejectOldMetadata() { MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 99); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNull(metadata.fetch().partition(tp)); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(100, 
metadata.lastSeenLeaderEpoch(tp).get().longValue()); } } @@ -522,31 +522,31 @@ public void testOutOfBandEpochUpdate() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNotNull(metadata.fetch().partition(tp)); assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent()); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); + assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); // Simulate a leader epoch from another response, like a fetch response or list offsets assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 101)); // Cache of partition stays, but current partition info is not available since it's stale assertNotNull(metadata.fetch().partition(tp)); - assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); + assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); + assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); // Metadata with older epoch is rejected, metadata state is unchanged metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); assertNotNull(metadata.fetch().partition(tp)); - assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); + assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); + assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); // Metadata with equal or newer epoch is accepted metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 101); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 30L); assertNotNull(metadata.fetch().partition(tp)); - assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); + assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); + assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); } @Test @@ -585,18 +585,18 @@ public void testClusterCopy() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L); Cluster cluster = metadata.fetch(); - assertEquals(cluster.clusterResource().clusterId(), "dummy"); - assertEquals(cluster.nodes().size(), 4); + assertEquals("dummy", cluster.clusterResource().clusterId()); + assertEquals(4, cluster.nodes().size()); // topic counts assertEquals(cluster.invalidTopics(), Collections.singleton("topic3")); assertEquals(cluster.unauthorizedTopics(), Collections.singleton("topic4")); - assertEquals(cluster.topics().size(), 3); + assertEquals(3, cluster.topics().size()); assertEquals(cluster.internalTopics(), Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME)); // partition counts - assertEquals(cluster.partitionsForTopic("topic1").size(), 2); - assertEquals(cluster.partitionsForTopic("topic2").size(), 3); + assertEquals(2, cluster.partitionsForTopic("topic1").size()); + assertEquals(3, cluster.partitionsForTopic("topic2").size()); // Sentinel instances InetSocketAddress address = 
InetSocketAddress.createUnresolved("localhost", 0); @@ -798,10 +798,10 @@ public void testNodeIfOffline() { TopicPartition tp = new TopicPartition("topic-1", 0); - assertOptional(metadata.fetch().nodeIfOnline(tp, 0), node -> assertEquals(node.id(), 0)); + assertOptional(metadata.fetch().nodeIfOnline(tp, 0), node -> assertEquals(0, node.id())); assertFalse(metadata.fetch().nodeIfOnline(tp, 1).isPresent()); - assertEquals(metadata.fetch().nodeById(0).id(), 0); - assertEquals(metadata.fetch().nodeById(1).id(), 1); + assertEquals(0, metadata.fetch().nodeById(0).id()); + assertEquals(1, metadata.fetch().nodeById(1).id()); } @Test @@ -831,7 +831,7 @@ public void testNodeIfOnlineNonExistentTopicPartition() { TopicPartition tp = new TopicPartition("topic-1", 0); - assertEquals(metadata.fetch().nodeById(0).id(), 0); + assertEquals(0, metadata.fetch().nodeById(0).id()); assertNull(metadata.fetch().partition(tp)); assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty()); } @@ -955,13 +955,13 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { // Update the metadata to add a new topic variant, "new", which will be retained with "keep". Note this // means that all of the "old" topics should be dropped. Cluster cluster = metadata.fetch(); - assertEquals(cluster.clusterResource().clusterId(), oldClusterId); - assertEquals(cluster.nodes().size(), oldNodes); - assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic"))); - assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic"))); - assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic"))); - assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2); - assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3); + assertEquals(oldClusterId, cluster.clusterResource().clusterId()); + assertEquals(oldNodes, cluster.nodes().size()); + assertEquals(cluster.invalidTopics(), Set.of("oldInvalidTopic", "keepInvalidTopic")); + assertEquals(cluster.unauthorizedTopics(), Set.of("oldUnauthorizedTopic", "keepUnauthorizedTopic")); + assertEquals(cluster.topics(), Set.of("oldValidTopic", "keepValidTopic")); + assertEquals(2, cluster.partitionsForTopic("oldValidTopic").size()); + assertEquals(3, cluster.partitionsForTopic("keepValidTopic").size()); assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values())); String newClusterId = "newClusterId"; @@ -990,13 +990,13 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { assertNull(metadataTopicIds2.get("oldValidTopic")); cluster = metadata.fetch(); - assertEquals(cluster.clusterResource().clusterId(), newClusterId); + assertEquals(newClusterId, cluster.clusterResource().clusterId()); assertEquals(cluster.nodes().size(), newNodes); - assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic"))); - assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic"))); - assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic"))); - assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2); - assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4); + assertEquals(cluster.invalidTopics(), Set.of("keepInvalidTopic", "newInvalidTopic")); + assertEquals(cluster.unauthorizedTopics(), Set.of("keepUnauthorizedTopic", 
"newUnauthorizedTopic")); + assertEquals(cluster.topics(), Set.of("keepValidTopic", "newValidTopic")); + assertEquals(2, cluster.partitionsForTopic("keepValidTopic").size()); + assertEquals(4, cluster.partitionsForTopic("newValidTopic").size()); assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values())); // Perform another metadata update, but this time all topic metadata should be cleared. @@ -1008,7 +1008,7 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName))); cluster = metadata.fetch(); - assertEquals(cluster.clusterResource().clusterId(), newClusterId); + assertEquals(newClusterId, cluster.clusterResource().clusterId()); assertEquals(cluster.nodes().size(), newNodes); assertEquals(cluster.invalidTopics(), Collections.emptySet()); assertEquals(cluster.unauthorizedTopics(), Collections.emptySet()); diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index cef48b65bb69f..e8dcf5843dcb8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -748,24 +748,6 @@ public void testConnectionThrottling() { assertEquals(0, client.throttleDelayMs(node, time.milliseconds())); } - // Creates expected ApiVersionsResponse from the specified node, where the max protocol version for the specified - // key is set to the specified version. - private ApiVersionsResponse createExpectedApiVersionsResponse(ApiKeys key, short maxVersion) { - ApiVersionCollection versionList = new ApiVersionCollection(); - for (ApiKeys apiKey : ApiKeys.values()) { - if (apiKey == key) { - versionList.add(new ApiVersion() - .setApiKey(apiKey.id) - .setMinVersion((short) 0) - .setMaxVersion(maxVersion)); - } else versionList.add(ApiVersionsResponse.toApiVersion(apiKey)); - } - return new ApiVersionsResponse(new ApiVersionsResponseData() - .setErrorCode(Errors.NONE.code()) - .setThrottleTimeMs(0) - .setApiKeys(versionList)); - } - private int sendEmptyProduceRequest() { return sendEmptyProduceRequest(client, node.idString()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java index 061982e34d360..02c094433703f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java @@ -31,6 +31,8 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; public class AdminClientTestUtils { @@ -163,21 +165,29 @@ public static ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult(Stri return new ListConsumerGroupOffsetsResult(Collections.singletonMap(CoordinatorKey.byGroupId(group), future)); } - public static ListClientMetricsResourcesResult listClientMetricsResourcesResult(String... 
names) { - return new ListClientMetricsResourcesResult( - KafkaFuture.completedFuture(Arrays.stream(names) - .map(ClientMetricsResourceListing::new) - .collect(Collectors.toList()))); + public static ListConfigResourcesResult listConfigResourcesResult(Map> resourceNames) { + Collection resources = resourceNames.entrySet().stream() + .flatMap(entry -> entry.getValue().stream() + .map(name -> new ConfigResource(entry.getKey(), name))) + .collect(Collectors.toList()); + return new ListConfigResourcesResult(KafkaFuture.completedFuture(resources)); } - public static ListClientMetricsResourcesResult listClientMetricsResourcesResult(KafkaException exception) { - final KafkaFutureImpl> future = new KafkaFutureImpl<>(); + public static ListConfigResourcesResult listConfigResourcesResult(String... names) { + return new ListConfigResourcesResult( + KafkaFuture.completedFuture(Arrays.stream(names) + .map(name -> new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name)) + .collect(Collectors.toList()))); + } + + public static ListConfigResourcesResult listConfigResourcesResult(KafkaException exception) { + final KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.completeExceptionally(exception); - return new ListClientMetricsResourcesResult(future); + return new ListConfigResourcesResult(future); } - public static ListShareGroupOffsetsResult createListShareGroupOffsetsResult(Map>> groupOffsets) { - Map>> coordinatorFutures = groupOffsets.entrySet().stream() + public static ListShareGroupOffsetsResult createListShareGroupOffsetsResult(Map>> groupOffsets) { + Map>> coordinatorFutures = groupOffsets.entrySet().stream() .collect(Collectors.toMap( entry -> CoordinatorKey.byGroupId(entry.getKey()), Map.Entry::getValue @@ -185,6 +195,22 @@ public static ListShareGroupOffsetsResult createListShareGroupOffsetsResult(Map< return new ListShareGroupOffsetsResult(coordinatorFutures); } + public static ListOffsetsResult createListOffsetsResult(Map partitionOffsets) { + Map> futures = + partitionOffsets.entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> KafkaFuture.completedFuture( + new ListOffsetsResult.ListOffsetsResultInfo( + entry.getValue().offset(), + System.currentTimeMillis(), + Optional.of(1) + ) + ) + )); + return new ListOffsetsResult(futures); + } + /** * Helper to create a KafkaAdminClient with a custom HostResolver accessible to tests outside this package. 
*/ diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java index d09cca7ad665b..f3b1e73d72ef4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java @@ -64,7 +64,7 @@ public void shouldImplementEqualsProperly() { assertEquals(config, config); assertEquals(config, new Config(config.entries())); assertNotEquals(new Config(Collections.singletonList(E1)), config); - assertNotEquals(config, "this"); + assertNotEquals("this", config); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java index cd2250d365fad..1e577b8319a42 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java @@ -59,7 +59,7 @@ public void setUp() { } @Test - public void testTopLevelErrorConstructor() throws InterruptedException { + public void testTopLevelErrorConstructor() { partitionFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); DeleteConsumerGroupOffsetsResult topLevelErrorResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index 5d16bd0b9042f..9084a25836efc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -56,7 +56,6 @@ import org.apache.kafka.common.errors.DuplicateVoterException; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; -import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.GroupSubscribedToTopicException; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; @@ -141,7 +140,7 @@ import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse; -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; +import org.apache.kafka.common.message.ListConfigResourcesResponseData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListGroupsResponseData.ListedGroup; import org.apache.kafka.common.message.ListOffsetsResponseData; @@ -161,6 +160,7 @@ import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.RemoveRaftVoterRequestData; import org.apache.kafka.common.message.RemoveRaftVoterResponseData; import org.apache.kafka.common.message.ShareGroupDescribeResponseData; @@ -224,8 +224,8 @@ import org.apache.kafka.common.requests.JoinGroupRequest; import 
org.apache.kafka.common.requests.LeaveGroupRequest; import org.apache.kafka.common.requests.LeaveGroupResponse; -import org.apache.kafka.common.requests.ListClientMetricsResourcesRequest; -import org.apache.kafka.common.requests.ListClientMetricsResourcesResponse; +import org.apache.kafka.common.requests.ListConfigResourcesRequest; +import org.apache.kafka.common.requests.ListConfigResourcesResponse; import org.apache.kafka.common.requests.ListGroupsRequest; import org.apache.kafka.common.requests.ListGroupsResponse; import org.apache.kafka.common.requests.ListOffsetsRequest; @@ -239,7 +239,6 @@ import org.apache.kafka.common.requests.OffsetDeleteResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.requests.RemoveRaftVoterRequest; import org.apache.kafka.common.requests.RemoveRaftVoterResponse; import org.apache.kafka.common.requests.RequestTestUtils; @@ -337,7 +336,6 @@ public class KafkaAdminClientTest { private static final Logger log = LoggerFactory.getLogger(KafkaAdminClientTest.class); private static final String GROUP_ID = "group-0"; - private static final int THROTTLE = 10; public static final Uuid REPLICA_DIRECTORY_ID = Uuid.randomUuid(); @Test @@ -391,10 +389,10 @@ public void testParseSuccessfulDescribeClusterResponse(boolean includeController assertNull(cluster.controller()); } assertEquals("Ek8tjqq1QBWfnaoyHFZqDg", cluster.clusterResource().clusterId()); - assertEquals(new HashSet<>(asList( + assertEquals(Set.of( new Node(0, "controller0.com", 9092), new Node(1, "controller1.com", 9092), - new Node(2, "controller2.com", 9092))), new HashSet<>(cluster.nodes())); + new Node(2, "controller2.com", 9092)), new HashSet<>(cluster.nodes())); } @Test @@ -503,7 +501,7 @@ public void testExplicitlyEnableTelemetryReporter() { .map(r -> (ClientTelemetryReporter) r) .collect(Collectors.toList()); - assertEquals(telemetryReporterList.size(), 1); + assertEquals(1, telemetryReporterList.size()); } } @@ -553,7 +551,8 @@ public void testCloseAdminClient() { * Test if admin client can be closed in the callback invoked when * an api call completes. If calling {@link Admin#close()} in callback, AdminClient thread hangs */ - @Test @Timeout(10) + @Test + @Timeout(10) public void testCloseAdminClientInCallback() throws InterruptedException { MockTime time = new MockTime(); AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0)); @@ -1593,7 +1592,7 @@ public void testDescribeTopicPartitionsApiWithAuthorizedOps() throws ExecutionEx Map topicDescriptions = result.allTopicNames().get(); TopicDescription topicDescription = topicDescriptions.get(topicName0); - assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)), + assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER), topicDescription.authorizedOperations()); } } @@ -2085,8 +2084,8 @@ public void testElectLeaders() throws Exception { electionResults, ApiKeys.ELECT_LEADERS.latestVersion())); ElectLeadersResult results = env.adminClient().electLeaders( electionType, - new HashSet<>(asList(topic1, topic2))); - assertEquals(results.partitions().get().get(topic2).get().getClass(), ClusterAuthorizationException.class); + Set.of(topic1, topic2)); + assertEquals(ClusterAuthorizationException.class, results.partitions().get().get(topic2).get().getClass()); // Test a call where there are no errors. 
By mutating the internal of election results partition1Result.setErrorCode(ApiError.NONE.error().code()); @@ -2097,14 +2096,14 @@ public void testElectLeaders() throws Exception { env.kafkaClient().prepareResponse(new ElectLeadersResponse(0, Errors.NONE.code(), electionResults, ApiKeys.ELECT_LEADERS.latestVersion())); - results = env.adminClient().electLeaders(electionType, new HashSet<>(asList(topic1, topic2))); + results = env.adminClient().electLeaders(electionType, Set.of(topic1, topic2)); assertFalse(results.partitions().get().get(topic1).isPresent()); assertFalse(results.partitions().get().get(topic2).isPresent()); // Now try a timeout results = env.adminClient().electLeaders( electionType, - new HashSet<>(asList(topic1, topic2)), + Set.of(topic1, topic2), new ElectLeadersOptions().timeoutMs(100)); TestUtils.assertFutureThrows(TimeoutException.class, results.partitions()); } @@ -2128,7 +2127,7 @@ public void testDescribeBrokerConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( broker0Resource, broker1Resource)).values(); - assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet()); + assertEquals(Set.of(broker0Resource, broker1Resource), result.keySet()); result.get(broker0Resource).get(); result.get(broker1Resource).get(); } @@ -2150,7 +2149,7 @@ public void testDescribeBrokerAndLogConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( brokerResource, brokerLoggerResource)).values(); - assertEquals(new HashSet<>(asList(brokerResource, brokerLoggerResource)), result.keySet()); + assertEquals(Set.of(brokerResource, brokerLoggerResource), result.keySet()); result.get(brokerResource).get(); result.get(brokerLoggerResource).get(); } @@ -2169,7 +2168,7 @@ public void testDescribeConfigsPartialResponse() { Map> result = env.adminClient().describeConfigs(asList( topic, topic2)).values(); - assertEquals(new HashSet<>(asList(topic, topic2)), result.keySet()); + assertEquals(Set.of(topic, topic2), result.keySet()); result.get(topic); TestUtils.assertFutureThrows(ApiException.class, result.get(topic2)); } @@ -2190,7 +2189,7 @@ public void testDescribeConfigsUnrequested() throws Exception { .setConfigs(emptyList()))))); Map> result = env.adminClient().describeConfigs(singletonList( topic)).values(); - assertEquals(new HashSet<>(singletonList(topic)), result.keySet()); + assertEquals(Set.of(topic), result.keySet()); assertNotNull(result.get(topic).get()); assertNull(result.get(unrequested)); } @@ -2213,7 +2212,7 @@ public void testDescribeClientMetricsConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( resource, resource1)).values(); - assertEquals(new HashSet<>(asList(resource, resource1)), result.keySet()); + assertEquals(Set.of(resource, resource1), result.keySet()); assertNotNull(result.get(resource).get()); assertNotNull(result.get(resource1).get()); } @@ -2240,7 +2239,7 @@ public void testDescribeConsumerGroupConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( resource1, resource2)).values(); - assertEquals(new HashSet<>(asList(resource1, resource2)), result.keySet()); + assertEquals(Set.of(resource1, resource2), result.keySet()); assertNotNull(result.get(resource1).get()); assertNotNull(result.get(resource2).get()); } @@ -2292,7 +2291,7 @@ private static DescribeLogDirsResponse prepareDescribeLogDirsResponse(Errors err private static DescribeLogDirsResponse prepareEmptyDescribeLogDirsResponse(Optional error) { 
DescribeLogDirsResponseData data = new DescribeLogDirsResponseData(); - if (error.isPresent()) data.setErrorCode(error.get().code()); + error.ifPresent(e -> data.setErrorCode(e.code())); return new DescribeLogDirsResponse(data); } @@ -2906,7 +2905,7 @@ public void testDescribeCluster() throws Exception { assertEquals(env.cluster().clusterResource().clusterId(), result2.clusterId().get()); assertEquals(new HashSet<>(env.cluster().nodes()), new HashSet<>(result2.nodes().get())); assertEquals(3, result2.controller().get().id()); - assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)), + assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER), result2.authorizedOperations().get()); } } @@ -3190,6 +3189,42 @@ public void testListGroupsEmptyGroupType() throws Exception { } } + @Test + public void testListGroupsWithProtocolTypes() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + // Test with list group options. + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(), Set.of()), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable") + .setGroupType(GroupType.CONSUMER.toString()), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setGroupState("Empty") + .setGroupType(GroupType.CONSUMER.toString())))), + env.cluster().nodeById(0)); + + final ListGroupsOptions options = new ListGroupsOptions().withProtocolTypes(Set.of("")); + final ListGroupsResult result = env.adminClient().listGroups(options); + Collection listing = result.valid().get(); + + assertEquals(1, listing.size()); + List expected = new ArrayList<>(); + expected.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY))); + assertEquals(expected, listing); + assertEquals(0, result.errors().get().size()); + } + } + @Test public void testListGroupsWithTypes() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { @@ -3228,15 +3263,13 @@ public void testListGroupsWithTypes() throws Exception { } @Test - public void testListGroupsWithTypesOlderBrokerVersion() { + public void testListGroupsWithTypesOlderBrokerVersion() throws Exception { ApiVersion listGroupV4 = new ApiVersion() .setApiKey(ApiKeys.LIST_GROUPS.id) .setMinVersion((short) 0) .setMaxVersion((short) 4); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4))); - - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); // Check that we cannot set a type filter with an older broker. 
env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); @@ -3244,9 +3277,44 @@ public void testListGroupsWithTypesOlderBrokerVersion() { request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() ); - ListGroupsOptions options = new ListGroupsOptions().withTypes(singleton(GroupType.CLASSIC)); + ListGroupsOptions options = new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE)); ListGroupsResult result = env.adminClient().listGroups(options); TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + + // But a type filter which is just classic groups is permitted with an older broker, because they + // only know about classic groups so the types filter can be omitted. + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(), Set.of()), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState(GroupState.STABLE.toString())))), + env.cluster().nodeById(0)); + + options = new ListGroupsOptions().withTypes(Set.of(GroupType.CLASSIC)); + result = env.adminClient().listGroups(options); + + Collection listing = result.all().get(); + assertEquals(1, listing.size()); + List expected = List.of( + new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)) + ); + assertEquals(expected, listing); + + // But a type filter which is just consumer groups without classic groups is not permitted with an older broker. 
+ env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + env.kafkaClient().prepareUnsupportedVersionResponse(request -> + request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() + ); + + options = new ListGroupsOptions().withTypes(Set.of(GroupType.CONSUMER)); + result = env.adminClient().listGroups(options); + TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); } } @@ -3268,7 +3336,6 @@ public void testDescribeClusterHandleUnsupportedVersionForIncludingFencedBrokers } @Test - @SuppressWarnings("removal") public void testListConsumerGroups() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), AdminClientConfig.RETRIES_CONFIG, "2")) { @@ -3276,89 +3343,441 @@ public void testListConsumerGroups() throws Exception { // Empty metadata response should be retried env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - Collections.emptyList(), - env.cluster().clusterResource().clusterId(), - -1, - Collections.emptyList())); + RequestTestUtils.metadataResponse( + List.of(), + env.cluster().clusterResource().clusterId(), + -1, + List.of())); env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - env.cluster().nodes(), - env.cluster().clusterResource().clusterId(), - env.cluster().controller().id(), - Collections.emptyList())); + RequestTestUtils.metadataResponse( + env.cluster().nodes(), + env.cluster().clusterResource().clusterId(), + env.cluster().controller().id(), + List.of())); env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(asList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-1") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(0)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-1") + .setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(0)); // handle retriable errors env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setGroups(Collections.emptyList()) - ), - env.cluster().nodeById(1)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setGroups(Collections.emptyList()) + ), + env.cluster().nodeById(1)); + env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) + .setGroups(Collections.emptyList()) + ), + env.cluster().nodeById(1)); + env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(asList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-2") + 
.setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(1)); + + env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-3") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-3") + .setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(2)); + + // fatal error + env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) + .setGroups(Collections.emptyList())), + env.cluster().nodeById(3)); + + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forConsumerGroups()); + TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + + Collection listings = result.valid().get(); + assertEquals(3, listings.size()); + + Set groupIds = new HashSet<>(); + for (GroupListing listing : listings) { + groupIds.add(listing.groupId()); + assertTrue(listing.groupState().isPresent()); + } + + assertEquals(Set.of("group-1", "group-2", "group-3"), groupIds); + assertEquals(1, result.errors().get().size()); + } + } + + @Test + public void testListConsumerGroupsMetadataFailure() throws Exception { + final Cluster cluster = mockCluster(3, 0); + final Time time = new MockTime(); + + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, + AdminClientConfig.RETRIES_CONFIG, "0")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + // Empty metadata causes the request to fail since we have no list of brokers + // to send the ListGroups requests to + env.kafkaClient().prepareResponse( + RequestTestUtils.metadataResponse( + List.of(), + env.cluster().clusterResource().clusterId(), + -1, + List.of())); + + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forConsumerGroups()); + TestUtils.assertFutureThrows(KafkaException.class, result.all()); + } + } + + @Test + public void testListConsumerGroupsWithStates() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setGroupState("Empty")))), + env.cluster().nodeById(0)); + + final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups(); + final ListGroupsResult result = env.adminClient().listGroups(options); + Collection listings = result.valid().get(); + + assertEquals(2, listings.size()); + List expected = new ArrayList<>(); + expected.add(new GroupListing("group-2", Optional.empty(), "", Optional.of(GroupState.EMPTY))); + expected.add(new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + assertEquals(expected, listings); + assertEquals(0, result.errors().get().size()); + } + } + + @Test + public void 
testListConsumerGroupsWithProtocolTypes() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + // Test with a specific protocol type filter in list consumer group options. + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString(), GroupType.CLASSIC.toString())), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable") + .setGroupType(GroupType.CONSUMER.toString()), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setGroupState("Empty") + .setGroupType(GroupType.CONSUMER.toString())))), + env.cluster().nodeById(0)); + + final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); + final ListGroupsResult result = env.adminClient().listGroups(options); + Collection listings = result.valid().get(); + + assertEquals(1, listings.size()); + List expected = new ArrayList<>(); + expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + assertEquals(expected, listings); + assertEquals(0, result.errors().get().size()); + } + } + + @Test + public void testListConsumerGroupsWithTypes() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + // Test with a specific state filter but no type filter in list consumer group options. + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of(GroupType.CONSUMER.toString(), GroupType.CLASSIC.toString())), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable") + .setGroupType(GroupType.CLASSIC.toString())))), + env.cluster().nodeById(0)); + + final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); + final ListGroupsResult result = env.adminClient().listGroups(options); + Collection listings = result.valid().get(); + + assertEquals(1, listings.size()); + List expected = new ArrayList<>(); + expected.add(new GroupListing("group-1", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + assertEquals(expected, listings); + assertEquals(0, result.errors().get().size()); + + // Test with list consumer group options. 
+ env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString())), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable") + .setGroupType(GroupType.CONSUMER.toString()), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setGroupState("Empty") + .setGroupType(GroupType.CONSUMER.toString())))), + env.cluster().nodeById(0)); + + final ListGroupsOptions options2 = ListGroupsOptions.forConsumerGroups().withTypes(Set.of(GroupType.CONSUMER)); + final ListGroupsResult result2 = env.adminClient().listGroups(options2); + Collection listings2 = result2.valid().get(); + + assertEquals(2, listings2.size()); + List expected2 = new ArrayList<>(); + expected2.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY))); + expected2.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + assertEquals(expected2, listings2); + assertEquals(0, result.errors().get().size()); + } + } + + @Test + public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exception { + ApiVersion listGroupV3 = new ApiVersion() + .setApiKey(ApiKeys.LIST_GROUPS.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV3))); + + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + // Check we can list groups v3 with older broker if we don't specify states, and use just consumer group types which can be omitted. 
+ env.kafkaClient().prepareResponseFrom( + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), + env.cluster().nodeById(0)); + + ListGroupsOptions options = ListGroupsOptions.forConsumerGroups(); + ListGroupsResult result = env.adminClient().listGroups(options); + Collection listing = result.all().get(); + assertEquals(1, listing.size()); + List expected = List.of(new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty())); + assertEquals(expected, listing); + + // But we cannot set a state filter with older broker + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareUnsupportedVersionResponse(request -> + request instanceof ListGroupsRequest && + !((ListGroupsRequest) request).data().statesFilter().isEmpty() + ); + + options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); + result = env.adminClient().listGroups(options); + TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + } + } + + @Test + public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception { + ApiVersion listGroupV4 = new ApiVersion() + .setApiKey(ApiKeys.LIST_GROUPS.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 4); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); + + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + // Check if we can list groups v4 with older broker if we specify states and don't specify types. + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState(GroupState.STABLE.toString())))), + env.cluster().nodeById(0)); + + ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); + ListGroupsResult result = env.adminClient().listGroups(options); + + Collection listing = result.all().get(); + assertEquals(1, listing.size()); + List expected = List.of( + new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)) + ); + assertEquals(expected, listing); + + // Check that we cannot set a type filter with an older broker. 
+            env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE));
+            // First attempt to build request will require v5 (type filter), but the broker only supports v4
+            env.kafkaClient().prepareUnsupportedVersionResponse(request ->
+                request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty()
+            );
+
+            options = ListGroupsOptions.forConsumerGroups().withTypes(Set.of(GroupType.SHARE));
+            result = env.adminClient().listGroups(options);
+            TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all());
+        }
+    }
+
+    @Test
+    @SuppressWarnings("removal")
+    public void testListConsumerGroupsDeprecated() throws Exception {
+        try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0),
+            AdminClientConfig.RETRIES_CONFIG, "2")) {
+            env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
+
+            // Empty metadata response should be retried
+            env.kafkaClient().prepareResponse(
+                RequestTestUtils.metadataResponse(
+                    List.of(),
+                    env.cluster().clusterResource().clusterId(),
+                    -1,
+                    List.of()));
+
+            env.kafkaClient().prepareResponse(
+                RequestTestUtils.metadataResponse(
+                    env.cluster().nodes(),
+                    env.cluster().clusterResource().clusterId(),
+                    env.cluster().controller().id(),
+                    List.of()));
+
+            env.kafkaClient().prepareResponseFrom(
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.NONE.code())
+                        .setGroups(List.of(
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-1")
+                                .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
+                                .setGroupState("Stable"),
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-connect-1")
+                                .setProtocolType("connector")
+                                .setGroupState("Stable")
+                        ))),
+                env.cluster().nodeById(0));
+
+            // handle retriable errors
+            env.kafkaClient().prepareResponseFrom(
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
+                        .setGroups(List.of())
+                ),
+                env.cluster().nodeById(1));
             env.kafkaClient().prepareResponseFrom(
-                    new ListGroupsResponse(
-                            new ListGroupsResponseData()
-                                    .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())
-                                    .setGroups(Collections.emptyList())
-                    ),
-                    env.cluster().nodeById(1));
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())
+                        .setGroups(List.of())
+                ),
+                env.cluster().nodeById(1));
             env.kafkaClient().prepareResponseFrom(
-                    new ListGroupsResponse(
-                            new ListGroupsResponseData()
-                                    .setErrorCode(Errors.NONE.code())
-                                    .setGroups(asList(
-                                            new ListGroupsResponseData.ListedGroup()
-                                                    .setGroupId("group-2")
-                                                    .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
-                                                    .setGroupState("Stable"),
-                                            new ListGroupsResponseData.ListedGroup()
-                                                    .setGroupId("group-connect-2")
-                                                    .setProtocolType("connector")
-                                                    .setGroupState("Stable")
-                                    ))),
-                    env.cluster().nodeById(1));
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.NONE.code())
+                        .setGroups(List.of(
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-2")
+                                .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
+                                .setGroupState("Stable"),
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-connect-2")
+                                .setProtocolType("connector")
+                                .setGroupState("Stable")
+                        ))),
+                env.cluster().nodeById(1));
             env.kafkaClient().prepareResponseFrom(
-                    new ListGroupsResponse(
-                            new ListGroupsResponseData()
-                                    .setErrorCode(Errors.NONE.code())
-                                    .setGroups(asList(
-                                            new ListGroupsResponseData.ListedGroup()
-                                                    .setGroupId("group-3")
-                                                    .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
-                                                    .setGroupState("Stable"),
-                                            new ListGroupsResponseData.ListedGroup()
-                                                    .setGroupId("group-connect-3")
-                                                    .setProtocolType("connector")
-                                                    .setGroupState("Stable")
-                                    ))),
-                    env.cluster().nodeById(2));
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.NONE.code())
+                        .setGroups(List.of(
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-3")
+                                .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
+                                .setGroupState("Stable"),
+                            new ListGroupsResponseData.ListedGroup()
+                                .setGroupId("group-connect-3")
+                                .setProtocolType("connector")
+                                .setGroupState("Stable")
+                        ))),
+                env.cluster().nodeById(2));
             // fatal error
             env.kafkaClient().prepareResponseFrom(
-                    new ListGroupsResponse(
-                            new ListGroupsResponseData()
-                                    .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
-                                    .setGroups(Collections.emptyList())),
-                    env.cluster().nodeById(3));
+                new ListGroupsResponse(
+                    new ListGroupsResponseData()
+                        .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
+                        .setGroups(List.of())),
+                env.cluster().nodeById(3));
             final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups();
             TestUtils.assertFutureThrows(UnknownServerException.class, result.all());
@@ -3379,22 +3798,22 @@ public void testListConsumerGroups() throws Exception {
     @Test
     @SuppressWarnings("removal")
-    public void testListConsumerGroupsMetadataFailure() throws Exception {
+    public void testListConsumerGroupsDeprecatedMetadataFailure() throws Exception {
         final Cluster cluster = mockCluster(3, 0);
         final Time time = new MockTime();
         try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
-                AdminClientConfig.RETRIES_CONFIG, "0")) {
+            AdminClientConfig.RETRIES_CONFIG, "0")) {
             env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
             // Empty metadata causes the request to fail since we have no list of brokers
             // to send the ListGroups requests to
             env.kafkaClient().prepareResponse(
-                    RequestTestUtils.metadataResponse(
-                            Collections.emptyList(),
-                            env.cluster().clusterResource().clusterId(),
-                            -1,
-                            Collections.emptyList()));
+                RequestTestUtils.metadataResponse(
+                    List.of(),
+                    env.cluster().clusterResource().clusterId(),
+                    -1,
+                    List.of()));
             final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups();
             TestUtils.assertFutureThrows(KafkaException.class, result.all());
@@ -3403,7 +3822,7 @@ public void testListConsumerGroupsMetadataFailure() throws Exception {
     @Test
     @SuppressWarnings("removal")
-    public void testListConsumerGroupsWithStates() throws Exception {
+    public void testListConsumerGroupsDeprecatedWithStates() throws Exception {
         try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
             env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
@@ -3412,14 +3831,14 @@ public void testListConsumerGroupsWithStates() throws Exception {
             env.kafkaClient().prepareResponseFrom(
                 new ListGroupsResponse(new ListGroupsResponseData()
                     .setErrorCode(Errors.NONE.code())
-                    .setGroups(asList(
-                            new ListGroupsResponseData.ListedGroup()
-                                    .setGroupId("group-1")
-                                    .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
-                                    .setGroupState("Stable"),
-                            new ListGroupsResponseData.ListedGroup()
-                                    .setGroupId("group-2")
-                                    .setGroupState("Empty")))),
+                    .setGroups(List.of(
+                        new ListGroupsResponseData.ListedGroup()
+                            .setGroupId("group-1")
+                            .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
+                            .setGroupState("Stable"),
+                        new ListGroupsResponseData.ListedGroup()
+                            .setGroupId("group-2")
+                            .setGroupState("Empty")))),
env.cluster().nodeById(0)); final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); @@ -3437,7 +3856,7 @@ public void testListConsumerGroupsWithStates() throws Exception { @Test @SuppressWarnings("removal") - public void testListConsumerGroupsWithTypes() throws Exception { + public void testListConsumerGroupsDeprecatedWithTypes() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); @@ -3445,10 +3864,10 @@ public void testListConsumerGroupsWithTypes() throws Exception { env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), + expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(singletonList( + .setGroups(List.of( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) @@ -3456,7 +3875,7 @@ public void testListConsumerGroupsWithTypes() throws Exception { .setGroupType(GroupType.CLASSIC.toString())))), env.cluster().nodeById(0)); - final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); Collection listings = result.valid().get(); @@ -3470,10 +3889,10 @@ public void testListConsumerGroupsWithTypes() throws Exception { env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Collections.emptySet(), singleton(GroupType.CONSUMER.toString())), + expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString())), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(asList( + .setGroups(List.of( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) @@ -3500,30 +3919,31 @@ public void testListConsumerGroupsWithTypes() throws Exception { @Test @SuppressWarnings("removal") - public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exception { + public void testListConsumerGroupsDeprecatedWithStatesOlderBrokerVersion() throws Exception { ApiVersion listGroupV3 = new ApiVersion() - .setApiKey(ApiKeys.LIST_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); + .setApiKey(ApiKeys.LIST_GROUPS.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV3))); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV3))); env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); // Check we can list groups with older broker if we don't specify states env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(Collections.singletonList( - new ListGroupsResponseData.ListedGroup() - 
.setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), - env.cluster().nodeById(0)); + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), + env.cluster().nodeById(0)); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); Collection listing = result.all().get(); assertEquals(1, listing.size()); - List expected = Collections.singletonList(new ConsumerGroupListing("group-1", false)); + List expected = List.of(new ConsumerGroupListing("group-1", false)); assertEquals(expected, listing); // But we cannot set a state filter with older broker @@ -3531,7 +3951,7 @@ public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exceptio env.kafkaClient().prepareUnsupportedVersionResponse( body -> body instanceof ListGroupsRequest); - options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); result = env.adminClient().listConsumerGroups(options); TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); } @@ -3539,34 +3959,34 @@ public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exceptio @Test @SuppressWarnings("removal") - public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception { + public void testListConsumerGroupsDeprecatedWithTypesOlderBrokerVersion() throws Exception { ApiVersion listGroupV4 = new ApiVersion() .setApiKey(ApiKeys.LIST_GROUPS.id) .setMinVersion((short) 0) .setMaxVersion((short) 4); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4))); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); // Check if we can list groups with older broker if we specify states and don't specify types. 
env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), + expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(Collections.singletonList( + .setGroups(List.of( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) .setGroupState(GroupState.STABLE.toString())))), env.cluster().nodeById(0)); - ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); Collection listing = result.all().get(); assertEquals(1, listing.size()); - List expected = Collections.singletonList( + List expected = List.of( new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false) ); assertEquals(expected, listing); @@ -3577,9 +3997,31 @@ public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() ); - options = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CLASSIC)); + options = new ListConsumerGroupsOptions().withTypes(Set.of(GroupType.SHARE)); result = env.adminClient().listConsumerGroups(options); TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + + // But a type filter which is just classic groups is permitted with an older broker, because they + // only know about classic groups so the types filter can be omitted. 
+ env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.kafkaClient().prepareResponseFrom( + expectListGroupsRequestWithFilters(Set.of(), Set.of()), + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(List.of( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState(GroupState.STABLE.toString())))), + env.cluster().nodeById(0)); + + options = new ListConsumerGroupsOptions().withTypes(Set.of(GroupType.CLASSIC)); + result = env.adminClient().listConsumerGroups(options); + + listing = result.all().get(); + assertEquals(1, listing.size()); + assertEquals(expected, listing); } } @@ -4396,7 +4838,7 @@ private void verifyListConsumerGroupOffsetsOptions() throws Exception { ClientRequest clientRequest = mockClient.requests().peek(); assertNotNull(clientRequest); assertEquals(300, clientRequest.requestTimeoutMs()); - OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).data; + OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); assertTrue(data.requireStable()); assertEquals(Collections.singletonList(GROUP_ID), data.groups().stream().map(OffsetFetchRequestGroup::groupId).collect(Collectors.toList())); @@ -4417,7 +4859,7 @@ public void testListConsumerGroupOffsetsNumRetries() throws Exception { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); @@ -4445,14 +4887,14 @@ public void testListConsumerGroupOffsetsRetryBackoff() throws Exception { mockClient.prepareResponse(body -> { firstAttemptTime.set(time.milliseconds()); return true; - }, offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); + }, offsetFetchResponse(Errors.NOT_COORDINATOR)); mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); mockClient.prepareResponse(body -> { secondAttemptTime.set(time.milliseconds()); return true; - }, offsetFetchResponse(Errors.NONE, Collections.emptyMap())); + }, offsetFetchResponse(Errors.NONE)); final KafkaFuture> future = env.adminClient().listConsumerGroupOffsets(GROUP_ID).partitionsToOffsetAndMetadata(); @@ -4481,7 +4923,7 @@ public void testListConsumerGroupOffsetsRetriableErrors() throws Exception { prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Collections.emptyMap())); + offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); /* * We need to return two responses here, one for NOT_COORDINATOR call when calling list consumer offsets @@ -4491,19 +4933,19 @@ public void testListConsumerGroupOffsetsRetriableErrors() throws Exception { * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response */ env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); + 
offsetFetchResponse(Errors.NOT_COORDINATOR)); env.kafkaClient().prepareResponse( prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); env.kafkaClient().prepareResponse( prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.NONE, Collections.emptyMap())); + offsetFetchResponse(Errors.NONE)); final ListConsumerGroupOffsetsResult errorResult1 = env.adminClient().listConsumerGroupOffsets(GROUP_ID); @@ -4525,7 +4967,7 @@ public void testListConsumerGroupOffsetsNonRetriableErrors() throws Exception { env.kafkaClient().prepareResponse( prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(offsetFetchResponse(error, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(error)); ListConsumerGroupOffsetsResult errorResult = env.adminClient().listConsumerGroupOffsets(GROUP_ID); @@ -4545,7 +4987,7 @@ public void testListConsumerGroupOffsets() throws Exception { env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); // Retriable errors should be retried - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); /* * We need to return two responses here, one for NOT_COORDINATOR error when calling list consumer group offsets @@ -4554,10 +4996,10 @@ public void testListConsumerGroupOffsets() throws Exception { * * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response */ - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); @@ -4565,16 +5007,31 @@ public void testListConsumerGroupOffsets() throws Exception { TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); - final Map responseData = new HashMap<>(); - responseData.put(myTopicPartition0, new OffsetFetchResponse.PartitionData(10, - Optional.empty(), "", Errors.NONE)); - responseData.put(myTopicPartition1, new OffsetFetchResponse.PartitionData(0, - Optional.empty(), "", Errors.NONE)); - responseData.put(myTopicPartition2, new OffsetFetchResponse.PartitionData(20, - Optional.empty(), "", Errors.NONE)); - responseData.put(myTopicPartition3, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, - Optional.empty(), "", Errors.NONE)); - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NONE, responseData)); + final OffsetFetchResponseData response = new OffsetFetchResponseData() + .setGroups(List.of( + new 
OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(GROUP_ID) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("my_topic") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(myTopicPartition0.partition()) + .setCommittedOffset(10), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(myTopicPartition1.partition()) + .setCommittedOffset(0), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(myTopicPartition2.partition()) + .setCommittedOffset(20), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(myTopicPartition3.partition()) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + )) + )) + )); + + env.kafkaClient().prepareResponse(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata().get(); @@ -4704,7 +5161,7 @@ public void testBatchedListConsumerGroupOffsetsWithNoOffsetFetchBatching() throw env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); // Prepare a response to force client to attempt batched request creation that throws // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); @@ -4737,7 +5194,7 @@ public void testBatchedListStreamsGroupOffsetsWithNoOffsetFetchBatching() throws env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); // Prepare a response to force client to attempt batched request creation that throws // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. 
- env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); ListStreamsGroupOffsetsResult result = env.adminClient().listStreamsGroupOffsets(groupSpecs); @@ -4791,44 +5248,60 @@ private void sendOffsetFetchResponse(MockClient mockClient, Map> results = new HashMap<>(); - Map errors = new HashMap<>(); - data.groups().forEach(group -> { - Map partitionResults = new HashMap<>(); - for (TopicPartition tp : groupSpecs.get(group.groupId()).topicPartitions()) { - partitionResults.put(tp, new PartitionData(10, Optional.empty(), "", Errors.NONE)); - } - results.put(group.groupId(), partitionResults); - errors.put(group.groupId(), error); - }); + OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); + if (!batched) { assertEquals(1, data.groups().size()); - mockClient.respond(new OffsetFetchResponse(THROTTLE, error, results.values().iterator().next())); - } else - mockClient.respond(new OffsetFetchResponse(THROTTLE, errors, results)); + } + + OffsetFetchResponseData response = new OffsetFetchResponseData() + .setGroups(data.groups().stream().map(group -> + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group.groupId()) + .setErrorCode(error.code()) + .setTopics(groupSpecs.get(group.groupId()).topicPartitions().stream() + .collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(entry.getKey()) + .setPartitions(entry.getValue().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition.partition()) + .setCommittedOffset(10) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())); + + mockClient.respond(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); } private void sendStreamsOffsetFetchResponse(MockClient mockClient, Map groupSpecs, boolean batched, Errors error) throws Exception { waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); ClientRequest clientRequest = mockClient.requests().peek(); - OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).data; - Map> results = new HashMap<>(); - Map errors = new HashMap<>(); - data.groups().forEach(group -> { - Map partitionResults = new HashMap<>(); - for (TopicPartition tp : groupSpecs.get(group.groupId()).topicPartitions()) { - partitionResults.put(tp, new PartitionData(10, Optional.empty(), "", Errors.NONE)); - } - results.put(group.groupId(), partitionResults); - errors.put(group.groupId(), error); - }); + OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); + if (!batched) { assertEquals(1, data.groups().size()); - mockClient.respond(new OffsetFetchResponse(THROTTLE, error, results.values().iterator().next())); - } else - mockClient.respond(new OffsetFetchResponse(THROTTLE, errors, results)); + } + + OffsetFetchResponseData response = new OffsetFetchResponseData() + .setGroups(data.groups().stream().map(group -> + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group.groupId()) + .setErrorCode(error.code()) + .setTopics(groupSpecs.get(group.groupId()).topicPartitions().stream() + .collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> + new 
OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(entry.getKey()) + .setPartitions(entry.getValue().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition.partition()) + .setCommittedOffset(10) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())); + + mockClient.respond(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); } private void verifyListOffsetsForMultipleGroups(Map groupSpecs, @@ -6087,7 +6560,7 @@ public void testListStreamsGroups() throws Exception { .setGroups(Collections.emptyList())), env.cluster().nodeById(3)); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.STREAMS))); + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); Collection listings = result.valid().get(); @@ -6122,7 +6595,7 @@ public void testListStreamsGroupsMetadataFailure() throws Exception { -1, Collections.emptyList())); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.STREAMS))); + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); TestUtils.assertFutureThrows(KafkaException.class, result.all()); } } @@ -6150,7 +6623,7 @@ public void testListStreamsGroupsWithStates() throws Exception { .setGroupState("NotReady")))), env.cluster().nodeById(0)); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.STREAMS))); + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); Collection listings = result.valid().get(); assertEquals(2, listings.size()); @@ -6181,7 +6654,7 @@ public void testListStreamsGroupsWithStatesOlderBrokerVersion() { new ListGroupsResponseData.ListedGroup() .setGroupId("streams-group-1")))), env.cluster().nodeById(0)); - ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.STREAMS))); + ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); } } @@ -6497,7 +6970,7 @@ public void testListShareGroups() throws Exception { .setGroups(Collections.emptyList())), env.cluster().nodeById(3)); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); Collection listings = result.valid().get(); @@ -6532,7 +7005,7 @@ public void testListShareGroupsMetadataFailure() throws Exception { -1, Collections.emptyList())); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); TestUtils.assertFutureThrows(KafkaException.class, result.all()); } } @@ -6560,7 +7033,7 @@ public void testListShareGroupsWithStates() throws Exception { .setGroupState("Empty")))), env.cluster().nodeById(0)); - final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + 
final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); Collection listings = result.valid().get(); assertEquals(2, listings.size()); @@ -6591,7 +7064,7 @@ public void testListShareGroupsWithStatesOlderBrokerVersion() { new ListGroupsResponseData.ListedGroup() .setGroupId("share-group-1")))), env.cluster().nodeById(0)); - ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); } } @@ -7449,7 +7922,7 @@ public void testListPartitionReassignments() throws Exception { .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message()); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData)); - ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(new HashSet<>(asList(tp1, tp2))); + ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(Set.of(tp1, tp2)); TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, unknownTpResult.reassignments()); // 3. Success @@ -8257,6 +8730,34 @@ public void testListOffsetsLatestTierSpecSpecMinVersion() throws Exception { } } + @Test + public void testListOffsetsEarliestPendingUploadSpecSpecMinVersion() throws Exception { + Node node = new Node(0, "localhost", 8120); + List nodes = Collections.singletonList(node); + List pInfos = new ArrayList<>(); + pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node})); + final Cluster cluster = new Cluster( + "mockClusterId", + nodes, + pInfos, + Collections.emptySet(), + Collections.emptySet(), + node); + final TopicPartition tp0 = new TopicPartition("foo", 0); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, + AdminClientConfig.RETRIES_CONFIG, "2")) { + + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + + env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.earliestPendingUpload())); + + TestUtils.waitForCondition(() -> env.kafkaClient().requests().stream().anyMatch(request -> + request.requestBuilder().apiKey().messageType == ApiMessageType.LIST_OFFSETS && request.requestBuilder().oldestAllowedVersion() == 11 + ), "no listOffsets request has the expected oldestAllowedVersion"); + } + } + private Map makeTestFeatureUpdates() { return Utils.mkMap( Utils.mkEntry("test_feature_1", new FeatureUpdate((short) 2, FeatureUpdate.UpgradeType.UPGRADE)), @@ -8389,7 +8890,7 @@ public void testDescribeFeaturesFailure() { options.timeoutMs(10000); final KafkaFuture future = env.adminClient().describeFeatures(options).featureMetadata(); final ExecutionException e = assertThrows(ExecutionException.class, future::get); - assertEquals(e.getCause().getClass(), Errors.INVALID_REQUEST.exception().getClass()); + assertEquals(Errors.INVALID_REQUEST.exception().getClass(), e.getCause().getClass()); } } @@ -8978,15 +9479,15 @@ public void testDescribeClientQuotas() throws Exception { DescribeClientQuotasResult result = env.adminClient().describeClientQuotas(filter); Map> resultData = result.entities().get(); - assertEquals(resultData.size(), 2); + assertEquals(2, resultData.size()); assertTrue(resultData.containsKey(entity1)); Map config1 = resultData.get(entity1); - 
assertEquals(config1.size(), 1); - assertEquals(config1.get("consumer_byte_rate"), 10000.0, 1e-6); + assertEquals(1, config1.size()); + assertEquals(10000.0, config1.get("consumer_byte_rate"), 1e-6); assertTrue(resultData.containsKey(entity2)); Map config2 = resultData.get(entity2); - assertEquals(config2.size(), 1); - assertEquals(config2.get("producer_byte_rate"), 20000.0, 1e-6); + assertEquals(1, config2.size()); + assertEquals(20000.0, config2.get("producer_byte_rate"), 1e-6); } } @@ -9451,9 +9952,7 @@ public void testUnregisterBrokerTimeoutMaxWait() { env.kafkaClient().setNodeApiVersions( NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0)); - UnregisterBrokerOptions options = new UnregisterBrokerOptions(); - options.timeoutMs = 10; - UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId, options); + UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId, new UnregisterBrokerOptions().timeoutMs(10)); // Validate response assertNotNull(result.all()); @@ -10158,10 +10657,16 @@ private DescribeLogDirsResponse prepareDescribeLogDirsResponse(Errors error, Str .setLogDir(logDir)))); } - private OffsetFetchResponse offsetFetchResponse(Errors error, Map responseData) { - return new OffsetFetchResponse(THROTTLE, - Collections.singletonMap(GROUP_ID, error), - Collections.singletonMap(GROUP_ID, responseData)); + private static OffsetFetchResponse offsetFetchResponse(Errors error) { + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(GROUP_ID) + .setErrorCode(error.code()) + )), + ApiKeys.OFFSET_FETCH.latestVersion() + ); } private static MemberDescription convertToMemberDescriptions(DescribedGroupMember member, @@ -10185,6 +10690,7 @@ private static ShareMemberDescription convertToShareMemberDescriptions(ShareGrou member.memberEpoch()); } + @SuppressWarnings({"deprecation", "removal"}) @Test public void testListClientMetricsResources() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { @@ -10193,45 +10699,55 @@ public void testListClientMetricsResources() throws Exception { new ClientMetricsResourceListing("two") ); - ListClientMetricsResourcesResponseData responseData = - new ListClientMetricsResourcesResponseData().setErrorCode(Errors.NONE.code()); + ListConfigResourcesResponseData responseData = + new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); - responseData.clientMetricsResources() - .add(new ListClientMetricsResourcesResponseData.ClientMetricsResource().setName("one")); - responseData.clientMetricsResources() - .add((new ListClientMetricsResourcesResponseData.ClientMetricsResource()).setName("two")); + responseData.configResources() + .add(new ListConfigResourcesResponseData + .ConfigResource() + .setResourceName("one") + .setResourceType(ConfigResource.Type.CLIENT_METRICS.id()) + ); + responseData.configResources() + .add(new ListConfigResourcesResponseData + .ConfigResource() + .setResourceName("two") + .setResourceType(ConfigResource.Type.CLIENT_METRICS.id()) + ); env.kafkaClient().prepareResponse( - request -> request instanceof ListClientMetricsResourcesRequest, - new ListClientMetricsResourcesResponse(responseData)); + request -> request instanceof ListConfigResourcesRequest, + new ListConfigResourcesResponse(responseData)); ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); assertEquals(new HashSet<>(expected), new 
HashSet<>(result.all().get())); } } + @SuppressWarnings({"deprecation", "removal"}) @Test public void testListClientMetricsResourcesEmpty() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { List expected = Collections.emptyList(); - ListClientMetricsResourcesResponseData responseData = - new ListClientMetricsResourcesResponseData().setErrorCode(Errors.NONE.code()); + ListConfigResourcesResponseData responseData = + new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); env.kafkaClient().prepareResponse( - request -> request instanceof ListClientMetricsResourcesRequest, - new ListClientMetricsResourcesResponse(responseData)); + request -> request instanceof ListConfigResourcesRequest, + new ListConfigResourcesResponse(responseData)); ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); } } + @SuppressWarnings({"deprecation", "removal"}) @Test public void testListClientMetricsResourcesNotSupported() { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().prepareResponse( - request -> request instanceof ListClientMetricsResourcesRequest, + request -> request instanceof ListConfigResourcesRequest, prepareListClientMetricsResourcesResponse(Errors.UNSUPPORTED_VERSION)); ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); @@ -10242,6 +10758,70 @@ public void testListClientMetricsResourcesNotSupported() { } } + @Test + public void testListConfigResources() throws Exception { + try (AdminClientUnitTestEnv env = mockClientEnv()) { + List expected = List.of( + new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "client-metrics"), + new ConfigResource(ConfigResource.Type.BROKER, "1"), + new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "1"), + new ConfigResource(ConfigResource.Type.TOPIC, "topic"), + new ConfigResource(ConfigResource.Type.GROUP, "group") + ); + + ListConfigResourcesResponseData responseData = + new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); + + expected.forEach(c -> + responseData.configResources() + .add(new ListConfigResourcesResponseData + .ConfigResource() + .setResourceName(c.name()) + .setResourceType(c.type().id()) + ) + ); + + env.kafkaClient().prepareResponse( + request -> request instanceof ListConfigResourcesRequest, + new ListConfigResourcesResponse(responseData)); + + ListConfigResourcesResult result = env.adminClient().listConfigResources(); + assertEquals(expected.size(), result.all().get().size()); + assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); + } + } + + @Test + public void testListConfigResourcesEmpty() throws Exception { + try (AdminClientUnitTestEnv env = mockClientEnv()) { + ListConfigResourcesResponseData responseData = + new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); + + env.kafkaClient().prepareResponse( + request -> request instanceof ListConfigResourcesRequest, + new ListConfigResourcesResponse(responseData)); + + ListConfigResourcesResult result = env.adminClient().listConfigResources(); + assertTrue(result.all().get().isEmpty()); + } + } + + @Test + public void testListConfigResourcesNotSupported() { + try (AdminClientUnitTestEnv env = mockClientEnv()) { + env.kafkaClient().prepareResponse( + request -> request instanceof ListConfigResourcesRequest, + new ListConfigResourcesResponse(new ListConfigResourcesResponseData() + 
.setErrorCode(Errors.UNSUPPORTED_VERSION.code()))); + + ListConfigResourcesResult result = env.adminClient().listConfigResources( + Set.of(ConfigResource.Type.UNKNOWN), new ListConfigResourcesOptions()); + + assertNotNull(result.all()); + TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + } + } + @Test public void testCallFailWithUnsupportedVersionExceptionDoesNotHaveConcurrentModificationException() throws InterruptedException { Cluster cluster = mockCluster(1, 0); @@ -10297,8 +10877,8 @@ public void update(Time time, MockClient.MetadataUpdate update) { } } - private static ListClientMetricsResourcesResponse prepareListClientMetricsResourcesResponse(Errors error) { - return new ListClientMetricsResourcesResponse(new ListClientMetricsResourcesResponseData() + private static ListConfigResourcesResponse prepareListClientMetricsResourcesResponse(Errors error) { + return new ListConfigResourcesResponse(new ListConfigResourcesResponseData() .setErrorCode(error.code())); } @@ -10577,12 +11157,24 @@ public void testListShareGroupOffsets() throws Exception { List.of( new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500))) + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50).setLeaderEpoch(1) + ) + ), + new 
DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100).setLeaderEpoch(2) + ) + ), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(3) + ) + ) ) ) ) @@ -10590,15 +11182,15 @@ public void testListShareGroupOffsets() throws Exception { env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = result.partitionsToOffset(GROUP_ID).get(); + final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); assertEquals(6, partitionToOffsetAndMetadata.size()); - assertEquals(10, partitionToOffsetAndMetadata.get(myTopicPartition0)); - assertEquals(11, partitionToOffsetAndMetadata.get(myTopicPartition1)); - assertEquals(40, partitionToOffsetAndMetadata.get(myTopicPartition2)); - assertEquals(50, partitionToOffsetAndMetadata.get(myTopicPartition3)); - assertEquals(100, partitionToOffsetAndMetadata.get(myTopicPartition4)); - assertEquals(500, partitionToOffsetAndMetadata.get(myTopicPartition5)); + assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition0)); + assertEquals(new OffsetAndMetadata(11, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition1)); + assertEquals(new OffsetAndMetadata(40, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition2)); + assertEquals(new OffsetAndMetadata(50, Optional.of(1), ""), partitionToOffsetAndMetadata.get(myTopicPartition3)); + assertEquals(new OffsetAndMetadata(100, Optional.of(2), ""), partitionToOffsetAndMetadata.get(myTopicPartition4)); + assertEquals(new OffsetAndMetadata(500, Optional.of(3), ""), partitionToOffsetAndMetadata.get(myTopicPartition5)); } } @@ -10630,16 +11222,28 @@ public void testListShareGroupOffsetsMultipleGroups() throws Exception { List.of( new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50))) + new 
DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50).setLeaderEpoch(1) + ) + ) ) ), new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId("group-1").setTopics( List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500))) + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100).setLeaderEpoch(2) + ) + ), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(2) + ) + ) ) ) ) @@ -10649,17 +11253,17 @@ public void testListShareGroupOffsetsMultipleGroups() throws Exception { final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); assertEquals(2, result.all().get().size()); - final Map partitionToOffsetAndMetadataGroup0 = result.partitionsToOffset(GROUP_ID).get(); + final Map partitionToOffsetAndMetadataGroup0 = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); assertEquals(4, partitionToOffsetAndMetadataGroup0.size()); - assertEquals(10, partitionToOffsetAndMetadataGroup0.get(myTopicPartition0)); - assertEquals(11, partitionToOffsetAndMetadataGroup0.get(myTopicPartition1)); - assertEquals(40, partitionToOffsetAndMetadataGroup0.get(myTopicPartition2)); - assertEquals(50, partitionToOffsetAndMetadataGroup0.get(myTopicPartition3)); + assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition0)); + assertEquals(new OffsetAndMetadata(11, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition1)); + assertEquals(new OffsetAndMetadata(40, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition2)); + assertEquals(new OffsetAndMetadata(50, Optional.of(1), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition3)); - final Map partitionToOffsetAndMetadataGroup1 = result.partitionsToOffset("group-1").get(); + final Map partitionToOffsetAndMetadataGroup1 = result.partitionsToOffsetAndMetadata("group-1").get(); assertEquals(2, partitionToOffsetAndMetadataGroup1.size()); - 
assertEquals(100, partitionToOffsetAndMetadataGroup1.get(myTopicPartition4)); - assertEquals(500, partitionToOffsetAndMetadataGroup1.get(myTopicPartition5)); + assertEquals(new OffsetAndMetadata(100, Optional.of(2), ""), partitionToOffsetAndMetadataGroup1.get(myTopicPartition4)); + assertEquals(new OffsetAndMetadata(500, Optional.of(2), ""), partitionToOffsetAndMetadataGroup1.get(myTopicPartition5)); } } @@ -10682,7 +11286,7 @@ public void testListShareGroupOffsetsEmpty() throws Exception { env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = result.partitionsToOffset(GROUP_ID).get(); + final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); assertEquals(0, partitionToOffsetAndMetadata.size()); } @@ -10711,12 +11315,22 @@ public void testListShareGroupOffsetsWithErrorInOnePartition() throws Exception List.of( new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions(List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11) - )), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setErrorCode(Errors.NOT_COORDINATOR.code()).setErrorMessage("Not a Coordinator"))), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions(List.of(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500))) + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(1) + ) + ), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setErrorCode(Errors.NOT_COORDINATOR.code()).setErrorMessage("Not a Coordinator") + ) + ), + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( + List.of( + new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(2) + ) + ) ) ) ) @@ -10724,13 +11338,13 @@ public void testListShareGroupOffsetsWithErrorInOnePartition() throws Exception env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = 
result.partitionsToOffset(GROUP_ID).get(); + final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); // For myTopicPartition2 we have set an error as the response. Thus, it should be skipped from the final result assertEquals(3, partitionToOffsetAndMetadata.size()); - assertEquals(10, partitionToOffsetAndMetadata.get(myTopicPartition0)); - assertEquals(11, partitionToOffsetAndMetadata.get(myTopicPartition1)); - assertEquals(500, partitionToOffsetAndMetadata.get(myTopicPartition3)); + assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition0)); + assertEquals(new OffsetAndMetadata(11, Optional.of(1), ""), partitionToOffsetAndMetadata.get(myTopicPartition1)); + assertEquals(new OffsetAndMetadata(500, Optional.of(2), ""), partitionToOffsetAndMetadata.get(myTopicPartition3)); } } @@ -10741,10 +11355,10 @@ public void testAlterShareGroupOffsets() throws Exception { env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setResponses( - List.of( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0), new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(1))), new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0))) - ) + ).iterator()) ); TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); @@ -10763,6 +11377,28 @@ public void testAlterShareGroupOffsets() throws Exception { } } + @Test + public void testAlterShareGroupOffsetsWithTopLevelError() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()).setErrorMessage("Group authorization failed."); + + TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); + TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); + TopicPartition barPartition0 = new TopicPartition("bar", 0); + TopicPartition zooTopicPartition0 = new TopicPartition("zoo", 0); + + env.kafkaClient().prepareResponse(new AlterShareGroupOffsetsResponse(data)); + final AlterShareGroupOffsetsResult result = env.adminClient().alterShareGroupOffsets(GROUP_ID, Map.of(fooTopicPartition0, 1L, fooTopicPartition1, 2L, barPartition0, 1L)); + + TestUtils.assertFutureThrows(GroupAuthorizationException.class, result.all()); + TestUtils.assertFutureThrows(GroupAuthorizationException.class, result.partitionResult(fooTopicPartition1)); + TestUtils.assertFutureThrows(IllegalArgumentException.class, result.partitionResult(zooTopicPartition0)); + } + } + @Test public void testAlterShareGroupOffsetsWithErrorInOnePartition() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { @@ 
-10770,10 +11406,11 @@ public void testAlterShareGroupOffsetsWithErrorInOnePartition() throws Exception env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setResponses( - List.of( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0), new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(1).setErrorCode(Errors.NON_EMPTY_GROUP.code()).setErrorMessage("The group is not empty"))), + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0), + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(1).setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()).setErrorMessage("Topic authorization failed."))), new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0))) - ) + ).iterator()) ); TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); @@ -10783,9 +11420,9 @@ public void testAlterShareGroupOffsetsWithErrorInOnePartition() throws Exception env.kafkaClient().prepareResponse(new AlterShareGroupOffsetsResponse(data)); final AlterShareGroupOffsetsResult result = env.adminClient().alterShareGroupOffsets(GROUP_ID, Map.of(fooTopicPartition0, 1L, fooTopicPartition1, 2L, barPartition0, 1L)); - TestUtils.assertFutureThrows(GroupNotEmptyException.class, result.all()); + TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.all()); assertNull(result.partitionResult(fooTopicPartition0).get()); - TestUtils.assertFutureThrows(GroupNotEmptyException.class, result.partitionResult(fooTopicPartition1)); + TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.partitionResult(fooTopicPartition1)); assertNull(result.partitionResult(barPartition0).get()); } } @@ -10801,10 +11438,10 @@ public void testDeleteShareGroupOffsetsOptionsWithBatchedApi() throws Exception env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final Set partitions = Collections.singleton(new TopicPartition("A", 0)); + final Set topics = Collections.singleton("A"); final DeleteShareGroupOffsetsOptions options = new DeleteShareGroupOffsetsOptions(); - env.adminClient().deleteShareGroupOffsets(GROUP_ID, partitions, options); + env.adminClient().deleteShareGroupOffsets(GROUP_ID, topics, options); final MockClient mockClient = env.kafkaClient(); waitForRequest(mockClient, ApiKeys.DELETE_SHARE_GROUP_OFFSETS); @@ -10825,26 +11462,27 @@ public void testDeleteShareGroupOffsets() throws Exception { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + Uuid fooId = Uuid.randomUuid(); + String fooName = "foo"; + Uuid barId = Uuid.randomUuid(); + String barName = "bar"; + + String zooName = "zoo"; + 
DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0), new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(1))), - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0))) + new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName(fooName).setTopicId(fooId), + new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName(barName).setTopicId(barId) ) ); - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barPartition0 = new TopicPartition("bar", 0); - TopicPartition zooTopicPartition0 = new TopicPartition("zoo", 0); - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooTopicPartition0, fooTopicPartition1, barPartition0)); + final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); assertNull(result.all().get()); - assertNull(result.partitionResult(fooTopicPartition0).get()); - assertNull(result.partitionResult(fooTopicPartition1).get()); - assertNull(result.partitionResult(barPartition0).get()); - assertThrows(IllegalArgumentException.class, () -> result.partitionResult(zooTopicPartition0)); + assertNull(result.topicResult(fooName).get()); + assertNull(result.topicResult(barName).get()); + assertThrows(IllegalArgumentException.class, () -> result.topicResult(zooName)); } } @@ -10856,7 +11494,7 @@ public void testDeleteShareGroupOffsetsEmpty() throws Exception { env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( - Collections.emptyList() + List.of() ); env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); @@ -10875,41 +11513,46 @@ public void testDeleteShareGroupOffsetsWithErrorInGroup() throws Exception { .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()) .setErrorMessage(Errors.GROUP_AUTHORIZATION_FAILED.message()); - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barTopicPartition0 = new TopicPartition("bar", 0); + String fooName = "foo"; + String barName = "bar"; env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooTopicPartition0, fooTopicPartition1, barTopicPartition0)); + final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); TestUtils.assertFutureThrows(Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass(), result.all()); } } @Test - public void testDeleteShareGroupOffsetsWithErrorInOnePartition() throws Exception { + public void 
testDeleteShareGroupOffsetsWithErrorInOneTopic() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + Uuid fooId = Uuid.randomUuid(); + String fooName = "foo"; + Uuid barId = Uuid.randomUuid(); + String barName = "bar"; + DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0), new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(1).setErrorCode(Errors.KAFKA_STORAGE_ERROR.code()).setErrorMessage(Errors.KAFKA_STORAGE_ERROR.message()))), - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0))) + new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() + .setTopicName(fooName) + .setTopicId(fooId) + .setErrorCode(Errors.KAFKA_STORAGE_ERROR.code()) + .setErrorMessage(Errors.KAFKA_STORAGE_ERROR.message()), + new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() + .setTopicName(barName) + .setTopicId(barId) ) ); - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barTopicPartition0 = new TopicPartition("bar", 0); - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooTopicPartition0, fooTopicPartition1, barTopicPartition0)); + final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); TestUtils.assertFutureThrows(Errors.KAFKA_STORAGE_ERROR.exception().getClass(), result.all()); - assertNull(result.partitionResult(fooTopicPartition0).get()); - TestUtils.assertFutureThrows(Errors.KAFKA_STORAGE_ERROR.exception().getClass(), result.partitionResult(fooTopicPartition1)); - assertNull(result.partitionResult(barTopicPartition0).get()); + TestUtils.assertFutureThrows(Errors.KAFKA_STORAGE_ERROR.exception().getClass(), result.topicResult(fooName)); + assertNull(result.topicResult(barName).get()); } } @@ -10919,24 +11562,25 @@ public void testDeleteShareGroupOffsetsWithPartitionNotPresentInResult() throws env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + Uuid fooId = Uuid.randomUuid(); + String fooName = "foo"; + + String barName = "bar"; + DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0), new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(1))), - new 
DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition().setPartitionIndex(0))) + new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() + .setTopicName(fooName) + .setTopicId(fooId) ) ); - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barTopicPartition0 = new TopicPartition("bar", 0); - TopicPartition barTopicPartition1 = new TopicPartition("bar", 1); - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooTopicPartition0, fooTopicPartition1, barTopicPartition0)); + final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName)); assertDoesNotThrow(() -> result.all().get()); - assertThrows(IllegalArgumentException.class, () -> result.partitionResult(barTopicPartition1)); - assertNull(result.partitionResult(barTopicPartition0).get()); + assertThrows(IllegalArgumentException.class, () -> result.topicResult(barName)); + assertNull(result.topicResult(fooName).get()); } } @@ -11051,4 +11695,27 @@ private static StreamsGroupDescribeResponseData makeFullStreamsGroupDescribeResp .setAssignmentEpoch(1)); return data; } + + @Test + @Timeout(30) + public void testDescribeTopicsTimeoutWhenNoBrokerResponds() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv( + mockCluster(1, 0), + AdminClientConfig.RETRIES_CONFIG, "0", + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + // Not using prepareResponse is equivalent to "no brokers respond". + long start = System.currentTimeMillis(); + DescribeTopicsResult result = env.adminClient().describeTopics(List.of("test-topic"), new DescribeTopicsOptions().timeoutMs(200)); + Map> topicDescriptionMap = result.topicNameValues(); + KafkaFuture topicDescription = topicDescriptionMap.get("test-topic"); + ExecutionException exception = assertThrows(ExecutionException.class, topicDescription::get); + // Duration should be greater than or equal to 200 ms but less than 30000 ms. 
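// The assertion below uses a 150 ms lower bound rather than 200 ms, presumably to
// leave some slack for timer granularity around the 200 ms per-call timeout, while the
// 30000 ms upper bound confirms that the request-level timeout did not apply instead.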
+ long duration = System.currentTimeMillis() - start; + + assertInstanceOf(TimeoutException.class, exception.getCause()); + assertTrue(duration >= 150L && duration < 30000); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java index 75d6c1c88c537..f20d6e56c9595 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java @@ -20,8 +20,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.HashSet; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -29,7 +27,7 @@ public class ListConsumerGroupsOptionsTest { @Test public void testState() { - Set consumerGroupStates = new HashSet<>(Arrays.asList(ConsumerGroupState.values())); + Set consumerGroupStates = Set.of(ConsumerGroupState.values()); ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(consumerGroupStates); assertEquals(consumerGroupStates, options.states()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListGroupsOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListGroupsOptionsTest.java new file mode 100644 index 0000000000000..360da83b8da57 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListGroupsOptionsTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; +import org.apache.kafka.common.GroupState; +import org.apache.kafka.common.GroupType; + +import org.junit.jupiter.api.Test; + +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ListGroupsOptionsTest { + @Test + public void testForConsumerGroups() { + ListGroupsOptions options = ListGroupsOptions.forConsumerGroups(); + assertTrue(options.groupStates().isEmpty()); + assertEquals(Set.of(GroupType.CONSUMER, GroupType.CLASSIC), options.types()); + assertEquals(Set.of("", ConsumerProtocol.PROTOCOL_TYPE), options.protocolTypes()); + + options.inGroupStates(Set.of(GroupState.STABLE)); + options.withTypes(Set.of(GroupType.CONSUMER)); + options.withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); + assertEquals(Set.of(GroupState.STABLE), options.groupStates()); + assertEquals(Set.of(GroupType.CONSUMER), options.types()); + assertEquals(Set.of(ConsumerProtocol.PROTOCOL_TYPE), options.protocolTypes()); + } + + @Test + public void testForShareGroups() { + ListGroupsOptions options = ListGroupsOptions.forShareGroups(); + assertTrue(options.groupStates().isEmpty()); + assertEquals(Set.of(GroupType.SHARE), options.types()); + assertTrue(options.protocolTypes().isEmpty()); + + options.inGroupStates(Set.of(GroupState.STABLE)); + options.withTypes(Set.of(GroupType.CONSUMER)); + options.withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); + assertEquals(Set.of(GroupState.STABLE), options.groupStates()); + assertEquals(Set.of(GroupType.CONSUMER), options.types()); + assertEquals(Set.of(ConsumerProtocol.PROTOCOL_TYPE), options.protocolTypes()); + } + + @Test + public void testForStreamsGroups() { + ListGroupsOptions options = ListGroupsOptions.forStreamsGroups(); + assertTrue(options.groupStates().isEmpty()); + assertEquals(Set.of(GroupType.STREAMS), options.types()); + assertTrue(options.protocolTypes().isEmpty()); + + options.inGroupStates(Set.of(GroupState.STABLE)); + options.withTypes(Set.of(GroupType.CONSUMER)); + options.withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); + assertEquals(Set.of(GroupState.STABLE), options.groupStates()); + assertEquals(Set.of(GroupType.CONSUMER), options.types()); + assertEquals(Set.of(ConsumerProtocol.PROTOCOL_TYPE), options.protocolTypes()); + } + + @Test + public void testGroupStates() { + ListGroupsOptions options = new ListGroupsOptions(); + assertTrue(options.groupStates().isEmpty()); + + options.inGroupStates(Set.of(GroupState.DEAD)); + assertEquals(Set.of(GroupState.DEAD), options.groupStates()); + + Set groupStates = Set.of(GroupState.values()); + options = new ListGroupsOptions().inGroupStates(groupStates); + assertEquals(groupStates, options.groupStates()); + } + + @Test + public void testProtocolTypes() { + ListGroupsOptions options = new ListGroupsOptions(); + assertTrue(options.protocolTypes().isEmpty()); + + options.withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); + assertEquals(Set.of(ConsumerProtocol.PROTOCOL_TYPE), options.protocolTypes()); + + Set protocolTypes = Set.of("", "consumer", "share"); + options = new ListGroupsOptions().withProtocolTypes(protocolTypes); + assertEquals(protocolTypes, options.protocolTypes()); + } + + @Test + public void testTypes() { + ListGroupsOptions options = new ListGroupsOptions(); + assertTrue(options.types().isEmpty()); + + 
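// withTypes below replaces the empty default type filter; the follow-up call with
// Set.of(GroupType.values()) then exercises the full range of group types at once.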
options.withTypes(Set.of(GroupType.CLASSIC)); + assertEquals(Set.of(GroupType.CLASSIC), options.types()); + + Set groupTypes = Set.of(GroupType.values()); + options = new ListGroupsOptions().withTypes(groupTypes); + assertEquals(groupTypes, options.types()); + } +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java index ce36eb0062260..48874f1a1b2db 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java @@ -66,8 +66,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -460,14 +462,14 @@ public synchronized ListTopicsResult listTopics(ListTopicsOptions options) { @Override public synchronized DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) { if (topics instanceof TopicIdCollection) - return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options))); + return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds()))); else if (topics instanceof TopicNameCollection) - return DescribeTopicsResult.ofTopicNames(new HashMap<>(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames(), options))); + return DescribeTopicsResult.ofTopicNames(new HashMap<>(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames()))); else throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for describeTopics."); } - private Map> handleDescribeTopicsByNames(Collection topicNames, DescribeTopicsOptions options) { + private Map> handleDescribeTopicsByNames(Collection topicNames) { Map> topicDescriptions = new HashMap<>(); if (timeoutNextRequests > 0) { @@ -507,7 +509,7 @@ private Map> handleDescribeTopicsByNames(C return topicDescriptions; } - public synchronized Map> handleDescribeTopicsUsingIds(Collection topicIds, DescribeTopicsOptions options) { + public synchronized Map> handleDescribeTopicsUsingIds(Collection topicIds) { Map> topicDescriptions = new HashMap<>(); @@ -553,15 +555,15 @@ public synchronized Map> handleDescribeTopi public synchronized DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) { DeleteTopicsResult result; if (topics instanceof TopicIdCollection) - result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options))); + result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds()))); else if (topics instanceof TopicNameCollection) - result = DeleteTopicsResult.ofTopicNames(new HashMap<>(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options))); + result = DeleteTopicsResult.ofTopicNames(new HashMap<>(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames()))); else throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for deleteTopics."); return result; } - private Map> handleDeleteTopicsUsingNames(Collection topicNameCollection, 
DeleteTopicsOptions options) { + private Map> handleDeleteTopicsUsingNames(Collection topicNameCollection) { Map> deleteTopicsResult = new HashMap<>(); Collection topicNames = new ArrayList<>(topicNameCollection); @@ -590,7 +592,7 @@ private Map> handleDeleteTopicsUsingNames(Collection> handleDeleteTopicsUsingIds(Collection topicIdCollection, DeleteTopicsOptions options) { + private Map> handleDeleteTopicsUsingIds(Collection topicIdCollection) { Map> deleteTopicsResult = new HashMap<>(); Collection topicIds = new ArrayList<>(topicIdCollection); @@ -1118,11 +1120,7 @@ public synchronized DescribeReplicaLogDirsResult describeReplicaLogDirs( DescribeLogDirsResponse.INVALID_OFFSET_LAG)); } else { ReplicaLogDirInfo info = replicaMoves.get(replica); - if (info == null) { - future.complete(new ReplicaLogDirInfo(currentLogDir, 0, null, 0)); - } else { - future.complete(info); - } + future.complete(Objects.requireNonNullElseGet(info, () -> new ReplicaLogDirInfo(currentLogDir, 0, null, 0))); } } } @@ -1172,8 +1170,7 @@ public synchronized ListPartitionReassignmentsResult listPartitionReassignments( Optional> partitions, ListPartitionReassignmentsOptions options) { Map map = new HashMap<>(); - for (TopicPartition partition : partitions.isPresent() ? - partitions.get() : reassignments.keySet()) { + for (TopicPartition partition : partitions.orElseGet(reassignments::keySet)) { PartitionReassignment reassignment = findPartitionReassignment(partition); if (reassignment != null) { map.put(partition, reassignment); @@ -1399,6 +1396,38 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, } @Override + public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + Set configResources = new HashSet<>(); + if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.TOPIC)) { + allTopics.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.TOPIC, name))); + } + + if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.BROKER)) { + for (int i = 0; i < brokers.size(); i++) { + configResources.add(new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(i))); + } + } + + if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.BROKER_LOGGER)) { + for (int i = 0; i < brokers.size(); i++) { + configResources.add(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, String.valueOf(i))); + } + } + + if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.CLIENT_METRICS)) { + clientMetricsConfigs.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name))); + } + + if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.GROUP)) { + groupConfigs.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.GROUP, name))); + } + future.complete(configResources); + return new ListConfigResourcesResult(future); + } + + @Override + @SuppressWarnings({"deprecation", "removal"}) public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.complete(clientMetricsConfigs.keySet().stream().map(ClientMetricsResourceListing::new).collect(Collectors.toList())); @@ -1431,7 +1460,7 @@ public synchronized ListShareGroupOffsetsResult 
listShareGroupOffsets(Map partitions, DeleteShareGroupOffsetsOptions options) { + public synchronized DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java index dfc12c578c8b1..ceb188a41759b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java @@ -58,7 +58,7 @@ public void setUp() { } @Test - public void testTopLevelErrorConstructor() throws InterruptedException { + public void testTopLevelErrorConstructor() { memberFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); RemoveMembersFromConsumerGroupResult topLevelErrorResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java index 7c87f21c64380..c4ffc657914cd 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java @@ -40,7 +40,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -397,7 +396,7 @@ public void testRetryLookupAfterDisconnect() { public void testRetryLookupAndDisableBatchAfterNoBatchedFindCoordinatorsException() { MockTime time = new MockTime(); LogContext lc = new LogContext(); - Set groupIds = new HashSet<>(Arrays.asList("g1", "g2")); + Set groupIds = Set.of("g1", "g2"); DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(lc); AdminApiFuture future = AdminApiFuture.forKeys( groupIds.stream().map(CoordinatorKey::byGroupId).collect(Collectors.toSet())); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java index a61a7bdfda5f1..0581d672fb8a0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java @@ -32,17 +32,13 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class AdminBootstrapAddressesTest { - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testNoBootstrapSet(boolean nullValue) { - Map map = new HashMap<>(); - if (nullValue) { - map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, null); - map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, null); - } else { - map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, ""); - map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, ""); - } + + @Test + public void testNoBootstrapSet() { + Map map = Map.of( + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "", + AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, "" + ); AdminClientConfig config = new AdminClientConfig(map); assertEquals("You must set either 
bootstrap.servers or bootstrap.controllers", assertThrows(ConfigException.class, () -> AdminBootstrapAddresses.fromConfig(config)). diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java index 8cd9545107f23..cbbbe93e2d4c4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java @@ -55,9 +55,9 @@ public void testBuildOldLookupRequest() { @Test public void testBuildLookupRequest() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); - FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( + FindCoordinatorRequest.Builder request = strategy.buildRequest(Set.of( CoordinatorKey.byGroupId("foo"), - CoordinatorKey.byGroupId("bar")))); + CoordinatorKey.byGroupId("bar"))); assertEquals("", request.data().key()); assertEquals(2, request.data().coordinatorKeys().size()); assertEquals(CoordinatorType.GROUP, CoordinatorType.forId(request.data().keyType())); @@ -67,8 +67,8 @@ public void testBuildLookupRequest() { public void testBuildLookupRequestNonRepresentable() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( - CoordinatorKey.byGroupId("foo"), - null))); + CoordinatorKey.byGroupId("foo"), + null))); assertEquals("", request.data().key()); assertEquals(1, request.data().coordinatorKeys().size()); } @@ -90,7 +90,7 @@ public void testBuildOldLookupRequestRequiresAtLeastOneKey() { strategy.disableBatch(); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - new HashSet<>(Collections.singletonList(CoordinatorKey.byTransactionalId("txnid"))))); + Set.of(CoordinatorKey.byTransactionalId("txnid")))); } @Test @@ -105,9 +105,9 @@ public void testBuildLookupRequestRequiresKeySameType() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - new HashSet<>(Arrays.asList( - CoordinatorKey.byGroupId("group"), - CoordinatorKey.byTransactionalId("txnid"))))); + Set.of( + CoordinatorKey.byGroupId("group"), + CoordinatorKey.byTransactionalId("txnid")))); } @Test @@ -161,7 +161,7 @@ public void testSuccessfulCoordinatorLookup() { .setPort(9092) .setNodeId(2))); - AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData); + AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData); Map expectedResult = new HashMap<>(); expectedResult.put(group1, 1); expectedResult.put(group2, 2); @@ -204,7 +204,7 @@ private void testRetriableCoordinatorLookup(Errors error) { .setHost("localhost") .setPort(9092) .setNodeId(2))); - AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData); + AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData); assertEquals(emptyMap(), result.failedKeys); assertEquals(singletonMap(group2, 2), result.mappedKeys); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java 
b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java index e975b2acbaed6..5d14529915a92 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java @@ -34,10 +34,8 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -55,7 +53,7 @@ public class DeleteConsumerGroupOffsetsHandlerTest { private final TopicPartition t0p0 = new TopicPartition("t0", 0); private final TopicPartition t0p1 = new TopicPartition("t0", 1); private final TopicPartition t1p0 = new TopicPartition("t1", 0); - private final Set tps = new HashSet<>(Arrays.asList(t0p0, t0p1, t1p0)); + private final Set tps = Set.of(t0p0, t0p1, t1p0); @Test public void testBuildRequest() { diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java index 444795b3680bc..eb3e99dc62167 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java @@ -53,7 +53,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; @@ -82,10 +81,10 @@ public class DescribeConsumerGroupsHandlerTest { CoordinatorKey.byGroupId(groupId2) )); private final Node coordinator = new Node(1, "host", 1234); - private final Set tps = new HashSet<>(Arrays.asList( + private final Set tps = Set.of( new TopicPartition("foo", 0), new TopicPartition("bar", 1) - )); + ); @ParameterizedTest @ValueSource(booleans = {true, false}) diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java index e3bb56347a8ae..19c614d3c60c8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java @@ -24,12 +24,14 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidGroupIdException; +import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; -import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; +import org.apache.kafka.common.message.OffsetFetchResponseData; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.utils.LogContext; import org.junit.jupiter.api.Test; @@ -41,7 +43,6 @@ import 
java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -58,11 +59,11 @@ public class ListConsumerGroupOffsetsHandlerTest { private final LogContext logContext = new LogContext(); - private final int throttleMs = 10; - private final String groupZero = "group0"; - private final String groupOne = "group1"; - private final String groupTwo = "group2"; - private final List groups = Arrays.asList(groupZero, groupOne, groupTwo); + private final String group0 = "group0"; + private final String group1 = "group1"; + private final String group2 = "group2"; + private final String group3 = "group3"; + private final List groups = List.of(group0, group1, group2); private final TopicPartition t0p0 = new TopicPartition("t0", 0); private final TopicPartition t0p1 = new TopicPartition("t0", 1); private final TopicPartition t1p0 = new TopicPartition("t1", 0); @@ -70,84 +71,129 @@ public class ListConsumerGroupOffsetsHandlerTest { private final TopicPartition t2p0 = new TopicPartition("t2", 0); private final TopicPartition t2p1 = new TopicPartition("t2", 1); private final TopicPartition t2p2 = new TopicPartition("t2", 2); - private final Map singleRequestMap = Collections.singletonMap(groupZero, - new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t0p1, t1p0, t1p1))); - private final Map batchedRequestMap = - new HashMap() {{ - put(groupZero, new ListConsumerGroupOffsetsSpec().topicPartitions(singletonList(t0p0))); - put(groupOne, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1))); - put(groupTwo, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1, t2p0, t2p1, t2p2))); - }}; + private final TopicPartition t3p0 = new TopicPartition("t3", 0); + private final TopicPartition t3p1 = new TopicPartition("t3", 1); + + private final Map singleGroupSpec = Map.of( + group0, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t0p1, t1p0, t1p1)) + ); + private final Map multiGroupSpecs = Map.of( + group0, new ListConsumerGroupOffsetsSpec().topicPartitions(singletonList(t0p0)), + group1, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1)), + group2, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1, t2p0, t2p1, t2p2)) + ); @Test public void testBuildRequest() { - ListConsumerGroupOffsetsHandler handler = - new ListConsumerGroupOffsetsHandler(singleRequestMap, false, logContext); - OffsetFetchRequest request = handler.buildBatchedRequest(coordinatorKeys(groupZero)).build(); - assertEquals(groupZero, request.data().groups().get(0).groupId()); - assertEquals(2, request.data().groups().get(0).topics().size()); - assertEquals(2, request.data().groups().get(0).topics().get(0).partitionIndexes().size()); - assertEquals(2, request.data().groups().get(0).topics().get(1).partitionIndexes().size()); + var handler = new ListConsumerGroupOffsetsHandler( + singleGroupSpec, + false, + logContext + ); + + assertEquals( + new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group0) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t0p0.topic()) + .setPartitionIndexes(List.of(t0p0.partition(), t0p1.partition())), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t1p0.topic()) + .setPartitionIndexes(List.of(t1p0.partition(), 
t1p1.partition())) + )) + )), + handler.buildBatchedRequest(coordinatorKeys(group0)).build().data() + ); } @Test public void testBuildRequestWithMultipleGroups() { - Map requestMap = new HashMap<>(this.batchedRequestMap); - String groupThree = "group3"; - requestMap.put(groupThree, new ListConsumerGroupOffsetsSpec() - .topicPartitions(Arrays.asList(new TopicPartition("t3", 0), new TopicPartition("t3", 1)))); - - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(requestMap, false, logContext); - OffsetFetchRequest request1 = handler.buildBatchedRequest(coordinatorKeys(groupZero, groupOne, groupTwo)).build(); - assertEquals(Set.of(groupZero, groupOne, groupTwo), requestGroups(request1)); - - OffsetFetchRequest request2 = handler.buildBatchedRequest(coordinatorKeys(groupThree)).build(); - assertEquals(Set.of(groupThree), requestGroups(request2)); - - Map builtRequests = new HashMap<>(); - request1.groupIdsToPartitions().forEach((group, partitions) -> - builtRequests.put(group, new ListConsumerGroupOffsetsSpec().topicPartitions(partitions))); - request2.groupIdsToPartitions().forEach((group, partitions) -> - builtRequests.put(group, new ListConsumerGroupOffsetsSpec().topicPartitions(partitions))); - - assertEquals(requestMap, builtRequests); - Map> groupIdsToTopics = request1.groupIdsToTopics(); - - assertEquals(3, groupIdsToTopics.size()); - assertEquals(1, groupIdsToTopics.get(groupZero).size()); - assertEquals(2, groupIdsToTopics.get(groupOne).size()); - assertEquals(3, groupIdsToTopics.get(groupTwo).size()); - - assertEquals(1, groupIdsToTopics.get(groupZero).get(0).partitionIndexes().size()); - assertEquals(1, groupIdsToTopics.get(groupOne).get(0).partitionIndexes().size()); - assertEquals(2, groupIdsToTopics.get(groupOne).get(1).partitionIndexes().size()); - assertEquals(1, groupIdsToTopics.get(groupTwo).get(0).partitionIndexes().size()); - assertEquals(2, groupIdsToTopics.get(groupTwo).get(1).partitionIndexes().size()); - assertEquals(3, groupIdsToTopics.get(groupTwo).get(2).partitionIndexes().size()); - - groupIdsToTopics = request2.groupIdsToTopics(); - assertEquals(1, groupIdsToTopics.size()); - assertEquals(1, groupIdsToTopics.get(groupThree).size()); - assertEquals(2, groupIdsToTopics.get(groupThree).get(0).partitionIndexes().size()); + var groupSpecs = new HashMap<>(multiGroupSpecs); + groupSpecs.put( + group3, + new ListConsumerGroupOffsetsSpec().topicPartitions(List.of(t3p0, t3p1)) + ); + + var handler = new ListConsumerGroupOffsetsHandler( + groupSpecs, + false, + logContext + ); + + var request1 = handler.buildBatchedRequest(coordinatorKeys(group0, group1, group2)).build(); + + assertEquals( + Set.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group0) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t0p0.topic()) + .setPartitionIndexes(List.of(t0p0.partition())) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group1) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t0p0.topic()) + .setPartitionIndexes(List.of(t0p0.partition())), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t1p0.topic()) + .setPartitionIndexes(List.of(t1p0.partition(), t1p1.partition())) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group2) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t0p0.topic()) + .setPartitionIndexes(List.of(t0p0.partition())), + new 
OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t1p0.topic()) + .setPartitionIndexes(List.of(t1p0.partition(), t1p1.partition())), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t2p0.topic()) + .setPartitionIndexes(List.of(t2p0.partition(), t2p1.partition(), t2p2.partition())) + )) + ), + Set.copyOf(request1.data().groups()) + ); + + var request2 = handler.buildBatchedRequest(coordinatorKeys(group3)).build(); + + assertEquals( + Set.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group3) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(t3p0.topic()) + .setPartitionIndexes(List.of(t3p0.partition(), t3p1.partition())) + )) + ), + Set.copyOf(request2.data().groups()) + ); } @Test public void testBuildRequestBatchGroups() { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(batchedRequestMap, false, logContext); - Collection> requests = handler.buildRequest(1, coordinatorKeys(groupZero, groupOne, groupTwo)); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(multiGroupSpecs, false, logContext); + Collection> requests = handler.buildRequest(1, coordinatorKeys(group0, group1, group2)); assertEquals(1, requests.size()); - assertEquals(Set.of(groupZero, groupOne, groupTwo), requestGroups((OffsetFetchRequest) requests.iterator().next().request.build())); + assertEquals(Set.of(group0, group1, group2), requestGroups((OffsetFetchRequest) requests.iterator().next().request.build())); } @Test public void testBuildRequestDoesNotBatchGroup() { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(batchedRequestMap, false, logContext); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(multiGroupSpecs, false, logContext); // Disable batching. 
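// With batching disabled, every group id is routed through its own coordinator lookup
// and its own OffsetFetch request, so the assertions that follow expect three separate
// requests, one per group, instead of the single batched request checked above.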
((CoordinatorStrategy) handler.lookupStrategy()).disableBatch(); - Collection> requests = handler.buildRequest(1, coordinatorKeys(groupZero, groupOne, groupTwo)); + Collection> requests = handler.buildRequest(1, coordinatorKeys(group0, group1, group2)); assertEquals(3, requests.size()); assertEquals( - Set.of(Set.of(groupZero), Set.of(groupOne), Set.of(groupTwo)), + Set.of(Set.of(group0), Set.of(group1), Set.of(group2)), requests.stream().map(requestAndKey -> requestGroups((OffsetFetchRequest) requestAndKey.request.build())).collect(Collectors.toSet()) ); } @@ -170,32 +216,31 @@ public void testSuccessfulHandleResponseWithOnePartitionError() { @Test public void testSuccessfulHandleResponseWithOnePartitionErrorWithMultipleGroups() { - Map offsetAndMetadataMapZero = - Collections.singletonMap(t0p0, new OffsetAndMetadata(10L)); - Map offsetAndMetadataMapOne = - Collections.singletonMap(t1p1, new OffsetAndMetadata(10L)); - Map offsetAndMetadataMapTwo = - Collections.singletonMap(t2p2, new OffsetAndMetadata(10L)); - Map> expectedResult = - new HashMap<>() {{ - put(groupZero, offsetAndMetadataMapZero); - put(groupOne, offsetAndMetadataMapOne); - put(groupTwo, offsetAndMetadataMapTwo); - }}; + var expectedResult = Map.of( + group0, Map.of(t0p0, new OffsetAndMetadata(10L)), + group1, Map.of(t1p1, new OffsetAndMetadata(10L)), + group2, Map.of(t2p2, new OffsetAndMetadata(10L)) + ); assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.UNKNOWN_TOPIC_OR_PARTITION), expectedResult); + handleWithPartitionErrorMultipleGroups(Errors.UNKNOWN_TOPIC_OR_PARTITION), + expectedResult + ); assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.TOPIC_AUTHORIZATION_FAILED), expectedResult); + handleWithPartitionErrorMultipleGroups(Errors.TOPIC_AUTHORIZATION_FAILED), + expectedResult + ); assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.UNSTABLE_OFFSET_COMMIT), expectedResult); + handleWithPartitionErrorMultipleGroups(Errors.UNSTABLE_OFFSET_COMMIT), + expectedResult + ); } @Test public void testSuccessfulHandleResponseWithMultipleGroups() { Map> expected = new HashMap<>(); Map errorMap = errorMap(groups, Errors.NONE); - assertCompletedForMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap), expected); + assertCompletedForMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs), expected); } @Test @@ -206,11 +251,12 @@ public void testUnmappedHandleResponse() { @Test public void testUnmappedHandleResponseWithMultipleGroups() { - Map errorMap = new HashMap<>(); - errorMap.put(groupZero, Errors.NOT_COORDINATOR); - errorMap.put(groupOne, Errors.COORDINATOR_NOT_AVAILABLE); - errorMap.put(groupTwo, Errors.NOT_COORDINATOR); - assertUnmappedWithMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); + var errorMap = Map.of( + group0, Errors.NOT_COORDINATOR, + group1, Errors.COORDINATOR_NOT_AVAILABLE, + group2, Errors.NOT_COORDINATOR + ); + assertUnmappedWithMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs)); } @Test @@ -221,7 +267,7 @@ public void testRetriableHandleResponse() { @Test public void testRetriableHandleResponseWithMultipleGroups() { Map errorMap = errorMap(groups, Errors.COORDINATOR_LOAD_IN_PROGRESS); - assertRetriable(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); + assertRetriable(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs)); } @Test @@ -233,117 +279,226 @@ public void testFailedHandleResponse() 
{ @Test public void testFailedHandleResponseWithMultipleGroups() { - Map errorMap = new HashMap<>(); - errorMap.put(groupZero, Errors.GROUP_AUTHORIZATION_FAILED); - errorMap.put(groupOne, Errors.GROUP_ID_NOT_FOUND); - errorMap.put(groupTwo, Errors.INVALID_GROUP_ID); - Map> groupToExceptionMap = new HashMap<>(); - groupToExceptionMap.put(groupZero, GroupAuthorizationException.class); - groupToExceptionMap.put(groupOne, GroupIdNotFoundException.class); - groupToExceptionMap.put(groupTwo, InvalidGroupIdException.class); - assertFailedForMultipleGroups(groupToExceptionMap, - handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); + var errorMap = Map.of( + group0, Errors.GROUP_AUTHORIZATION_FAILED, + group1, Errors.GROUP_ID_NOT_FOUND, + group2, Errors.INVALID_GROUP_ID + ); + var groupToExceptionMap = Map.of( + group0, (Class) GroupAuthorizationException.class, + group1, (Class) GroupIdNotFoundException.class, + group2, (Class) InvalidGroupIdException.class + ); + assertFailedForMultipleGroups( + groupToExceptionMap, + handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs) + ); } private OffsetFetchResponse buildResponse(Errors error) { return new OffsetFetchResponse( - throttleMs, - Collections.singletonMap(groupZero, error), - Collections.singletonMap(groupZero, new HashMap<>())); - } - - private OffsetFetchResponse buildResponseWithMultipleGroups( - Map errorMap, - Map> responseData - ) { - return new OffsetFetchResponse(throttleMs, errorMap, responseData); + new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group0) + .setErrorCode(error.code()) + )), + ApiKeys.OFFSET_FETCH.latestVersion() + ); } private AdminApiHandler.ApiResult> handleWithErrorWithMultipleGroups( Map errorMap, Map groupSpecs ) { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(groupSpecs, false, logContext); - Map> responseData = new HashMap<>(); - for (String group : errorMap.keySet()) { - responseData.put(group, new HashMap<>()); - } - OffsetFetchResponse response = buildResponseWithMultipleGroups(errorMap, responseData); + var handler = new ListConsumerGroupOffsetsHandler( + groupSpecs, + false, + logContext + ); + var response = new OffsetFetchResponse( + new OffsetFetchResponseData() + .setGroups(errorMap.entrySet().stream().map(entry -> + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(entry.getKey()) + .setErrorCode(entry.getValue().code()) + ).collect(Collectors.toList())), + ApiKeys.OFFSET_FETCH.latestVersion() + ); return handler.handleResponse(new Node(1, "host", 1234), - errorMap.keySet() - .stream() - .map(CoordinatorKey::byGroupId) - .collect(Collectors.toSet()), - response); + errorMap.keySet() + .stream() + .map(CoordinatorKey::byGroupId) + .collect(Collectors.toSet()), + response + ); } private OffsetFetchResponse buildResponseWithPartitionError(Errors error) { - - Map responseData = new HashMap<>(); - responseData.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); - responseData.put(t0p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - - return new OffsetFetchResponse(Errors.NONE, responseData); + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group0) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t0p0.topic()) + .setPartitions(List.of( + new 
OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t0p0.partition()) + .setCommittedOffset(10), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t0p1.partition()) + .setCommittedOffset(10) + .setErrorCode(error.code()) + )) + )) + )), + ApiKeys.OFFSET_FETCH.latestVersion() + ); } private OffsetFetchResponse buildResponseWithPartitionErrorWithMultipleGroups(Errors error) { - Map responseDataZero = new HashMap<>(); - responseDataZero.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); - - Map responseDataOne = new HashMap<>(); - responseDataOne.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataOne.put(t1p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataOne.put(t1p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); - - Map responseDataTwo = new HashMap<>(); - responseDataTwo.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataTwo.put(t1p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataTwo.put(t1p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataTwo.put(t2p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataTwo.put(t2p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); - responseDataTwo.put(t2p2, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); - - Map> responseData = - new HashMap<>() {{ - put(groupZero, responseDataZero); - put(groupOne, responseDataOne); - put(groupTwo, responseDataTwo); - }}; - - Map errorMap = errorMap(groups, Errors.NONE); - return new OffsetFetchResponse(0, errorMap, responseData); + var data = new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group0) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t0p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t0p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(Errors.NONE.code()) + )) + )), + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group1) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t0p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t0p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()) + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t1p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p1.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + 
.setErrorCode(Errors.NONE.code()) + )) + )), + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(group2) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t0p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t0p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()) + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t1p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p1.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()) + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t2p0.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t2p0.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t2p1.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(error.code()), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t2p2.partition()) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setErrorCode(Errors.NONE.code()) + )) + )) + )); + + return new OffsetFetchResponse(data, ApiKeys.OFFSET_FETCH.latestVersion()); } private AdminApiHandler.ApiResult> handleWithPartitionError( Errors error ) { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(singleRequestMap, - false, logContext); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( + singleGroupSpec, + false, + logContext + ); OffsetFetchResponse response = buildResponseWithPartitionError(error); return handler.handleResponse(new Node(1, "host", 1234), - singleton(CoordinatorKey.byGroupId(groupZero)), response); + singleton(CoordinatorKey.byGroupId(group0)), response); } private AdminApiHandler.ApiResult> handleWithPartitionErrorMultipleGroups( Errors error ) { ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( - batchedRequestMap, false, logContext); + multiGroupSpecs, + false, + logContext + ); OffsetFetchResponse response = buildResponseWithPartitionErrorWithMultipleGroups(error); return handler.handleResponse( new Node(1, "host", 1234), - coordinatorKeys(groupZero, groupOne, groupTwo), - response); + coordinatorKeys(group0, group1, group2), + response + ); } private AdminApiHandler.ApiResult> handleWithError( Errors error ) { ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( - singleRequestMap, false, logContext); + singleGroupSpec, false, logContext); 
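// buildResponse(error) now assembles the reply from OffsetFetchResponseData, tagging
// group0 with the supplied error code and serializing at the latest OFFSET_FETCH
// version, in place of the removed throttle/error-map/PartitionData constructor.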
OffsetFetchResponse response = buildResponse(error); return handler.handleResponse(new Node(1, "host", 1234), - singleton(CoordinatorKey.byGroupId(groupZero)), + singleton(CoordinatorKey.byGroupId(group0)), response); } @@ -352,7 +507,7 @@ private void assertUnmapped( ) { assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptySet(), result.failedKeys.keySet()); - assertEquals(singletonList(CoordinatorKey.byGroupId(groupZero)), result.unmappedKeys); + assertEquals(singletonList(CoordinatorKey.byGroupId(group0)), result.unmappedKeys); } private void assertUnmappedWithMultipleGroups( @@ -360,7 +515,7 @@ private void assertUnmappedWithMultipleGroups( ) { assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptySet(), result.failedKeys.keySet()); - assertEquals(coordinatorKeys(groupZero, groupOne, groupTwo), new HashSet<>(result.unmappedKeys)); + assertEquals(coordinatorKeys(group0, group1, group2), new HashSet<>(result.unmappedKeys)); } private void assertRetriable( @@ -375,7 +530,7 @@ private void assertCompleted( AdminApiHandler.ApiResult> result, Map expected ) { - CoordinatorKey key = CoordinatorKey.byGroupId(groupZero); + CoordinatorKey key = CoordinatorKey.byGroupId(group0); assertEquals(emptySet(), result.failedKeys.keySet()); assertEquals(emptyList(), result.unmappedKeys); assertEquals(singleton(key), result.completedKeys.keySet()); @@ -399,7 +554,7 @@ private void assertFailed( Class expectedExceptionType, AdminApiHandler.ApiResult> result ) { - CoordinatorKey key = CoordinatorKey.byGroupId(groupZero); + CoordinatorKey key = CoordinatorKey.byGroupId(group0); assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptyList(), result.unmappedKeys); assertEquals(singleton(key), result.failedKeys.keySet()); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java index 52073551e3d79..2c2529a0664c1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.OptionalInt; +import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -43,6 +44,7 @@ import static java.util.Collections.singleton; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; public class ListTransactionsHandlerTest { @@ -86,6 +88,42 @@ public void testBuildRequestWithFilteredState() { assertEquals(Collections.emptyList(), request.data().producerIdFilters()); } + + @Test + public void testBuildRequestWithFilteredTransactionalIdPattern() { + int brokerId = 1; + BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); + String filteredTransactionalIdPattern = "^special-.*"; + ListTransactionsOptions options = new ListTransactionsOptions() + .filterOnTransactionalIdPattern(filteredTransactionalIdPattern); + ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); + ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build(); + assertEquals(filteredTransactionalIdPattern, 
+        assertEquals(List.of(), request.data().stateFilters());
+    }
+
+    @Test
+    public void testBuildRequestWithNullFilteredTransactionalIdPattern() {
+        int brokerId = 1;
+        BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId));
+        ListTransactionsOptions options = new ListTransactionsOptions()
+            .filterOnTransactionalIdPattern(null);
+        ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext);
+        ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build();
+        assertNull(request.data().transactionalIdPattern());
+    }
+
+    @Test
+    public void testBuildRequestWithEmptyFilteredTransactionalIdPattern() {
+        int brokerId = 1;
+        BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId));
+        ListTransactionsOptions options = new ListTransactionsOptions()
+            .filterOnTransactionalIdPattern("");
+        ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext);
+        ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build();
+        assertNull(request.data().transactionalIdPattern());
+    }
+
     @Test
     public void testBuildRequestWithDurationFilter() {
         int brokerId = 1;
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/CloseOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/CloseOptionsTest.java
new file mode 100644
index 0000000000000..c33f73dc7a398
--- /dev/null
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/CloseOptionsTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients.consumer;
+
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.Optional;
+
+import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.DEFAULT;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+class CloseOptionsTest {
+
+    @Test
+    void operationShouldNotBeNull() {
+        assertThrows(NullPointerException.class, () -> CloseOptions.groupMembershipOperation(null));
+        assertThrows(NullPointerException.class, () -> CloseOptions.timeout(Duration.ZERO).withGroupMembershipOperation(null));
+    }
+
+    @Test
+    void operationShouldHaveDefaultValue() {
+        assertEquals(DEFAULT, CloseOptions.timeout(Duration.ZERO).groupMembershipOperation());
+    }
+
+    @Test
+    void timeoutCouldBeNull() {
+        CloseOptions closeOptions = assertDoesNotThrow(() -> CloseOptions.timeout(null));
+        assertEquals(Optional.empty(), closeOptions.timeout());
+    }
+
+    @Test
+    void timeoutShouldBeDefaultEmpty() {
+        assertEquals(Optional.empty(), CloseOptions.groupMembershipOperation(DEFAULT).timeout());
+    }
+}
\ No newline at end of file
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java
index 2fa5515fb4073..bed84d67befbd 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java
@@ -30,6 +30,8 @@
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.CsvSource;
 
+import java.io.FileInputStream;
+import java.io.InputStream;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Locale;
@@ -41,6 +43,7 @@
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class ConsumerConfigTest {
 
@@ -56,6 +59,7 @@ public class ConsumerConfigTest {
     public void setUp() {
         properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName);
         properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName);
+        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
     }
 
     @Test
@@ -144,6 +148,7 @@ public void testInvalidGroupInstanceId() {
         configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass);
         configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass);
         configs.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "");
+        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
         ConfigException ce = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs));
         assertTrue(ce.getMessage().contains(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG));
     }
@@ -154,6 +159,7 @@ public void testInvalidSecurityProtocol() {
         configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass);
         configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass);
         configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc");
+        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
         ConfigException ce =
assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -164,6 +170,7 @@ public void testCaseInsensitiveSecurityProtocol() { final Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(saslSslLowerCase, consumerConfig.originals().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); @@ -174,6 +181,7 @@ public void testDefaultConsumerGroupConfig() { final Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals("classic", consumerConfig.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); assertNull(consumerConfig.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); @@ -188,6 +196,7 @@ public void testRemoteAssignorConfig() { configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, remoteAssignorName); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, protocol); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(protocol, consumerConfig.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); assertEquals(remoteAssignorName, consumerConfig.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); @@ -200,6 +209,7 @@ public void testRemoteAssignorWithClassicGroupProtocol() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, remoteAssignorName); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException exception = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(exception.getMessage().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG + " cannot be set when " + ConsumerConfig.GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CLASSIC.name())); } @@ -209,6 +219,7 @@ public void testDefaultMetadataRecoveryStrategy() { Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, consumerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -219,6 +230,7 @@ public void testInvalidMetadataRecoveryStrategy() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "abc"); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = 
assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -230,6 +242,7 @@ public void testProtocolConfigValidation(String protocol, boolean isValid) { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, protocol); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); if (isValid) { ConsumerConfig config = new ConsumerConfig(configs); assertEquals(protocol, config.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); @@ -250,10 +263,33 @@ private void testUnsupportedConfigsWithConsumerGroupProtocol(String configName, ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass, ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass, ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name(), + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092", configName, value ); ConfigException exception = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); - assertEquals(configName + " cannot be set when " + + assertEquals(configName + " cannot be set when " + ConsumerConfig.GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CONSUMER.name(), exception.getMessage()); } + + /** + * Validates config/consumer.properties file to avoid getting out of sync with ConsumerConfig. + */ + @Test + public void testValidateConfigPropertiesFile() { + Properties props = new Properties(); + + try (InputStream inputStream = new FileInputStream(System.getProperty("user.dir") + "/../config/consumer.properties")) { + props.load(inputStream); + } catch (Exception e) { + fail("Failed to load config/consumer.properties file: " + e.getMessage()); + } + + ConsumerConfig config = new ConsumerConfig(props); + + for (String key : config.originals().keySet()) { + if (!ConsumerConfig.configDef().configKeys().containsKey(key)) { + fail("Invalid configuration key: " + key); + } + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java index b4f649de579ae..c08c7766ec1c7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java @@ -151,6 +151,7 @@ private ConsumerConfig initConsumerConfigWithClassTypes(List classTypes) props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, classTypes); props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name()); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new ConsumerConfig(props); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java index b85d000e167b9..6a6aa919be149 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java @@ -81,7 +81,7 @@ public void testEncodeAndDecodeGeneration() { Optional encodedGeneration = ((CooperativeStickyAssignor) 
assignor).memberData(subscription).generation; assertTrue(encodedGeneration.isPresent()); - assertEquals(encodedGeneration.get(), DEFAULT_GENERATION); + assertEquals(DEFAULT_GENERATION, encodedGeneration.get()); int generation = 10; assignor.onAssignment(null, new ConsumerGroupMetadata("dummy-group-id", generation, "dummy-member-id", Optional.empty())); @@ -90,7 +90,7 @@ public void testEncodeAndDecodeGeneration() { encodedGeneration = ((CooperativeStickyAssignor) assignor).memberData(subscription).generation; assertTrue(encodedGeneration.isPresent()); - assertEquals(encodedGeneration.get(), generation); + assertEquals(generation, encodedGeneration.get()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java index 5cdb268e04576..78ff15cee5f8e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java @@ -60,6 +60,7 @@ import org.apache.kafka.common.message.ListOffsetsResponseData; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.KafkaMetric; @@ -152,10 +153,10 @@ import javax.management.MBeanServer; import javax.management.ObjectName; -import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; import static org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.DEFAULT_REASON; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID; import static org.apache.kafka.common.utils.Utils.propsToMap; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; @@ -226,7 +227,7 @@ public class KafkaConsumerTest { private final String partitionAssigned = "Hit partition assign "; private final String partitionLost = "Hit partition lost "; - private final Collection singleTopicPartition = Collections.singleton(new TopicPartition(topic, 0)); + private final Collection singleTopicPartition = Set.of(new TopicPartition(topic, 0)); private final Time time = new MockTime(); private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final ConsumerPartitionAssignor assignor = new RoundRobinAssignor(); @@ -236,7 +237,7 @@ public class KafkaConsumerTest { @AfterEach public void cleanup() { if (consumer != null) { - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); } } @@ -272,6 +273,35 @@ public void testSubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics( } } + @ParameterizedTest + @EnumSource(GroupProtocol.class) + public void testAssignedPartitionsMetrics(GroupProtocol groupProtocol) throws InterruptedException { + consumer = newConsumer(groupProtocol, time, mock(KafkaClient.class), subscription, + mock(ConsumerMetadata.class), assignor, false, groupInstanceId); + Metrics 
metrics = consumer.metricsRegistry(); + + // This metric is added in the background thread for the AsyncConsumer, so waiting on it to avoid flakiness. + TestUtils.waitForCondition(() -> getMetric(metrics, "assigned-partitions") != null, + "Consumer should register the assigned-partitions metric"); + assertNotNull(getMetric(metrics, "assigned-partitions")); + assertEquals(0.0d, getMetric(metrics, "assigned-partitions").metricValue()); + + subscription.assignFromUser(Set.of(tp0)); + assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue()); + + subscription.assignFromUser(Set.of(tp0, tp1)); + assertEquals(2.0d, getMetric(metrics, "assigned-partitions").metricValue()); + + subscription.unsubscribe(); + subscription.subscribe(Set.of(topic), Optional.empty()); + subscription.assignFromSubscribed(Set.of(tp0)); + assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue()); + } + + private KafkaMetric getMetric(Metrics metrics, String name) { + return metrics.metrics().get(metrics.metricName(name, CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX)); + } + @ParameterizedTest @EnumSource(GroupProtocol.class) public void testUnsubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) { @@ -353,8 +383,10 @@ public void testMetricsReporterAutoGeneratedClientId(GroupProtocol groupProtocol assertEquals(2, consumer.metricsRegistry().reporters().size()); MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) consumer.metricsRegistry().reporters().stream() - .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().get(); + .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().orElseThrow(); assertEquals(consumer.clientId(), mockMetricsReporter.clientId); + + consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -404,11 +436,11 @@ public void testPollReturnsRecords(GroupProtocol groupProtocol) { ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); - assertEquals(records.count(), 5); - assertEquals(records.partitions(), Collections.singleton(tp0)); - assertEquals(records.records(tp0).size(), 5); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(5, Optional.empty(), "")); + assertEquals(5, records.count()); + assertEquals(Set.of(tp0), records.partitions()); + assertEquals(5, records.records(tp0).size()); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(5), records.nextOffsets().get(tp0)); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. 
@@ -425,17 +457,17 @@ public void testSecondPollWithDeserializationErrorThrowsRecordDeserializationExc ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); assertEquals(invalidRecordNumber - 1, records.count()); - assertEquals(Collections.singleton(tp0), records.partitions()); + assertEquals(Set.of(tp0), records.partitions()); assertEquals(invalidRecordNumber - 1, records.records(tp0).size()); long lastOffset = records.records(tp0).get(records.records(tp0).size() - 1).offset(); assertEquals(invalidRecordNumber - 2, lastOffset); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(lastOffset + 1, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(lastOffset + 1), records.nextOffsets().get(tp0)); RecordDeserializationException rde = assertThrows(RecordDeserializationException.class, () -> consumer.poll(Duration.ZERO)); assertEquals(invalidRecordOffset, rde.offset()); assertEquals(tp0, rde.topicPartition()); - assertEquals(rde.offset(), consumer.position(tp0)); + assertEquals(consumer.position(tp0), rde.offset()); } /* @@ -482,11 +514,11 @@ public String deserialize(String topic, Headers headers, ByteBuffer data) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, Optional.of(deserializer), false); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, singletonList(tp), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, List.of(tp), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); client.prepareResponseFrom(fetchResponse(tp, 0, recordCount), node); return consumer; @@ -578,17 +610,17 @@ public void shouldIgnoreGroupInstanceIdForEmptyGroupId(GroupProtocol groupProtoc public void testSubscription(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); - consumer.subscribe(singletonList(topic)); - assertEquals(singleton(topic), consumer.subscription()); + consumer.subscribe(List.of(topic)); + assertEquals(Set.of(topic), consumer.subscription()); assertTrue(consumer.assignment().isEmpty()); consumer.subscribe(Collections.emptyList()); assertTrue(consumer.subscription().isEmpty()); assertTrue(consumer.assignment().isEmpty()); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); assertTrue(consumer.subscription().isEmpty()); - assertEquals(singleton(tp0), consumer.assignment()); + assertEquals(Set.of(tp0), consumer.assignment()); consumer.unsubscribe(); assertTrue(consumer.subscription().isEmpty()); @@ -614,7 +646,7 @@ public void testSubscriptionOnNullTopic(GroupProtocol groupProtocol) { public void testSubscriptionOnEmptyTopic(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); String emptyTopic = " "; - assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(emptyTopic))); + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(List.of(emptyTopic))); } @ParameterizedTest @@ -643,14 +675,14 @@ public void testSubscriptionWithEmptyPartitionAssignment(GroupProtocol groupProt 
props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumer = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertThrows(IllegalStateException.class, - () -> consumer.subscribe(singletonList(topic))); + () -> consumer.subscribe(List.of(topic))); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testSeekNegative(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - consumer.assign(singleton(new TopicPartition("nonExistTopic", 0))); + consumer.assign(Set.of(new TopicPartition("nonExistTopic", 0))); assertThrows(IllegalArgumentException.class, () -> consumer.seek(new TopicPartition("nonExistTopic", 0), -1)); } @@ -675,14 +707,14 @@ public void testAssignOnEmptyTopicPartition(GroupProtocol groupProtocol) { @EnumSource(GroupProtocol.class) public void testAssignOnNullTopicInPartition(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(null, 0)))); + assertThrows(IllegalArgumentException.class, () -> consumer.assign(Set.of(new TopicPartition(null, 0)))); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testAssignOnEmptyTopicInPartition(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(" ", 0)))); + assertThrows(IllegalArgumentException.class, () -> consumer.assign(Set.of(new TopicPartition(" ", 0)))); } @ParameterizedTest @@ -700,7 +732,7 @@ public void testInterceptorConstructorClose(GroupProtocol groupProtocol) { assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(0, MockConsumerInterceptor.CLOSE_COUNT.get()); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); // Cluster metadata will only be updated on calling poll. 
@@ -715,26 +747,27 @@ public void testInterceptorConstructorClose(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(GroupProtocol.class) public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemainingInstances(GroupProtocol groupProtocol) { - final int targetInterceptor = 3; + final int targetInterceptor = 1; try { Properties props = new Properties(); props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName() + ", " - + MockConsumerInterceptor.class.getName() + ", " - + MockConsumerInterceptor.class.getName()); + props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, + CloseInterceptor.class.getName() + "," + MockConsumerInterceptor.class.getName()); MockConsumerInterceptor.setThrowOnConfigExceptionThreshold(targetInterceptor); assertThrows(KafkaException.class, () -> newConsumer( props, new StringDeserializer(), new StringDeserializer())); - assertEquals(3, MockConsumerInterceptor.CONFIG_COUNT.get()); - assertEquals(3, MockConsumerInterceptor.CLOSE_COUNT.get()); + assertEquals(1, MockConsumerInterceptor.CONFIG_COUNT.get()); + assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); + assertEquals(1, CloseInterceptor.CLOSE_COUNT.get()); } finally { MockConsumerInterceptor.resetCounters(); + CloseInterceptor.resetCounters(); } } @@ -743,14 +776,14 @@ public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemai public void testPause(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); - consumer.assign(singletonList(tp0)); - assertEquals(singleton(tp0), consumer.assignment()); + consumer.assign(List.of(tp0)); + assertEquals(Set.of(tp0), consumer.assignment()); assertTrue(consumer.paused().isEmpty()); - consumer.pause(singleton(tp0)); - assertEquals(singleton(tp0), consumer.paused()); + consumer.pause(Set.of(tp0)); + assertEquals(Set.of(tp0), consumer.paused()); - consumer.resume(singleton(tp0)); + consumer.resume(Set.of(tp0)); assertTrue(consumer.paused().isEmpty()); consumer.unsubscribe(); @@ -818,19 +851,19 @@ public void verifyHeartbeatSent(GroupProtocol groupProtocol) throws Exception { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); // initial fetch client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); - assertEquals(singleton(tp0), consumer.assignment()); + assertEquals(Set.of(tp0), consumer.assignment()); AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator, Errors.NONE); @@ -851,12 +884,12 @@ public void verifyHeartbeatSentWhenFetchedDataReady(GroupProtocol groupProtocol) ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new 
MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -882,12 +915,12 @@ public void verifyPollTimesOutDuringMetadataUpdate(GroupProtocol groupProtocol) final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); // Since we would enable the heartbeat thread after received join-response which could // send the sync-group on behalf of the consumer if it is enqueued, we may still complete // the rebalance and send out the fetch; in order to avoid it we do not prepare sync response here. @@ -907,22 +940,22 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithSeek(GroupProtocol g ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, null, groupInstanceId, false); - consumer.assign(singleton(tp0)); - consumer.seekToBeginning(singleton(tp0)); + consumer.assign(Set.of(tp0)); + consumer.seekToBeginning(Set.of(tp0)); - // there shouldn't be any need to lookup the coordinator or fetch committed offsets. - // we just lookup the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); + // there shouldn't be any need to look up the coordinator or fetch committed offsets. + // we just look up the starting position and send the record fetch. + client.prepareResponse(listOffsetsResponse(Map.of(tp0, 50L))); client.prepareResponse(fetchResponse(tp0, 50L, 5)); ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. 
@@ -933,12 +966,12 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit(GroupPr ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); // create a consumer with groupID with manual assignment consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(singleton(tp0)); + consumer.assign(Set.of(tp0)); // 1st coordinator error should cause coordinator unknown client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.COORDINATOR_NOT_AVAILABLE, groupId, node), node); @@ -947,23 +980,23 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit(GroupPr // 2nd coordinator error should find the correct coordinator and clear the findCoordinatorFuture client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); - client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 50L), Errors.NONE)); + client.prepareResponse(offsetResponse(Map.of(tp0, 50L), Errors.NONE)); client.prepareResponse(fetchResponse(tp0, 50L, 5)); @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(0)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); // after coordinator found, consumer should be able to commit the offset successfully - client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE))); - consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(55L))); + client.prepareResponse(offsetCommitResponse(Map.of(tp0, Errors.NONE))); + consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(55L))); // verify the offset is committed - client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 55L), Errors.NONE)); - assertEquals(55, consumer.committed(Collections.singleton(tp0), Duration.ZERO).get(tp0).offset()); + client.prepareResponse(offsetResponse(Map.of(tp0, 55L), Errors.NONE)); + assertEquals(55, consumer.committed(Set.of(tp0), Duration.ZERO).get(tp0).offset()); } @ParameterizedTest @@ -974,7 +1007,7 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); if (groupProtocol == GroupProtocol.CONSUMER) { Node node = metadata.fetch().nodes().get(0); @@ -983,8 +1016,8 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro consumer = newConsumerNoAutoCommit(groupProtocol, time, client, subscription, metadata); consumer.assign(Arrays.asList(tp0, tp1)); - consumer.seekToEnd(singleton(tp0)); - consumer.seekToBeginning(singleton(tp1)); + consumer.seekToEnd(Set.of(tp0)); + consumer.seekToBeginning(Set.of(tp1)); client.prepareResponse(body -> { ListOffsetsRequest request = (ListOffsetsRequest) body; @@ -1001,13 +1034,13 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro 
.setPartitionIndex(tp1.partition()) .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP); return partitions.contains(expectedTp0) && partitions.contains(expectedTp1); - }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER))); + }, listOffsetsResponse(Map.of(tp0, 50L), Map.of(tp1, Errors.NOT_LEADER_OR_FOLLOWER))); client.prepareResponse( body -> { FetchRequest request = (FetchRequest) body; Map fetchData = request.fetchData(topicNames); TopicIdPartition tidp0 = new TopicIdPartition(topicIds.get(tp0.topic()), tp0); - return fetchData.keySet().equals(singleton(tidp0)) && + return fetchData.keySet().equals(Set.of(tidp0)) && fetchData.get(tidp0).fetchOffset == 50L; }, fetchResponse(tp0, 50L, 5)); @@ -1015,9 +1048,9 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); - assertEquals(singleton(tp0), records.partitions()); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(records.records(tp0).get(records.count() - 1).offset() + 1, Optional.empty(), "")); + assertEquals(Set.of(tp0), records.partitions()); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(records.records(tp0).get(records.count() - 1).offset() + 1), records.nextOffsets().get(tp0)); } private void initMetadata(MockClient mockClient, Map partitionCounts) { @@ -1037,16 +1070,16 @@ public void testMissingOffsetNoResetPolicy(GroupProtocol groupProtocol) throws I ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator); + client.prepareResponseFrom(offsetResponse(Map.of(tp0, -1L), Errors.NONE), coordinator); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the offset fetch event added by a call to poll, to be processed @@ -1066,17 +1099,17 @@ public void testResetToCommittedOffset(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator); + 
client.prepareResponseFrom(offsetResponse(Map.of(tp0, 539L), Errors.NONE), coordinator); consumer.poll(Duration.ZERO); assertEquals(539L, consumer.position(tp0)); @@ -1102,18 +1135,18 @@ private void setUpConsumerWithAutoResetPolicy(GroupProtocol groupProtocol, AutoO ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator); - client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); + client.prepareResponseFrom(offsetResponse(Map.of(tp0, -1L), Errors.NONE), coordinator); + client.prepareResponse(listOffsetsResponse(Map.of(tp0, 50L))); consumer.poll(Duration.ZERO); } @@ -1125,14 +1158,14 @@ public void testOffsetIsValidAfterSeek(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, Optional.empty(), false); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); consumer.seek(tp0, 20L); consumer.poll(Duration.ZERO); - assertEquals(subscription.validPosition(tp0).offset, 20L); + assertEquals(20L, subscription.validPosition(tp0).offset); } @ParameterizedTest @@ -1144,19 +1177,19 @@ public void testCommitsFetchedDuringAssign(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); // lookup coordinator Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // fetch offset for one topic - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); - assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); + client.prepareResponseFrom(offsetResponse(Map.of(tp0, offset1), Errors.NONE), coordinator); + assertEquals(offset1, consumer.committed(Set.of(tp0)).get(tp0).offset()); consumer.assign(Arrays.asList(tp0, tp1)); @@ -1164,20 +1197,22 @@ public void testCommitsFetchedDuringAssign(GroupProtocol groupProtocol) { Map offsets = new HashMap<>(); offsets.put(tp0, offset1); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); + assertEquals(offset1, 
consumer.committed(Set.of(tp0)).get(tp0).offset()); offsets.remove(tp0); offsets.put(tp1, offset2); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(offset2, consumer.committed(Collections.singleton(tp1)).get(tp1).offset()); + assertEquals(offset2, consumer.committed(Set.of(tp1)).get(tp1).offset()); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInCommitted(GroupProtocol groupProtocol) { - assertThrows(UnsupportedVersionException.class, () -> setupThrowableConsumer(groupProtocol).committed(Collections.singleton(tp0))); + assertThrows(UnsupportedVersionException.class, () -> setupThrowableConsumer(groupProtocol).committed(Set.of(tp0))); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInPoll(GroupProtocol groupProtocol) throws InterruptedException { @@ -1194,6 +1229,7 @@ public void testFetchStableOffsetThrowInPoll(GroupProtocol groupProtocol) throws }, "Failed to throw UnsupportedVersionException in poll"); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { @@ -1206,7 +1242,7 @@ public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.OFFSET_FETCH.id, (short) 0, (short) 6)); Node node = metadata.fetch().nodes().get(0); @@ -1214,12 +1250,12 @@ public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer( groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, true); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(offsetResponse( - Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); + Map.of(tp0, offset1), Errors.NONE), coordinator); return consumer; } @@ -1231,7 +1267,7 @@ public void testNoCommittedOffsets(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); @@ -1257,12 +1293,12 @@ public void testAutoCommitSentBeforePositionUpdate(GroupProtocol groupProtocol) ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), 
getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1300,7 +1336,7 @@ public void testRegexSubscription(GroupProtocol groupProtocol) { Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null); + prepareRebalance(client, node, Set.of(topic), assignor, List.of(tp0), null); consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer)); @@ -1308,8 +1344,8 @@ public void testRegexSubscription(GroupProtocol groupProtocol) { consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); - assertEquals(singleton(topic), consumer.subscription()); - assertEquals(singleton(tp0), consumer.assignment()); + assertEquals(Set.of(topic), consumer.subscription()); + assertEquals(Set.of(tp0), consumer.assignment()); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -1332,21 +1368,21 @@ public void testChangingRegexSubscription(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - Node coordinator = prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null); + Node coordinator = prepareRebalance(client, node, Set.of(topic), assignor, List.of(tp0), null); consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer)); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); - assertEquals(singleton(topic), consumer.subscription()); + assertEquals(Set.of(topic), consumer.subscription()); consumer.subscribe(Pattern.compile(otherTopic), getConsumerRebalanceListener(consumer)); client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, topicIds)); - prepareRebalance(client, node, singleton(otherTopic), assignor, singletonList(otherTopicPartition), coordinator); + prepareRebalance(client, node, Set.of(otherTopic), assignor, List.of(otherTopicPartition), coordinator); consumer.poll(Duration.ZERO); - assertEquals(singleton(otherTopic), consumer.subscription()); + assertEquals(Set.of(otherTopic), consumer.subscription()); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. 
@@ -1357,12 +1393,12 @@ public void testWakeupWithFetchDataAvailable(GroupProtocol groupProtocol) throws ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, List.of(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1382,8 +1418,8 @@ public void testWakeupWithFetchDataAvailable(GroupProtocol groupProtocol) throws @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); assertEquals(5, records.count()); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(5, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(5), records.nextOffsets().get(tp0)); // Increment time asynchronously to clear timeouts in closing the consumer final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor(); exec.scheduleAtFixedRate(() -> time.sleep(sessionTimeoutMs), 0L, 10L, TimeUnit.MILLISECONDS); @@ -1398,12 +1434,12 @@ public void testPollThrowsInterruptExceptionIfInterrupted(GroupProtocol groupPro final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, List.of(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1425,13 +1461,13 @@ public void fetchResponseWithUnexpectedPartitionIsIgnored(GroupProtocol groupPro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(List.of(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, singletonList(tp0), null); + prepareRebalance(client, node, assignor, List.of(tp0), null); Map fetches1 = new HashMap<>(); fetches1.put(tp0, new FetchInfo(0, 1)); @@ -1443,7 +1479,7 @@ public void fetchResponseWithUnexpectedPartitionIsIgnored(GroupProtocol groupPro @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); 
assertEquals(0, records.count()); - assertEquals(records.nextOffsets().size(), 0); + assertEquals(0, records.nextOffsets().size()); } /** @@ -1543,9 +1579,9 @@ public void testSubscriptionChangesWithAutoCommitEnabled(GroupProtocol groupProt assertEquals(101, records.count()); assertEquals(2L, consumer.position(tp0)); assertEquals(100L, consumer.position(t3p0)); - assertEquals(records.nextOffsets().size(), 2); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(2, Optional.empty(), "")); - assertEquals(records.nextOffsets().get(t3p0), new OffsetAndMetadata(100, Optional.empty(), "")); + assertEquals(2, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(2), records.nextOffsets().get(tp0)); + assertEquals(new OffsetAndMetadata(100), records.nextOffsets().get(t3p0)); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -1593,23 +1629,23 @@ public void testSubscriptionChangesWithAutoCommitDisabled(GroupProtocol groupPro initializeSubscriptionWithSingleTopic(consumer, getConsumerRebalanceListener(consumer)); // mock rebalance responses - prepareRebalance(client, node, assignor, singletonList(tp0), null); + prepareRebalance(client, node, assignor, List.of(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); // verify that subscription is still the same, and now assignment has caught up - assertEquals(singleton(topic), consumer.subscription()); - assertEquals(singleton(tp0), consumer.assignment()); + assertEquals(Set.of(topic), consumer.subscription()); + assertEquals(Set.of(tp0), consumer.assignment()); consumer.poll(Duration.ZERO); // subscription change - consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer)); + consumer.subscribe(Set.of(topic2), getConsumerRebalanceListener(consumer)); // verify that subscription has changed but assignment is still unchanged - assertEquals(singleton(topic2), consumer.subscription()); - assertEquals(singleton(tp0), consumer.assignment()); + assertEquals(Set.of(topic2), consumer.subscription()); + assertEquals(Set.of(tp0), consumer.assignment()); // the auto commit is disabled, so no offset commit request should be sent for (ClientRequest req: client.requests()) @@ -1637,7 +1673,7 @@ public void testUnsubscribeShouldTriggerPartitionsRevokedWithValidGeneration(Gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); CooperativeStickyAssignor assignor = new CooperativeStickyAssignor(); @@ -1645,7 +1681,7 @@ public void testUnsubscribeShouldTriggerPartitionsRevokedWithValidGeneration(Gro initializeSubscriptionWithSingleTopic(consumer, getExceptionConsumerRebalanceListener()); - prepareRebalance(client, node, assignor, singletonList(tp0), null); + prepareRebalance(client, node, assignor, List.of(tp0), null); RuntimeException assignmentException = assertThrows(RuntimeException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); @@ -1663,14 +1699,14 @@ public void testUnsubscribeShouldTriggerPartitionsLostWithNoGeneration(GroupProt ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = 
metadata.fetch().nodes().get(0); CooperativeStickyAssignor assignor = new CooperativeStickyAssignor(); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); initializeSubscriptionWithSingleTopic(consumer, getExceptionConsumerRebalanceListener()); - Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); + Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); RuntimeException assignException = assertThrows(RuntimeException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); @@ -1687,9 +1723,9 @@ public void testUnsubscribeShouldTriggerPartitionsLostWithNoGeneration(GroupProt private void initializeSubscriptionWithSingleTopic(KafkaConsumer consumer, ConsumerRebalanceListener consumerRebalanceListener) { - consumer.subscribe(singleton(topic), consumerRebalanceListener); + consumer.subscribe(Set.of(topic), consumerRebalanceListener); // verify that subscription has changed but assignment is still unchanged - assertEquals(singleton(topic), consumer.subscription()); + assertEquals(Set.of(topic), consumer.subscription()); assertEquals(Collections.emptySet(), consumer.assignment()); } @@ -1715,36 +1751,36 @@ public void testManualAssignmentChangeWithAutoCommitEnabled(GroupProtocol groupP Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // manual assignment - consumer.assign(singleton(tp0)); - consumer.seekToBeginning(singleton(tp0)); + consumer.assign(Set.of(tp0)); + consumer.seekToBeginning(Set.of(tp0)); // fetch offset for one topic - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); + client.prepareResponseFrom(offsetResponse(Map.of(tp0, 0L), Errors.NONE), coordinator); + assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); // verify that assignment immediately changes - assertEquals(consumer.assignment(), singleton(tp0)); + assertEquals(Set.of(tp0), consumer.assignment()); - // there shouldn't be any need to lookup the coordinator or fetch committed offsets. - // we just lookup the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L))); + // there shouldn't be any need to look up the coordinator or fetch committed offsets. + // we just look up the starting position and send the record fetch. 
+ client.prepareResponse(listOffsetsResponse(Map.of(tp0, 10L))); client.prepareResponse(fetchResponse(tp0, 10L, 1)); ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(100)); assertEquals(1, records.count()); assertEquals(11L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(11L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(11L), records.nextOffsets().get(tp0)); // mock the offset commit response for to be revoked partitions AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11); // new manual assignment - consumer.assign(singleton(t2p0)); + consumer.assign(Set.of(t2p0)); // verify that assignment immediately changes - assertEquals(consumer.assignment(), singleton(t2p0)); + assertEquals(Set.of(t2p0), consumer.assignment()); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -1772,39 +1808,39 @@ public void testManualAssignmentChangeWithAutoCommitDisabled(GroupProtocol group Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // manual assignment - consumer.assign(singleton(tp0)); - consumer.seekToBeginning(singleton(tp0)); + consumer.assign(Set.of(tp0)); + consumer.seekToBeginning(Set.of(tp0)); // fetch offset for one topic client.prepareResponseFrom( - offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), + offsetResponse(Map.of(tp0, 0L), Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); + assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); // verify that assignment immediately changes - assertEquals(consumer.assignment(), singleton(tp0)); + assertEquals(Set.of(tp0), consumer.assignment()); - // there shouldn't be any need to lookup the coordinator or fetch committed offsets. - // we just lookup the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L))); + // there shouldn't be any need to look up the coordinator or fetch committed offsets. + // we just look up the starting position and send the record fetch. 
+ client.prepareResponse(listOffsetsResponse(Map.of(tp0, 10L))); client.prepareResponse(fetchResponse(tp0, 10L, 1)); @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(1, records.count()); assertEquals(11L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(11L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(11L), records.nextOffsets().get(tp0)); // new manual assignment - consumer.assign(singleton(t2p0)); + consumer.assign(Set.of(t2p0)); // verify that assignment immediately changes - assertEquals(consumer.assignment(), singleton(t2p0)); + assertEquals(Set.of(t2p0), consumer.assignment()); // the auto commit is disabled, so no offset commit request should be sent for (ClientRequest req : client.requests()) - assertNotSame(req.requestBuilder().apiKey(), ApiKeys.OFFSET_COMMIT); + assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey()); client.requests().clear(); } @@ -1815,7 +1851,7 @@ public void testOffsetOfPausedPartitions(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -1841,12 +1877,12 @@ public void testOffsetOfPausedPartitions(GroupProtocol groupProtocol) { offsets.put(tp1, 0L); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); + assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); offsets.remove(tp0); offsets.put(tp1, 0L); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Collections.singleton(tp1)).get(tp1).offset()); + assertEquals(0, consumer.committed(Set.of(tp1)).get(tp1).offset()); // fetch and verify consumer's position in the two partitions final Map offsetResponse = new HashMap<>(); @@ -1892,7 +1928,7 @@ public void testGracefulClose(GroupProtocol groupProtocol) throws Exception { response.put(tp0, Errors.NONE); OffsetCommitResponse commitResponse = offsetCommitResponse(response); LeaveGroupResponse leaveGroupResponse = new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())); - FetchResponse closeResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>()); + FetchResponse closeResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(), List.of()); consumerCloseTest(groupProtocol, 5000, Arrays.asList(commitResponse, leaveGroupResponse, closeResponse), 0, false); } @@ -1931,7 +1967,7 @@ public void testLeaveGroupTimeout(GroupProtocol groupProtocol) throws Exception Map response = new HashMap<>(); response.put(tp0, Errors.NONE); OffsetCommitResponse commitResponse = offsetCommitResponse(response); - consumerCloseTest(groupProtocol, 5000, singletonList(commitResponse), 5000, false); + consumerCloseTest(groupProtocol, 5000, List.of(commitResponse), 5000, false); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. 
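Beyond the collection swaps, many assertion rewrites in the hunks above only flip argument order so the expected value comes first. JUnit's assertEquals(expected, actual) uses its first argument when printing the failure message, so a reversed call reports the computed value as if it were the expectation. A minimal, standalone sketch of the convention (the test class and helper below are illustrative, not part of this patch):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class AssertionOrderExampleTest {

    @Test
    void expectedValueComesFirst() {
        int actualCount = nextOffsetsSize();
        // Expected value first, computed value second: on failure JUnit prints
        // "expected: <2> but was: <...>", which matches reality. Reversing the
        // arguments would label the computed value as the expectation.
        assertEquals(2, actualCount);
    }

    // Stand-in for a computed value such as records.nextOffsets().size().
    private int nextOffsetsSize() {
        return 2;
    }
}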
@@ -1955,32 +1991,31 @@ public void testCloseInterrupt(GroupProtocol groupProtocol) throws Exception { public void testCloseShouldBeIdempotent(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = spy(new MockClient(time, metadata)); - initMetadata(client, singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.close(Duration.ZERO); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(CloseOptions.timeout(Duration.ZERO)); // verify that the call is idempotent by checking that the network client is only closed once. verify(client).close(); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol groupProtocol) { - try (KafkaConsumer consumer = newConsumer(groupProtocol, null, Optional.of(Boolean.TRUE))) { - fail("Expected an InvalidConfigurationException"); - } catch (InvalidConfigurationException e) { - // OK, expected - } + assertThrows(InvalidConfigurationException.class, + () -> newConsumer(groupProtocol, null, Optional.of(true)), + "Expected an InvalidConfigurationException"); try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(Collections.singleton(topic))); + assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(Set.of(topic))); } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); + assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { @@ -1996,9 +2031,9 @@ public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol @EnumSource(GroupProtocol.class) public void testOperationsByAssigningConsumerWithDefaultGroupId(GroupProtocol groupProtocol) { try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - consumer.assign(singleton(tp0)); + consumer.assign(Set.of(tp0)); - assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); + assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); assertThrows(InvalidGroupIdException.class, consumer::commitAsync); assertThrows(InvalidGroupIdException.class, consumer::commitSync); } @@ -2012,12 +2047,12 @@ public void testMetricConfigRecordingLevelInfo(GroupProtocol groupProtocol) { props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); KafkaConsumer consumer = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertEquals(Sensor.RecordingLevel.INFO, consumer.metricsRegistry().config().recordLevel()); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); props.put(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG"); KafkaConsumer consumer2 = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertEquals(Sensor.RecordingLevel.DEBUG, consumer2.metricsRegistry().config().recordLevel()); - consumer2.close(Duration.ZERO); + consumer2.close(CloseOptions.timeout(Duration.ZERO)); } // TODO: this test references RPCs to be sent that are not part of the CONSUMER group 
protocol. @@ -2029,17 +2064,17 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node); client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node); @@ -2052,7 +2087,7 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro new HeartbeatResponseData().setErrorCode(Errors.REBALANCE_IN_PROGRESS.code())), coordinator); // join group - final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(singletonList(topic))); + final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(List.of(topic))); // This member becomes the leader final JoinGroupResponse leaderResponse = new JoinGroupResponse( @@ -2060,7 +2095,7 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro .setErrorCode(Errors.NONE.code()) .setGenerationId(1).setProtocolName(assignor.name()) .setLeader(memberId).setMemberId(memberId) - .setMembers(Collections.singletonList( + .setMembers(List.of( new JoinGroupResponseData.JoinGroupResponseMember() .setMemberId(memberId) .setMetadata(byteBuffer.array()) @@ -2072,16 +2107,16 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro client.prepareResponseFrom(leaderResponse, coordinator); // sync group fails due to disconnect - client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator, true); + client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator, true); // should try and find the new coordinator client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); // rejoin group client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); - client.prepareResponseFrom(body -> body instanceof FetchRequest + client.prepareResponseFrom(body -> body instanceof FetchRequest && ((FetchRequest) body).fetchData(topicNames).containsKey(new TopicIdPartition(topicId, tp0)), fetchResponse(tp0, 1, 1), node); time.sleep(heartbeatIntervalMs); Thread.sleep(heartbeatIntervalMs); @@ -2099,14 +2134,14 @@ private void consumerCloseTest(GroupProtocol groupProtocol, ConsumerMetadata metadata = 
createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, Optional.empty()); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); - client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(topic, 1), topicIds)); + client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, Map.of(topic, 1), topicIds)); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); @@ -2124,7 +2159,7 @@ private void consumerCloseTest(GroupProtocol groupProtocol, Future future = executor.submit(() -> { consumer.commitAsync(); try { - consumer.close(Duration.ofMillis(closeTimeoutMs)); + consumer.close(CloseOptions.timeout(Duration.ofMillis(closeTimeoutMs))); } catch (Exception e) { closeException.set(e); } @@ -2136,7 +2171,7 @@ private void consumerCloseTest(GroupProtocol groupProtocol, future.get(100, TimeUnit.MILLISECONDS); if (closeTimeoutMs != 0) fail("Close completed without waiting for commit or leave response"); - } catch (TimeoutException e) { + } catch (TimeoutException swallow) { // Expected exception } @@ -2191,7 +2226,7 @@ public void testPartitionsForNonExistingTopic(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Cluster cluster = metadata.fetch(); MetadataResponse updateResponse = RequestTestUtils.metadataResponse(cluster.nodes(), @@ -2209,8 +2244,9 @@ public void testPartitionsForNonExistingTopic(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testPartitionsForAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - assertThrows(AuthenticationException.class, () -> consumer.partitionsFor("some other topic")); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + assertThrows(AuthenticationException.class, () -> consumer.partitionsFor("some other topic")); + } } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2218,8 +2254,9 @@ public void testPartitionsForAuthenticationFailure(GroupProtocol groupProtocol) @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testBeginningOffsetsAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - assertThrows(AuthenticationException.class, () -> consumer.beginningOffsets(Collections.singleton(tp0))); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + assertThrows(AuthenticationException.class, () -> consumer.beginningOffsets(Set.of(tp0))); + } } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. 
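The hunks just above wrap consumers that are expected to fail in try-with-resources rather than leaving them unclosed once assertThrows fires. The snippet below is a small sketch of that idea, assuming only that the resource needs close() to run even when the asserted exception is thrown; FailingClient is a hypothetical stand-in, not a Kafka class:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class TryWithResourcesAssertionExampleTest {

    // Hypothetical closeable client standing in for the consumer under test.
    static final class FailingClient implements AutoCloseable {
        boolean closed = false;

        void call() {
            throw new IllegalStateException("pending authentication error");
        }

        @Override
        public void close() {
            closed = true;
        }
    }

    @Test
    void resourceIsClosedEvenWhenTheAssertedExceptionIsThrown() {
        FailingClient client = new FailingClient();
        try (client) {
            // assertThrows catches the exception, so the test keeps running and
            // try-with-resources still invokes close() when the block exits.
            assertThrows(IllegalStateException.class, client::call);
        }
        assertTrue(client.closed);
    }
}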
@@ -2227,15 +2264,16 @@ public void testBeginningOffsetsAuthenticationFailure(GroupProtocol groupProtoco @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testEndOffsetsAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - assertThrows(AuthenticationException.class, () -> consumer.endOffsets(Collections.singleton(tp0))); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + assertThrows(AuthenticationException.class, () -> consumer.endOffsets(Set.of(tp0))); + } } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testPollAuthenticationFailure(GroupProtocol groupProtocol) throws InterruptedException { final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - consumer.subscribe(singleton(topic)); + consumer.subscribe(Set.of(topic)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the event added by a call to poll, to be processed @@ -2253,8 +2291,9 @@ public void testPollAuthenticationFailure(GroupProtocol groupProtocol) throws In @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testOffsetsForTimesAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - assertThrows(AuthenticationException.class, () -> consumer.offsetsForTimes(singletonMap(tp0, 0L))); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + assertThrows(AuthenticationException.class, () -> consumer.offsetsForTimes(Map.of(tp0, 0L))); + } } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2262,10 +2301,10 @@ public void testOffsetsForTimesAuthenticationFailure(GroupProtocol groupProtocol @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testCommitSyncAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - Map offsets = new HashMap<>(); - offsets.put(tp0, new OffsetAndMetadata(10L)); - assertThrows(AuthenticationException.class, () -> consumer.commitSync(offsets)); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + Map offsets = Map.of(tp0, new OffsetAndMetadata(10L)); + assertThrows(AuthenticationException.class, () -> consumer.commitSync(offsets)); + } } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. 
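Much of the mechanical churn in this file, including the hunk above, replaces Collections.singleton, singletonList and singletonMap with the Java 9+ Set.of, List.of and Map.of factories. Both styles produce unmodifiable collections; the main behavioural difference to keep in mind is null handling. A short standalone sketch (class name and sample values are illustrative only):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class CollectionFactoryExample {
    public static void main(String[] args) {
        // Unmodifiable single-element collections, equivalent to the
        // Collections.singleton* helpers they replace.
        Set<String> topics = Set.of("test");               // vs Collections.singleton("test")
        List<Integer> partitions = List.of(0);             // vs Collections.singletonList(0)
        Map<String, Long> offsets = Map.of("test-0", 10L); // vs Collections.singletonMap("test-0", 10L)
        System.out.println(topics + " " + partitions + " " + offsets);

        // The of(...) factories reject nulls with NullPointerException,
        // whereas the singleton* helpers accept them.
        System.out.println(Collections.singletonList(null)); // prints [null]
        // List.of((String) null);                           // would throw NullPointerException
    }
}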
@@ -2273,24 +2312,27 @@ public void testCommitSyncAuthenticationFailure(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testCommittedAuthenticationFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - assertThrows(AuthenticationException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); + try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { + assertThrows(AuthenticationException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); + } } @ParameterizedTest @EnumSource(value = GroupProtocol.class) public void testMeasureCommitSyncDurationOnFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer - = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis())); + try (final KafkaConsumer consumer + = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis()))) { - try { - consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(10L))); - } catch (final RuntimeException e) { - } + try { + consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(10L))); + } catch (final RuntimeException swallow) { + // swallow + } - final Metric metric = consumer.metrics() - .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); - assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + final Metric metric = consumer.metrics() + .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); + assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + } } @ParameterizedTest @@ -2301,21 +2343,21 @@ public void testMeasureCommitSyncDuration(GroupProtocol groupProtocol) { AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); client.prepareResponseFrom( FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom( - offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE)), + offsetCommitResponse(Map.of(tp0, Errors.NONE)), coordinator ); - consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(10L))); + consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(10L))); final Metric metric = consumer.metrics() .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); @@ -2325,17 +2367,19 @@ public void testMeasureCommitSyncDuration(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class) public void testMeasureCommittedDurationOnFailure(GroupProtocol groupProtocol) { - final KafkaConsumer consumer - = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis())); + try (final KafkaConsumer consumer + = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis()))) { - try { - consumer.committed(Collections.singleton(tp0)); - } 
catch (final RuntimeException e) { - } + try { + consumer.committed(Set.of(tp0)); + } catch (final RuntimeException swallow) { + // swallow + } - final Metric metric = consumer.metrics() - .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); - assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + final Metric metric = consumer.metrics() + .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); + assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + } } @ParameterizedTest @@ -2347,11 +2391,11 @@ public void testMeasureCommittedDuration(GroupProtocol groupProtocol) { AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 2)); + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(singletonList(tp0)); + consumer.assign(List.of(tp0)); // lookup coordinator client.prepareResponseFrom( @@ -2360,9 +2404,9 @@ public void testMeasureCommittedDuration(GroupProtocol groupProtocol) { // fetch offset for one topic client.prepareResponseFrom( - offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); + offsetResponse(Map.of(tp0, offset1), Errors.NONE), coordinator); - consumer.committed(Collections.singleton(tp0)).get(tp0).offset(); + consumer.committed(Set.of(tp0)).get(tp0).offset(); final Metric metric = consumer.metrics() .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); @@ -2377,30 +2421,30 @@ public void testRebalanceException(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singleton(topic), getExceptionConsumerRebalanceListener()); + consumer.subscribe(Set.of(topic), getExceptionConsumerRebalanceListener()); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); // assign throws KafkaException exc = assertThrows(KafkaException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); assertEquals(partitionAssigned + singleTopicPartition, exc.getCause().getMessage()); // the assignment is still updated regardless of the exception - assertEquals(singleton(tp0), subscription.assignedPartitions()); + assertEquals(Set.of(tp0), subscription.assignedPartitions()); // close's revoke throws - exc = assertThrows(KafkaException.class, () -> consumer.close(Duration.ofMillis(0))); + exc = assertThrows(KafkaException.class, () -> 
consumer.close(CloseOptions.timeout(Duration.ofMillis(0)))); assertEquals(partitionRevoked + singleTopicPartition, exc.getCause().getCause().getMessage()); - consumer.close(Duration.ofMillis(0)); + consumer.close(CloseOptions.timeout(Duration.ofMillis(0))); // the assignment is still updated regardless of the exception assertTrue(subscription.assignedPartitions().isEmpty()); @@ -2445,9 +2489,9 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(11, records.count()); assertEquals(1L, consumer.position(tp0)); assertEquals(10L, consumer.position(t2p0)); - assertEquals(records.nextOffsets().size(), 2); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(1L, Optional.empty(), "")); - assertEquals(records.nextOffsets().get(t2p0), new OffsetAndMetadata(10L, Optional.empty(), "")); + assertEquals(2, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(1L), records.nextOffsets().get(tp0)); + assertEquals(new OffsetAndMetadata(10L), records.nextOffsets().get(t2p0)); // prepare the next response of the prefetch fetches1.clear(); @@ -2477,11 +2521,11 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws // verify that the fetch still occurred as expected assertEquals(Set.of(topic, topic3), consumer.subscription()); - assertEquals(Collections.singleton(tp0), consumer.assignment()); + assertEquals(Set.of(tp0), consumer.assignment()); assertEquals(1, records.count()); assertEquals(2L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(2L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(2L), records.nextOffsets().get(tp0)); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -2495,11 +2539,11 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws // should not finish the response yet assertEquals(Set.of(topic, topic3), consumer.subscription()); - assertEquals(Collections.singleton(tp0), consumer.assignment()); + assertEquals(Set.of(tp0), consumer.assignment()); assertEquals(1, records.count()); assertEquals(3L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(3L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(3L), records.nextOffsets().get(tp0)); fetches1.clear(); fetches1.put(tp0, new FetchInfo(3, 1)); @@ -2521,8 +2565,8 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(Set.of(tp0, t3p0), consumer.assignment()); assertEquals(4L, consumer.position(tp0)); assertEquals(0L, consumer.position(t3p0)); - assertEquals(recs1.get().nextOffsets().size(), 1); - assertEquals(recs1.get().nextOffsets().get(tp0), new OffsetAndMetadata(4L, Optional.empty(), "")); + assertEquals(1, recs1.get().nextOffsets().size()); + assertEquals(new OffsetAndMetadata(4L), recs1.get().nextOffsets().get(tp0)); fetches1.clear(); fetches1.put(tp0, new FetchInfo(4, 1)); @@ -2539,13 +2583,13 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(5L, consumer.position(tp0)); assertEquals(100L, consumer.position(t3p0)); - assertEquals(recs2.get().nextOffsets().size(), 2); - assertEquals(recs2.get().nextOffsets().get(tp0), new OffsetAndMetadata(5L, Optional.empty(), "")); - 
assertEquals(recs2.get().nextOffsets().get(t3p0), new OffsetAndMetadata(100L, Optional.empty(), "")); + assertEquals(2, recs2.get().nextOffsets().size()); + assertEquals(new OffsetAndMetadata(5L), recs2.get().nextOffsets().get(tp0)); + assertEquals(new OffsetAndMetadata(100L), recs2.get().nextOffsets().get(t3p0)); client.requests().clear(); consumer.unsubscribe(); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -2556,7 +2600,7 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); final Node node = metadata.fetch().nodes().get(0); final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); @@ -2567,8 +2611,8 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadataOnStart.generationId()); assertEquals(groupInstanceId, groupMetadataOnStart.groupInstanceId()); - consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, singletonList(tp0), null); + consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, List.of(tp0), null); // initial fetch client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node); @@ -2586,10 +2630,10 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { public void testInvalidGroupMetadata(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(singletonList(topic)); + consumer.subscribe(List.of(topic)); // concurrent access is illegal client.enableBlockingUntilWakeup(1); ExecutorService service = Executors.newSingleThreadExecutor(); @@ -2605,7 +2649,7 @@ public void testInvalidGroupMetadata(GroupProtocol groupProtocol) throws Interru } // accessing closed consumer is illegal - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); assertThrows(IllegalStateException.class, consumer::groupMetadata); } @@ -2616,14 +2660,14 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); // throws for unassigned partition assertThrows(IllegalStateException.class, () -> consumer.currentLag(tp0)); - consumer.assign(singleton(tp0)); + consumer.assign(Set.of(tp0)); // poll once to update with the current metadata consumer.poll(Duration.ofMillis(0)); @@ -2655,7 +2699,7 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept // poll once again, which should return 
the list-offset response // and hence next call would return correct lag result ClientRequest listOffsetRequest = findRequest(client, ApiKeys.LIST_OFFSETS); - client.respondToRequest(listOffsetRequest, listOffsetsResponse(singletonMap(tp0, 90L))); + client.respondToRequest(listOffsetRequest, listOffsetsResponse(Map.of(tp0, 90L))); consumer.poll(Duration.ofMillis(0)); // For AsyncKafkaConsumer, subscription state is updated in background, so the result will eventually be updated. @@ -2669,13 +2713,13 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept // one successful fetch should update the log end offset and the position ClientRequest fetchRequest = findRequest(client, ApiKeys.FETCH); final FetchInfo fetchInfo = new FetchInfo(1L, 99L, 50L, 5); - client.respondToRequest(fetchRequest, fetchResponse(singletonMap(tp0, fetchInfo))); + client.respondToRequest(fetchRequest, fetchResponse(Map.of(tp0, fetchInfo))); final ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(records.nextOffsets().size(), 1); - assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); + assertEquals(1, records.nextOffsets().size()); + assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); // correct lag result assertEquals(OptionalLong.of(45L), consumer.currentLag(tp0)); @@ -2687,15 +2731,15 @@ public void testListOffsetShouldUpdateSubscriptions(GroupProtocol groupProtocol) final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, null, groupInstanceId, false); - consumer.assign(singleton(tp0)); + consumer.assign(Set.of(tp0)); consumer.seek(tp0, 50L); - client.prepareResponse(request -> request instanceof ListOffsetsRequest, listOffsetsResponse(singletonMap(tp0, 90L))); - assertEquals(singletonMap(tp0, 90L), consumer.endOffsets(Collections.singleton(tp0))); + client.prepareResponse(request -> request instanceof ListOffsetsRequest, listOffsetsResponse(Map.of(tp0, 90L))); + assertEquals(Map.of(tp0, 90L), consumer.endOffsets(Set.of(tp0))); // correct lag result should be returned as well assertEquals(OptionalLong.of(40L), consumer.currentLag(tp0)); } @@ -2711,7 +2755,7 @@ private KafkaConsumer consumerWithPendingAuthenticationError(Gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -2840,7 +2884,7 @@ private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordi } private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final TopicPartition partition, final long offset) { - return prepareOffsetCommitResponse(client, coordinator, Collections.singletonMap(partition, offset)); + return prepareOffsetCommitResponse(client, coordinator, Map.of(partition, offset)); } private OffsetCommitResponse offsetCommitResponse(Map responseData) { @@ -2870,16 +2914,26 @@ private SyncGroupResponse syncGroupResponse(List partitions, Err } private OffsetFetchResponse offsetResponse(Map 
offsets, Errors error) { - Map partitionData = new HashMap<>(); - for (Map.Entry entry : offsets.entrySet()) { - partitionData.put(entry.getKey(), new OffsetFetchResponse.PartitionData(entry.getValue(), - Optional.empty(), "", error)); - } - int throttleMs = 10; + var grouped = offsets.entrySet().stream().collect(Collectors.groupingBy(e -> e.getKey().topic())); + return new OffsetFetchResponse( - throttleMs, - Collections.singletonMap(groupId, Errors.NONE), - Collections.singletonMap(groupId, partitionData)); + new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(groupId) + .setTopics(grouped.entrySet().stream().map(entry -> + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(entry.getKey()) + .setPartitions(entry.getValue().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition.getKey().partition()) + .setErrorCode(error.code()) + .setCommittedOffset(partition.getValue()) + ).collect(Collectors.toList())) + ).collect(Collectors.toList())) + )), + ApiKeys.OFFSET_FETCH.latestVersion() + ); } private ListOffsetsResponse listOffsetsResponse(Map offsets) { @@ -2939,12 +2993,12 @@ private FetchResponse fetchResponse(Map fetches) { .setLogStartOffset(logStartOffset) .setRecords(records)); } - return FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpResponses); + return FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpResponses, List.of()); } private FetchResponse fetchResponse(TopicPartition partition, long fetchOffset, int count) { FetchInfo fetchInfo = new FetchInfo(fetchOffset, count); - return fetchResponse(Collections.singletonMap(partition, fetchInfo)); + return fetchResponse(Map.of(partition, fetchInfo)); } private KafkaConsumer newConsumer(GroupProtocol groupProtocol, @@ -3027,7 +3081,7 @@ private KafkaConsumer newConsumer(GroupProtocol groupProtocol, Deserializer keyDeserializer = new StringDeserializer(); Deserializer valueDeserializer = valueDeserializerOpt.orElse(new StringDeserializer()); LogContext logContext = new LogContext(); - List assignors = singletonList(assignor); + List assignors = List.of(assignor); ConsumerConfig config = newConsumerConfig( groupProtocol, autoCommitEnabled, @@ -3093,6 +3147,7 @@ private ConsumerConfig newConsumerConfig(GroupProtocol groupProtocol, configs.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoffMs); configs.put(ConsumerConfig.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, throwOnStableOffsetNotSupported); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); groupInstanceId.ifPresent(gi -> configs.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, gi)); return new ConsumerConfig(configs); @@ -3122,7 +3177,7 @@ public void testSubscriptionOnInvalidTopic(GroupProtocol groupProtocol) throws I ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Cluster cluster = metadata.fetch(); String invalidTopicName = "topic abc"; // Invalid topic name due to space @@ -3137,7 +3192,7 @@ public void testSubscriptionOnInvalidTopic(GroupProtocol groupProtocol) throws I client.prepareMetadataUpdate(updateResponse); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - 
consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer)); + consumer.subscribe(Set.of(invalidTopicName), getConsumerRebalanceListener(consumer)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the event added by a call to poll, to be processed @@ -3167,10 +3222,10 @@ private static void assertPollEventuallyThrows(KafkaConsum public void testPollTimeMetrics(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singletonList(topic)); + consumer.subscribe(List.of(topic)); // MetricName objects to check Metrics metrics = consumer.metricsRegistry(); MetricName lastPollSecondsAgoName = metrics.metricName("last-poll-seconds-ago", "consumer-metrics"); @@ -3213,7 +3268,7 @@ public void testPollTimeMetrics(GroupProtocol groupProtocol) { public void testPollIdleRatio(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); // MetricName object to check @@ -3257,14 +3312,14 @@ private static boolean consumerMetricPresent(KafkaConsumer consu @ParameterizedTest @EnumSource(GroupProtocol.class) -public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupProtocol) { + public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupProtocol) { Time time = new MockTime(1L); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(singletonList(topic)); + consumer.subscribe(List.of(topic)); assertTrue(consumerMetricPresent(consumer, "last-poll-seconds-ago")); assertTrue(consumerMetricPresent(consumer, "time-between-poll-avg")); assertTrue(consumerMetricPresent(consumer, "time-between-poll-max")); @@ -3279,7 +3334,7 @@ public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupPro @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testEnforceRebalanceWithManualAssignment(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - consumer.assign(singleton(new TopicPartition("topic", 0))); + consumer.assign(Set.of(new TopicPartition("topic", 0))); assertThrows(IllegalStateException.class, consumer::enforceRebalance); } @@ -3303,15 +3358,15 @@ public void testEnforceRebalanceTriggersRebalanceOnNextPoll(GroupProtocol groupP consumer.poll(Duration.ZERO); // onPartitionsRevoked is not invoked when first joining the group - assertEquals(countingRebalanceListener.revokedCount, 0); - assertEquals(countingRebalanceListener.assignedCount, 1); + assertEquals(0, countingRebalanceListener.revokedCount); + assertEquals(1, countingRebalanceListener.assignedCount); consumer.enforceRebalance(); // the next poll should 
trigger a rebalance consumer.poll(Duration.ZERO); - assertEquals(countingRebalanceListener.revokedCount, 1); + assertEquals(1, countingRebalanceListener.revokedCount); } // NOTE: this test uses the enforceRebalance API which is not implemented in the CONSUMER group protocol. @@ -3335,7 +3390,7 @@ public void testEnforceRebalanceReason(GroupProtocol groupProtocol) { true, groupInstanceId ); - consumer.subscribe(Collections.singletonList(topic)); + consumer.subscribe(List.of(topic)); // Lookup coordinator. client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); @@ -3431,8 +3486,9 @@ public void testOffsetsForTimesTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.offsetsForTimes(singletonMap(tp0, 0L))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.offsetsForTimes(Map.of(tp0, 0L))).getMessage() ); + consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3441,8 +3497,9 @@ public void testBeginningOffsetsTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.beginningOffsets(singletonList(tp0))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.beginningOffsets(List.of(tp0))).getMessage() ); + consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3451,8 +3508,9 @@ public void testEndOffsetsTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.endOffsets(singletonList(tp0))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.endOffsets(List.of(tp0))).getMessage() ); + consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3505,6 +3563,7 @@ public void testClientInstanceIdNoTelemetryReporterRegistered(GroupProtocol grou assertEquals("Telemetry is not enabled. 
Set config `enable.metrics.push` to `true`.", exception.getMessage()); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testEmptyGroupId(GroupProtocol groupProtocol) { @@ -3513,6 +3572,7 @@ public void testEmptyGroupId(GroupProtocol groupProtocol) { assertEquals("The configured group.id should not be an empty string or whitespace.", e.getCause().getMessage()); } + @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testGroupIdWithWhitespace(GroupProtocol groupProtocol) { @@ -3525,7 +3585,7 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -3533,9 +3593,9 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); } - + final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - + int maxPreparedResponses = GroupProtocol.CLASSIC.equals(groupProtocol) ? 10 : 1; for (int i = 0; i < maxPreparedResponses; i++) { client.prepareResponse( @@ -3545,7 +3605,7 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP }, listOffsetsResponse( Collections.emptyMap(), - Collections.singletonMap(tp0, Errors.UNKNOWN_TOPIC_OR_PARTITION) + Map.of(tp0, Errors.UNKNOWN_TOPIC_OR_PARTITION) )); } @@ -3556,24 +3616,24 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP @EnumSource(GroupProtocol.class) public void testCommittedThrowsTimeoutExceptionForNoResponse(GroupProtocol groupProtocol) { Time time = new MockTime(Duration.ofSeconds(1).toMillis()); - + ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - - initMetadata(client, Collections.singletonMap(topic, 2)); + + initMetadata(client, Map.of(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(singletonList(tp0)); - + consumer.assign(List.of(tp0)); + // lookup coordinator Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - + // try to get committed offsets for one topic-partition - but it is disconnected so there's no response and it will time out - client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator, true); + client.prepareResponseFrom(offsetResponse(Map.of(tp0, 0L), Errors.NONE), coordinator, true); org.apache.kafka.common.errors.TimeoutException timeoutException = assertThrows(org.apache.kafka.common.errors.TimeoutException.class, - () -> consumer.committed(Collections.singleton(tp0), Duration.ofMillis(1000L))); + () -> consumer.committed(Set.of(tp0), Duration.ofMillis(1000L))); assertEquals("Timeout of 1000ms expired before the last committed offset for partitions [test-0] could be determined. 
" + "Try tuning default.api.timeout.ms larger to relax the threshold.", timeoutException.getMessage()); } @@ -3583,10 +3643,10 @@ public void testCommittedThrowsTimeoutExceptionForNoResponse(GroupProtocol group public void testPreventMultiThread(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(singletonList(topic)); + consumer.subscribe(List.of(topic)); client.enableBlockingUntilWakeup(1); @@ -3608,13 +3668,13 @@ public void testPreventMultiThread(GroupProtocol groupProtocol) throws Interrupt public void testPollSendsRequestToJoin(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); + initMetadata(client, Map.of(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(singletonList(topic)); + consumer.subscribe(List.of(topic)); assertFalse(groupProtocol == GroupProtocol.CLASSIC ? requestGenerated(client, ApiKeys.JOIN_GROUP) : requestGenerated(client, ApiKeys.CONSUMER_GROUP_HEARTBEAT), @@ -3699,6 +3759,7 @@ void testMonitorablePlugins(GroupProtocol groupProtocol) { configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); + configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name); configs.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MonitorableInterceptor.class.getName()); KafkaConsumer consumer = new KafkaConsumer<>(configs); @@ -3725,7 +3786,7 @@ void testMonitorablePlugins(GroupProtocol groupProtocol) { assertTrue(metrics.containsKey(expectedInterceptorMetric)); assertEquals(VALUE, metrics.get(expectedInterceptorMetric).metricValue()); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); metrics = consumer.metrics(); assertFalse(metrics.containsKey(expectedKeyDeserializerMetric)); assertFalse(metrics.containsKey(expectedValueDeserializerMetric)); @@ -3746,9 +3807,13 @@ private MetricName expectedMetricName(String clientId, String config, Class c private static final String NAME = "name"; private static final String DESCRIPTION = "description"; - private static final Map TAGS = Collections.singletonMap("k", "v"); + private static final LinkedHashMap TAGS = new LinkedHashMap<>(); private static final double VALUE = 123.0; + static { + TAGS.put("t1", "v1"); + } + public static class MonitorableDeserializer extends MockDeserializer implements Monitorable { @Override @@ -3766,4 +3831,33 @@ public void withPluginMetrics(PluginMetrics metrics) { metrics.addMetric(name, (Measurable) (config, now) -> VALUE); } } + + public static class CloseInterceptor implements ConsumerInterceptor { + + public static final AtomicInteger CLOSE_COUNT = new 
AtomicInteger(0); + + @Override + public ConsumerRecords onConsume(ConsumerRecords records) { + return null; + } + + @Override + public void onCommit(Map offsets) { + // no-op + } + + @Override + public void close() { + CLOSE_COUNT.incrementAndGet(); + } + + @Override + public void configure(Map configs) { + // no-op + } + + public static void resetCounters() { + CLOSE_COUNT.set(0); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java index ad46bf0887ebc..a5417c3e00fd1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java @@ -367,6 +367,7 @@ private ShareConsumerConfig newConsumerConfig(String groupId, configs.put(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, retryBackoffMaxMs); configs.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoffMs); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new ShareConsumerConfig(configs); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerTest.java index 3a0f3461c0923..1a6d76dbabfc3 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerTest.java @@ -166,9 +166,9 @@ public void testVerifyFetchAndCommitSyncImplicit() { return request.data().groupId().equals(groupId) && request.data().shareSessionEpoch() == 0 && request.data().batchSize() == batchSize && - request.data().topics().get(0).topicId().equals(topicId1) && - request.data().topics().get(0).partitions().size() == 1 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().isEmpty(); + request.data().topics().stream().findFirst().get().topicId().equals(topicId1) && + request.data().topics().stream().findFirst().get().partitions().size() == 1 && + request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().isEmpty(); } else { return false; } @@ -180,10 +180,10 @@ public void testVerifyFetchAndCommitSyncImplicit() { ShareAcknowledgeRequest request = (ShareAcknowledgeRequest) body; return request.data().groupId().equals(groupId) && request.data().shareSessionEpoch() == 1 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).firstOffset() == 0 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).lastOffset() == 1 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).acknowledgeTypes().size() == 1 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).acknowledgeTypes().get(0) == (byte) 1; + request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().get(0).firstOffset() == 0 && + request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().get(0).lastOffset() == 1 && + request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().get(0).acknowledgeTypes().size() == 1 && + 
request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().get(0).acknowledgeTypes().get(0) == (byte) 1; } else { return false; } @@ -243,9 +243,9 @@ public void testVerifyFetchAndCloseImplicit() { return request.data().groupId().equals(groupId) && request.data().shareSessionEpoch() == 0 && request.data().batchSize() == batchSize && - request.data().topics().get(0).topicId().equals(topicId1) && - request.data().topics().get(0).partitions().size() == 1 && - request.data().topics().get(0).partitions().get(0).acknowledgementBatches().isEmpty(); + request.data().topics().stream().findFirst().get().topicId().equals(topicId1) && + request.data().topics().stream().findFirst().get().partitions().size() == 1 && + request.data().topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().isEmpty(); } else { return false; } @@ -307,6 +307,7 @@ private ShareConsumerConfig newConsumerConfig(String clientId) { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); configs.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, batchSize); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new ShareConsumerConfig(configs); } @@ -411,7 +412,7 @@ private ShareAcknowledgeResponse shareAcknowledgeResponse(TopicIdPartition tip) .setPartitions(List.of(partData)); return new ShareAcknowledgeResponse( new ShareAcknowledgeResponseData() - .setResponses(List.of(topicResponse)) + .setResponses(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection(List.of(topicResponse).iterator())) ); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java index 647976b1d1df4..6968b45a57b66 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java @@ -209,9 +209,7 @@ public void shouldReturnMaxPollRecords() { consumer.assign(Collections.singleton(partition)); consumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L)); - IntStream.range(0, 10).forEach(offset -> { - consumer.addRecord(new ConsumerRecord<>("test", 0, offset, null, null)); - }); + IntStream.range(0, 10).forEach(offset -> consumer.addRecord(new ConsumerRecord<>("test", 0, offset, null, null))); consumer.setMaxPollRecords(2L); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java index 3035703ff37ab..c1a13c054eea4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java @@ -65,4 +65,19 @@ public void testDeserializationCompatibilityWithLeaderEpoch() throws IOException assertEquals(new OffsetAndMetadata(10, Optional.of(235), "test commit metadata"), deserializedObject); } + @Test + public void testEqualsWithNullAndNegativeLeaderEpoch() { + OffsetAndMetadata metadataWithNullEpoch = new OffsetAndMetadata(100L, Optional.empty(), "metadata"); + OffsetAndMetadata metadataWithNegativeEpoch = new OffsetAndMetadata(100L, Optional.of(-1), "metadata"); + assertEquals(metadataWithNullEpoch, 
metadataWithNegativeEpoch); + assertEquals(metadataWithNullEpoch.hashCode(), metadataWithNegativeEpoch.hashCode()); + } + + @Test + public void testEqualsWithNullAndEmptyMetadata() { + OffsetAndMetadata metadataWithNullMetadata = new OffsetAndMetadata(100L, Optional.of(1), null); + OffsetAndMetadata metadataWithEmptyMetadata = new OffsetAndMetadata(100L, Optional.of(1), ""); + assertEquals(metadataWithNullMetadata, metadataWithEmptyMetadata); + assertEquals(metadataWithNullMetadata.hashCode(), metadataWithEmptyMetadata.hashCode()); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java index 55d39db39a12c..a0de7abdf195a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java @@ -135,12 +135,7 @@ private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs) { Optional.empty(), Optional.empty()); } - private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional groupInstanceId, Optional> heartbeatThreadSupplier) { - setupCoordinator(retryBackoffMs, retryBackoffMaxMs, rebalanceTimeoutMs, groupInstanceId, heartbeatThreadSupplier, groupInstanceId.isEmpty()); - } - - private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional groupInstanceId, Optional> heartbeatThreadSupplier, boolean leaveOnClose) { LogContext logContext = new LogContext(); this.mockTime = new MockTime(); ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, retryBackoffMaxMs, 60 * 60 * 1000L, @@ -166,9 +161,9 @@ false, false, new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST HEARTBEAT_INTERVAL_MS, GROUP_ID, groupInstanceId, + null, retryBackoffMs, - retryBackoffMaxMs, - leaveOnClose); + retryBackoffMaxMs); this.coordinator = new DummyCoordinator(rebalanceConfig, consumerClient, metrics, @@ -1108,7 +1103,7 @@ public void testLeaveGroupSentWithGroupInstanceIdUnSet() { @ParameterizedTest @MethodSource("groupInstanceIdAndMembershipOperationMatrix") public void testLeaveGroupSentWithGroupInstanceIdUnSetAndDifferentGroupMembershipOperation(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation) { - checkLeaveGroupRequestSent(groupInstanceId, operation, Optional.empty(), true); + checkLeaveGroupRequestSent(groupInstanceId, operation, Optional.empty()); } private static Stream groupInstanceIdAndMembershipOperationMatrix() { @@ -1123,11 +1118,11 @@ private static Stream groupInstanceIdAndMembershipOperationMatrix() { } private void checkLeaveGroupRequestSent(Optional groupInstanceId) { - checkLeaveGroupRequestSent(groupInstanceId, CloseOptions.GroupMembershipOperation.DEFAULT, Optional.empty(), groupInstanceId.isEmpty()); + checkLeaveGroupRequestSent(groupInstanceId, CloseOptions.GroupMembershipOperation.DEFAULT, Optional.empty()); } - private void checkLeaveGroupRequestSent(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional> heartbeatThreadSupplier, boolean leaveOnClose) { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier, leaveOnClose); + private void checkLeaveGroupRequestSent(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional> heartbeatThreadSupplier) 
{ + setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1505,7 +1500,7 @@ public boolean matches(AbstractRequest body) { } @Test - public void testWakeupAfterSyncGroupReceived() throws Exception { + public void testWakeupAfterSyncGroupReceived() { setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class))); @@ -1537,7 +1532,7 @@ public void testWakeupAfterSyncGroupReceived() throws Exception { } @Test - public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception { + public void testWakeupAfterSyncGroupReceivedExternalCompletion() { setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class))); @@ -1601,12 +1596,9 @@ public void testAuthenticationErrorInEnsureCoordinatorReady() { mockClient.createPendingAuthenticationError(node, 300); - try { - coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE)); - fail("Expected an authentication error."); - } catch (AuthenticationException e) { - // OK - } + assertThrows(AuthenticationException.class, + () -> coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE)), + "Expected an authentication error."); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java index fe6b4d100ff2a..4e9525264a01d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java @@ -1025,7 +1025,7 @@ public void testAssignmentUpdatedForDeletedTopic(RackConfig rackConfig) { Map> assignment = assignor.assignPartitions(partitionsPerTopic, subscriptions); assertTrue(assignor.partitionsTransferringOwnership.isEmpty()); - assertEquals(assignment.values().stream().mapToInt(List::size).sum(), 1 + 100); + assertEquals(1 + 100, assignment.values().stream().mapToInt(List::size).sum()); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(isFullyBalanced(assignment)); } @@ -1043,7 +1043,7 @@ public void testNoExceptionThrownWhenOnlySubscribedTopicDeleted(RackConfig rackC assignment = assignor.assign(Collections.emptyMap(), subscriptions); assertTrue(assignor.partitionsTransferringOwnership.isEmpty()); - assertEquals(assignment.size(), 1); + assertEquals(1, assignment.size()); assertTrue(assignment.get(consumerId).isEmpty()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java index 779df4fb43c4a..b6818ab51b5cf 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java @@ -83,7 +83,7 @@ public void testSingleStateMultiRecord() { } @Test - public void testSingleAcknowledgementTypeExceedingLimit() { + public void testSingleAcknowledgeTypeExceedingLimit() { int i = 0; for (; i < maxRecordsWithSameAcknowledgeType; i++) { 
acks.add(i, AcknowledgeType.ACCEPT); @@ -119,7 +119,7 @@ public void testSingleAcknowledgementTypeExceedingLimit() { } @Test - public void testSingleAcknowledgementTypeWithGap() { + public void testSingleAcknowledgeTypeWithGap() { for (int i = 0; i < maxRecordsWithSameAcknowledgeType; i++) { acks.add(i, null); } @@ -186,7 +186,7 @@ public void testOptimiseBatches() { } @Test - public void testSingleAcknowledgementTypeWithinLimit() { + public void testSingleAcknowledgeTypeWithinLimit() { acks.add(0L, AcknowledgeType.ACCEPT); acks.add(1L, AcknowledgeType.ACCEPT); acks.add(2L, AcknowledgeType.ACCEPT); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java index 3430719b16ee6..402697227ee80 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java @@ -27,7 +27,8 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; @@ -44,10 +45,11 @@ public class ApplicationEventHandlerTest { private final RequestManagers requestManagers = mock(RequestManagers.class); private final CompletableEventReaper applicationEventReaper = mock(CompletableEventReaper.class); - @Test - public void testRecordApplicationEventQueueSize() { + @ParameterizedTest + @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") + public void testRecordApplicationEventQueueSize(String groupName) { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = spy(new AsyncConsumerMetrics(metrics)); + AsyncConsumerMetrics asyncConsumerMetrics = spy(new AsyncConsumerMetrics(metrics, groupName)); ApplicationEventHandler applicationEventHandler = new ApplicationEventHandler( new LogContext(), time, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java index 074fe7ef4a7e6..8e44b3fcc25d5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.Metadata.LeaderAndEpoch; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NodeApiVersions; +import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; @@ -29,7 +30,6 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import 
org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; @@ -68,7 +68,6 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.errors.WakeupException; @@ -84,6 +83,7 @@ import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -117,6 +117,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; @@ -181,9 +182,9 @@ public void resetAll() { backgroundEventQueue.clear(); if (consumer != null) { try { - consumer.close(Duration.ZERO); - } catch (Exception e) { - // best effort to clean up after each test, but may throw (ex. if callbacks where + consumer.close(CloseOptions.timeout(Duration.ZERO)); + } catch (Exception swallow) { + // best effort to clean up after each test, but may throw (ex. if callbacks were // throwing errors) } } @@ -205,6 +206,13 @@ private AsyncKafkaConsumer newConsumerWithoutGroupId() { } private AsyncKafkaConsumer newConsumer(Properties props) { + return newConsumerWithStreamRebalanceData(props, null); + } + + private AsyncKafkaConsumer newConsumerWithStreamRebalanceData( + Properties props, + StreamsRebalanceData streamsRebalanceData + ) { // disable auto-commit by default, so we don't need to handle SyncCommitEvent for each case if (!props.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) { props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); @@ -215,12 +223,12 @@ private AsyncKafkaConsumer newConsumer(Properties props) { new StringDeserializer(), new StringDeserializer(), time, - (a, b, c, d, e, f, g, h) -> applicationEventHandler, - a -> backgroundEventReaper, - (a, b, c, d, e, f, g) -> fetchCollector, - (a, b, c, d) -> metadata, + (logContext, time, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler, + logContext -> backgroundEventReaper, + (logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector, + (consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata, backgroundEventQueue, - Optional.empty() + Optional.ofNullable(streamsRebalanceData) ); } @@ -230,10 +238,10 @@ private AsyncKafkaConsumer newConsumer(ConsumerConfig config) { new StringDeserializer(), new StringDeserializer(), time, - (a, b, c, d, e, f, g, h) -> applicationEventHandler, - a -> backgroundEventReaper, - (a, b, c, d, e, f, g) -> fetchCollector, - (a, b, c, d) -> metadata, + (logContext, time, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler, + logContext -> 
backgroundEventReaper, + (logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector, + (consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata, backgroundEventQueue, Optional.empty() ); @@ -243,16 +251,13 @@ private AsyncKafkaConsumer newConsumer( FetchBuffer fetchBuffer, ConsumerInterceptors interceptors, ConsumerRebalanceListenerInvoker rebalanceListenerInvoker, - SubscriptionState subscriptions, - String groupId, - String clientId, - boolean autoCommitEnabled) { + SubscriptionState subscriptions) { long retryBackoffMs = 100L; int requestTimeoutMs = 30000; int defaultApiTimeoutMs = 1000; return new AsyncKafkaConsumer<>( new LogContext(), - clientId, + "client-id", new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics), fetchBuffer, fetchCollector, @@ -268,8 +273,8 @@ private AsyncKafkaConsumer newConsumer( retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, - groupId, - autoCommitEnabled); + "group-id", + false); } @Test @@ -312,7 +317,7 @@ public void testCommitAsyncWithNullCallback() { // Clean-up. Close the consumer here as we know it will cause a TimeoutException to be thrown. // If we get an error *other* than the TimeoutException, we'll fail the test. try { - Exception e = assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); + Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); assertInstanceOf(TimeoutException.class, e.getCause()); } finally { consumer = null; @@ -331,7 +336,7 @@ public void testCommitAsyncUserSuppliedCallbackNoException() { assertDoesNotThrow(() -> consumer.commitAsync(offsets, callback)); forceCommitCallbackInvocation(); - assertEquals(callback.invoked, 1); + assertEquals(1, callback.invoked); assertNull(callback.exception); } @@ -351,6 +356,26 @@ public void testCommitAsyncUserSuppliedCallbackWithException(Exception exception assertSame(exception.getClass(), callback.exception.getClass()); } + @Test + public void testCommitAsyncShouldCopyOffsets() { + consumer = newConsumer(); + + TopicPartition tp = new TopicPartition("t0", 2); + Map offsets = new HashMap<>(); + offsets.put(tp, new OffsetAndMetadata(10L)); + + markOffsetsReadyForCommitEvent(); + consumer.commitAsync(offsets, null); + + final ArgumentCaptor commitEventCaptor = ArgumentCaptor.forClass(AsyncCommitEvent.class); + verify(applicationEventHandler).add(commitEventCaptor.capture()); + final AsyncCommitEvent commitEvent = commitEventCaptor.getValue(); + assertTrue(commitEvent.offsets().isPresent()); + assertTrue(commitEvent.offsets().get().containsKey(tp)); + offsets.remove(tp); + assertTrue(commitEvent.offsets().get().containsKey(tp)); + } + private static Stream commitExceptionSupplier() { return Stream.of( new KafkaException("Test exception"), @@ -584,6 +609,26 @@ public void testCommitSyncAwaitsCommitAsyncButDoesNotFail() { assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100))); } + @Test + public void testCommitSyncShouldCopyOffsets() { + consumer = newConsumer(); + + TopicPartition tp = new TopicPartition("t0", 2); + Map offsets = new HashMap<>(); + offsets.put(tp, new OffsetAndMetadata(10L)); + + completeCommitSyncApplicationEventSuccessfully(); + consumer.commitSync(offsets); + + final ArgumentCaptor commitEventCaptor = ArgumentCaptor.forClass(SyncCommitEvent.class); + 
verify(applicationEventHandler).add(commitEventCaptor.capture()); + final SyncCommitEvent commitEvent = commitEventCaptor.getValue(); + assertTrue(commitEvent.offsets().isPresent()); + assertTrue(commitEvent.offsets().get().containsKey(tp)); + offsets.remove(tp); + assertTrue(commitEvent.offsets().get().containsKey(tp)); + } + private CompletableFuture setUpConsumerWithIncompleteAsyncCommit(TopicPartition tp) { time = new MockTime(1); consumer = newConsumer(); @@ -629,9 +674,7 @@ public void testEnsurePollExecutedCommitAsyncCallbacks() { consumer.assign(Collections.singleton(new TopicPartition("foo", 0))); assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback)); markReconcileAndAutoCommitCompleteForPollEvent(); - assertMockCommitCallbackInvoked(() -> consumer.poll(Duration.ZERO), - callback, - null); + assertMockCommitCallbackInvoked(() -> consumer.poll(Duration.ZERO), callback); } @Test @@ -641,9 +684,7 @@ public void testEnsureShutdownExecutedCommitAsyncCallbacks() { MockCommitCallback callback = new MockCommitCallback(); completeCommitAsyncApplicationEventSuccessfully(); assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback)); - assertMockCommitCallbackInvoked(() -> consumer.close(), - callback, - null); + assertMockCommitCallbackInvoked(() -> consumer.close(), callback); } @Test @@ -664,11 +705,8 @@ public void testCloseLeavesGroup(long timeoutMs) { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - "client-id", - false)); - consumer.close(Duration.ofMillis(timeoutMs)); + subscriptions)); + consumer.close(CloseOptions.timeout(Duration.ofMillis(timeoutMs))); verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); } @@ -687,13 +725,10 @@ public void testCloseLeavesGroupDespiteOnPartitionsLostError() { mock(FetchBuffer.class), new ConsumerInterceptors<>(Collections.emptyList(), metrics), invoker, - subscriptions, - "group-id", - "client-id", - false)); + subscriptions)); consumer.setGroupAssignmentSnapshot(partitions); - Throwable t = assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); + Throwable t = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); assertNotNull(t.getCause()); assertEquals(rootError, t.getCause()); @@ -711,15 +746,12 @@ public void testCloseLeavesGroupDespiteInterrupt(long timeoutMs) { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - "client-id", - false)); + subscriptions)); Duration timeout = Duration.ofMillis(timeoutMs); try { - assertThrows(InterruptException.class, () -> consumer.close(timeout)); + assertThrows(InterruptException.class, () -> consumer.close(CloseOptions.timeout(timeout))); } finally { Thread.interrupted(); } @@ -735,10 +767,7 @@ public void testCommitSyncAllConsumed() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - "client-id", - false); + subscriptions); completeTopicSubscriptionChangeEventSuccessfully(); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0))); @@ -760,10 +789,7 @@ public void testAutoCommitSyncDisabled() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - 
"client-id", - false); + subscriptions); completeTopicSubscriptionChangeEventSuccessfully(); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0))); @@ -774,15 +800,10 @@ public void testAutoCommitSyncDisabled() { verify(applicationEventHandler, never()).add(any(SyncCommitEvent.class)); } - private void assertMockCommitCallbackInvoked(final Executable task, - final MockCommitCallback callback, - final Errors errors) { + private void assertMockCommitCallbackInvoked(final Executable task, final MockCommitCallback callback) { assertDoesNotThrow(task); assertEquals(1, callback.invoked); - if (errors == null) - assertNull(callback.exception); - else if (errors.exception() instanceof RetriableException) - assertInstanceOf(RetriableCommitFailedException.class, callback.exception); + assertNull(callback.exception); } private static class MockCommitCallback implements OffsetCommitCallback { @@ -985,9 +1006,8 @@ public void testBeginningOffsetsWithZeroTimeout() { TopicPartition tp = new TopicPartition("topic1", 0); Map result = assertDoesNotThrow(() -> consumer.beginningOffsets(Collections.singletonList(tp), Duration.ZERO)); - // The result should be {tp=null} - assertTrue(result.containsKey(tp)); - assertNull(result.get(tp)); + assertNotNull(result); + assertEquals(0, result.size()); verify(applicationEventHandler).add(ArgumentMatchers.isA(ListOffsetsEvent.class)); } @@ -1044,7 +1064,7 @@ public void testNoWakeupInCloseCommit() { return null; }).when(applicationEventHandler).add(any()); completeUnsubscribeApplicationEventSuccessfully(); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); // A commit was triggered and not completed exceptionally by the wakeup assertNotNull(capturedEvent.get()); @@ -1067,7 +1087,7 @@ public void testCloseAwaitPendingAsyncCommitIncomplete() { markOffsetsReadyForCommitEvent(); consumer.commitAsync(); - Exception e = assertThrows(KafkaException.class, () -> consumer.close(Duration.ofMillis(10))); + Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10)))); assertInstanceOf(TimeoutException.class, e.getCause()); } @@ -1088,7 +1108,7 @@ public void testCloseAwaitPendingAsyncCommitComplete() { consumer.commitAsync(cb); completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.close(Duration.ofMillis(10))); + assertDoesNotThrow(() -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10)))); assertEquals(1, cb.invoked); } @@ -1104,7 +1124,7 @@ public void testInterceptorAutoCommitOnClose() { completeCommitSyncApplicationEventSuccessfully(); completeUnsubscribeApplicationEventSuccessfully(); - consumer.close(Duration.ZERO); + consumer.close(CloseOptions.timeout(Duration.ZERO)); assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); @@ -1178,14 +1198,14 @@ public void testNoInterceptorCommitAsyncFailed() { @Test public void testRefreshCommittedOffsetsShouldNotResetIfFailedWithTimeout() { consumer = newConsumer(); - testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(true); + testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(); } @Test public void testRefreshCommittedOffsetsNotCalledIfNoGroupId() { // Create consumer without group id so committed offsets are not used for updating positions consumer = newConsumerWithoutGroupId(); - 
testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(false); + testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(); } @Test @@ -1371,6 +1391,51 @@ public void testGroupMetadataIsResetAfterUnsubscribe() { assertEquals(groupMetadataAfterUnsubscribe, consumer.groupMetadata()); } + private Optional captureStreamRebalanceData(final MockedStatic requestManagers) { + ArgumentCaptor> streamRebalanceData = ArgumentCaptor.forClass(Optional.class); + requestManagers.verify(() -> RequestManagers.supplier( + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + any(), + streamRebalanceData.capture() + )); + return streamRebalanceData.getValue(); + } + + @Test + public void testEmptyStreamRebalanceData() { + final String groupId = "consumerGroupA"; + try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { + consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId)); + final Optional groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers); + assertTrue(groupMetadataUpdateListener.isEmpty()); + } + } + + @Test + public void testStreamRebalanceData() { + final String groupId = "consumerGroupA"; + try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { + StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); + consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); + final Optional groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers); + assertTrue(groupMetadataUpdateListener.isPresent()); + assertEquals(streamsRebalanceData, groupMetadataUpdateListener.get()); + } + } + /** * Tests that the consumer correctly invokes the callbacks for {@link ConsumerRebalanceListener} that was * specified. We don't go through the full effort to emulate heartbeats and correct group management here. 
We're @@ -1569,10 +1634,7 @@ public void testEnsurePollEventSentOnConsumerPoll() { mock(FetchBuffer.class), new ConsumerInterceptors<>(Collections.emptyList(), metrics), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions, - "group-id", - "client-id", - false); + subscriptions); final TopicPartition tp = new TopicPartition("topic", 0); final List> records = singletonList( new ConsumerRecord<>("topic", 0, 2, "key1", "value1")); @@ -1595,7 +1657,7 @@ private Properties requiredConsumerConfigAndGroupId(final String groupId) { return props; } - private void testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(boolean committedOffsetsEnabled) { + private void testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout() { completeFetchedCommittedOffsetApplicationEventExceptionally(new TimeoutException()); doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class)); when(applicationEventHandler.addAndGet(any(CheckAndUpdatePositionsEvent.class))).thenReturn(true); @@ -1952,17 +2014,14 @@ public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - mock(SubscriptionState.class), - "group-id", - "client-id", - false); + mock(SubscriptionState.class)); Metrics metrics = consumer.metricsRegistry(); - AsyncConsumerMetrics kafkaConsumerMetrics = consumer.kafkaConsumerMetrics(); + AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics(); ConsumerRebalanceListenerCallbackNeededEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, Collections.emptySortedSet()); event.setEnqueuedMs(time.milliseconds()); backgroundEventQueue.add(event); - kafkaConsumerMetrics.recordBackgroundEventQueueSize(1); + asyncConsumerMetrics.recordBackgroundEventQueueSize(1); time.sleep(10); consumer.processBackgroundEvents(); @@ -1971,6 +2030,28 @@ public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() { assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP)).metricValue()); } + @Test + public void testFailConstructor() { + final Properties props = requiredConsumerConfig(); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id"); + props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class"); + final ConsumerConfig config = new ConsumerConfig(props); + + try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + KafkaException ce = assertThrows( + KafkaException.class, + () -> newConsumer(config)); + assertTrue(ce.getMessage().contains("Failed to construct kafka consumer"), "Unexpected exception message: " + ce.getMessage()); + assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); + + boolean npeLogged = appender.getEvents().stream() + .flatMap(event -> event.getThrowableInfo().stream()) + .anyMatch(str -> str.contains("NullPointerException")); + + assertFalse(npeLogged, "Unexpected NullPointerException during consumer construction"); + } + } + private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); @@ -2129,6 +2210,71 @@ private void markOffsetsReadyForCommitEvent() { }).when(applicationEventHandler).add(ArgumentMatchers.isA(CommitEvent.class)); } + @Test + public void 
testCloseInvokesStreamsRebalanceListenerOnTasksRevokedWhenMemberEpochPositive() { + final String groupId = "streamsGroup"; + final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); + + try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { + consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); + StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); + consumer.subscribe(singletonList("topic"), mockStreamsListener); + final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); + final int memberEpoch = 42; + final String memberId = "memberId"; + groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); + + consumer.close(CloseOptions.timeout(Duration.ZERO)); + + verify(mockStreamsListener).onTasksRevoked(any()); + } + } + + @Test + public void testCloseInvokesStreamsRebalanceListenerOnAllTasksLostWhenMemberEpochZeroOrNegative() { + final String groupId = "streamsGroup"; + final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); + + try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { + consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); + StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); + consumer.subscribe(singletonList("topic"), mockStreamsListener); + final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); + final int memberEpoch = 0; + final String memberId = "memberId"; + groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); + + consumer.close(CloseOptions.timeout(Duration.ZERO)); + + verify(mockStreamsListener).onAllTasksLost(); + } + } + + @Test + public void testCloseWrapsStreamsRebalanceListenerException() { + final String groupId = "streamsGroup"; + final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); + + try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { + consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); + StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); + RuntimeException testException = new RuntimeException("Test streams listener exception"); + doThrow(testException).when(mockStreamsListener).onTasksRevoked(any()); + consumer.subscribe(singletonList("topic"), mockStreamsListener); + final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); + final int memberEpoch = 1; + final String memberId = "memberId"; + groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); + + KafkaException thrownException = assertThrows(KafkaException.class, + () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); + + assertInstanceOf(RuntimeException.class, thrownException.getCause()); + assertTrue(thrownException.getCause().getMessage().contains("Test streams listener exception")); + verify(mockStreamsListener).onTasksRevoked(any()); + } + } + private void markReconcileAndAutoCommitCompleteForPollEvent() { doAnswer(invocation -> { PollEvent event = invocation.getArgument(0); 
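
The AsyncKafkaConsumerTest changes above consistently replace consumer.close(Duration) with consumer.close(CloseOptions.timeout(...)). The sketch below is only an illustration of that calling pattern and is not part of the patch; it assumes the public Consumer interface exposes close(CloseOptions), and that a commit which cannot finish within the timeout surfaces as a KafkaException wrapping a TimeoutException, which is what the tests above assert.

import org.apache.kafka.clients.consumer.CloseOptions;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.TimeoutException;

import java.time.Duration;

// Illustrative helper only (not from this patch): closes a consumer with a bounded
// timeout using the CloseOptions API that the updated tests exercise.
final class BoundedCloseExample {
    static void closeWithTimeout(Consumer<String, String> consumer, Duration timeout) {
        try {
            // Same calling pattern as consumer.close(CloseOptions.timeout(Duration.ZERO))
            // in the updated tests, with the timeout supplied by the caller.
            consumer.close(CloseOptions.timeout(timeout));
        } catch (KafkaException e) {
            // The tests expect an unfinished commit to surface as a KafkaException
            // whose cause is a TimeoutException; anything else is rethrown here.
            if (!(e.getCause() instanceof TimeoutException)) {
                throw e;
            }
        }
    }
}

The timeout bounds how long close waits for in-flight work, which is why tests such as testCloseAwaitPendingAsyncCommitIncomplete expect a TimeoutException-wrapped failure when an async commit is still pending at close time.
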
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java index 63269b6f5542d..7a999e51163aa 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java @@ -23,22 +23,23 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics.BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME; import static org.junit.jupiter.api.Assertions.assertEquals; public class BackgroundEventHandlerTest { private final BlockingQueue backgroundEventsQueue = new LinkedBlockingQueue<>(); - @Test - public void testRecordBackgroundEventQueueSize() { + @ParameterizedTest + @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") + public void testRecordBackgroundEventQueueSize(String groupName) { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics)) { + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName)) { BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( backgroundEventsQueue, new MockTime(0), @@ -48,7 +49,7 @@ public void testRecordBackgroundEventQueueSize() { assertEquals( 1, (double) metrics.metric( - metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName) ).metricValue() ); @@ -57,7 +58,7 @@ public void testRecordBackgroundEventQueueSize() { assertEquals( 0, (double) metrics.metric( - metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName) ).metricValue() ); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index add8529f1f339..afbb81eb53fce 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -36,6 +36,7 @@ import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.message.OffsetFetchRequestData; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.ApiKeys; @@ -429,16 +430,6 @@ public void testCommitSyncRetriedAfterExpectedRetriableException(Errors error) { assertExceptionHandling(commitRequestManager, error, true); } - private static Stream commitSyncExpectedExceptions() { - return Stream.of( - Arguments.of(Errors.UNKNOWN_MEMBER_ID, 
CommitFailedException.class), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), - Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); - } - @Test public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { CommitRequestManager commitRequestManager = create(false, 100); @@ -727,7 +718,7 @@ public void testOffsetFetchRequestErroredRequests(final Errors error) { @ParameterizedTest @MethodSource("offsetFetchExceptionSupplier") - public void testOffsetFetchRequestTimeoutRequests(final Errors error, + public void testOffsetFetchRequestTimeoutRequests(final Errors error, final Class expectedExceptionClass) { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -762,10 +753,10 @@ public void testSuccessfulOffsetFetch() { CommitRequestManager commitManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); + TopicPartition tp = new TopicPartition("topic1", 0); long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> fetchResult = - commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), - deadlineMs); + commitManager.fetchOffsets(Collections.singleton(tp), deadlineMs); // Send fetch request NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -774,14 +765,23 @@ public void testSuccessfulOffsetFetch() { assertFalse(fetchResult.isDone()); // Complete request with a response - TopicPartition tp = new TopicPartition("topic1", 0); long expectedOffset = 100; + String expectedMetadata = "metadata"; NetworkClientDelegate.UnsentRequest req = result.unsentRequests.get(0); - Map topicPartitionData = - Collections.singletonMap( - tp, - new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); - req.handler().onComplete(buildOffsetFetchClientResponse(req, topicPartitionData, Errors.NONE, false)); + OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(DEFAULT_GROUP_ID) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(tp.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp.partition()) + .setCommittedOffset(expectedOffset) + .setCommittedLeaderEpoch(1) + .setMetadata(expectedMetadata) + )) + )); + req.handler().onComplete(buildOffsetFetchClientResponse(req, groupResponse, false)); // Validate request future completes with the response received assertTrue(fetchResult.isDone()); @@ -796,6 +796,7 @@ public void testSuccessfulOffsetFetch() { assertEquals(1, offsetsAndMetadata.size()); assertTrue(offsetsAndMetadata.containsKey(tp)); assertEquals(expectedOffset, offsetsAndMetadata.get(tp).offset()); + assertEquals(expectedMetadata, offsetsAndMetadata.get(tp).metadata()); assertEquals(0, commitManager.pendingRequests.inflightOffsetFetches.size(), "Inflight " + "request should be removed from the queue when a response is received."); } @@ -916,7 +917,7 @@ public void 
testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { @ParameterizedTest @MethodSource("offsetCommitExceptionSupplier") public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires( - final Errors error, + final Errors error, final Class expectedExceptionClass) { CommitRequestManager commitRequestManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -1338,7 +1339,7 @@ private static Stream offsetCommitExceptionSupplier() { Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, TimeoutException.class), Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), - + // Non-retriable errors should result in their specific exceptions Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, OffsetMetadataTooLarge.class), @@ -1346,7 +1347,7 @@ private static Stream offsetCommitExceptionSupplier() { Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, TopicAuthorizationException.class), Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), Arguments.of(Errors.STALE_MEMBER_EPOCH, CommitFailedException.class), - + // Generic errors should result in KafkaException Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } @@ -1363,7 +1364,7 @@ private static Stream offsetFetchExceptionSupplier() { Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, TimeoutException.class), Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), - + // Non-retriable errors should result in their specific exceptions Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, KafkaException.class), @@ -1374,7 +1375,7 @@ private static Stream offsetFetchExceptionSupplier() { // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new // member epoch is received. Tested separately. 
Arguments.of(Errors.STALE_MEMBER_EPOCH, StaleMemberEpochException.class), - + // Generic errors should result in KafkaException Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } @@ -1410,15 +1411,43 @@ public void testOffsetFetchRequestPartitionDataError(final Errors error, final b assertEquals(1, res.unsentRequests.size()); // Setting 1 partition with error - HashMap topicPartitionData = new HashMap<>(); - topicPartitionData.put(tp1, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", error)); - topicPartitionData.put(tp2, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", Errors.NONE)); - topicPartitionData.put(tp3, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", error)); + OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(DEFAULT_GROUP_ID) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(tp1.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp1.partition()) + .setCommittedOffset(100L) + .setCommittedLeaderEpoch(1) + .setMetadata("metadata") + .setErrorCode(error.code()) + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(tp2.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp2.partition()) + .setCommittedOffset(100L) + .setCommittedLeaderEpoch(1) + .setMetadata("metadata") + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(tp3.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp3.partition()) + .setCommittedOffset(100L) + .setCommittedLeaderEpoch(1) + .setMetadata("metadata") + .setErrorCode(error.code()) + )) + )); res.unsentRequests.get(0).handler().onComplete(buildOffsetFetchClientResponse( res.unsentRequests.get(0), - topicPartitionData, - Errors.NONE, + groupResponse, false)); if (isRetriable) testRetriable(commitRequestManager, Collections.singletonList(future), error); @@ -1458,13 +1487,54 @@ public void testPollWithFatalErrorShouldFailAllUnsentRequests() { assertEmptyPendingRequests(commitRequestManager); } - + private static void assertEmptyPendingRequests(CommitRequestManager commitRequestManager) { assertTrue(commitRequestManager.pendingRequests.inflightOffsetFetches.isEmpty()); assertTrue(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); assertTrue(commitRequestManager.pendingRequests.unsentOffsetCommits.isEmpty()); } + @Test + public void testPollWithFatalErrorDuringCoordinatorIsEmptyAndClosing() { + CommitRequestManager commitRequestManager = create(true, 100); + + Map offsets = Map.of(new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); + + var commitFuture = commitRequestManager.commitAsync(offsets); + + commitRequestManager.signalClose(); + when(coordinatorRequestManager.coordinator()).thenReturn(Optional.empty()); + when(coordinatorRequestManager.fatalError()) + .thenReturn(Optional.of(new GroupAuthorizationException("Fatal error"))); + + assertEquals(NetworkClientDelegate.PollResult.EMPTY, commitRequestManager.poll(time.milliseconds())); + + assertTrue(commitFuture.isCompletedExceptionally()); + + TestUtils.assertFutureThrows(GroupAuthorizationException.class, commitFuture, "Fatal error"); + } + + @Test + public void testPollWithClosingAndPendingRequests() { + CommitRequestManager 
commitRequestManager = create(true, 100); + + Map offsets = Map.of(new TopicPartition("topic", 1), + new OffsetAndMetadata(0)); + + var commitFuture = commitRequestManager.commitAsync(offsets); + + commitRequestManager.signalClose(); + when(coordinatorRequestManager.coordinator()).thenReturn(Optional.empty()); + + assertEquals(NetworkClientDelegate.PollResult.EMPTY, commitRequestManager.poll(time.milliseconds())); + + assertTrue(commitFuture.isCompletedExceptionally()); + + TestUtils.assertFutureThrows(CommitFailedException.class, commitFuture, + "Failed to commit offsets: Coordinator unknown and consumer is closing"); + } + // Supplies (error, isRetriable) private static Stream partitionDataErrorSupplier() { return Stream.of( @@ -1535,6 +1605,7 @@ private List assertPoll( private CommitRequestManager create(final boolean autoCommitEnabled, final long autoCommitInterval) { props.setProperty(AUTO_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(autoCommitInterval)); props.setProperty(ENABLE_AUTO_COMMIT_CONFIG, String.valueOf(autoCommitEnabled)); + props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); if (autoCommitEnabled) props.setProperty(GROUP_ID_CONFIG, TestUtils.randomString(10)); @@ -1559,18 +1630,26 @@ private ClientResponse buildOffsetFetchClientResponse( final NetworkClientDelegate.UnsentRequest request, final Set topicPartitions, final Errors error) { - HashMap topicPartitionData = new HashMap<>(); - topicPartitions.forEach(tp -> topicPartitionData.put(tp, new OffsetFetchResponse.PartitionData( - 100L, - Optional.of(1), - "metadata", - Errors.NONE))); - return buildOffsetFetchClientResponse(request, topicPartitionData, error, false); + OffsetFetchResponseData.OffsetFetchResponseGroup group = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(DEFAULT_GROUP_ID) + .setErrorCode(error.code()) + .setTopics(topicPartitions.stream().collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(entry.getKey()) + .setPartitions(entry.getValue().stream().map(partition -> + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partition.partition()) + .setCommittedOffset(100L) + .setCommittedLeaderEpoch(1) + .setMetadata("metadata") + ).collect(Collectors.toList())) + ).collect(Collectors.toList())); + return buildOffsetFetchClientResponse(request, group, false); } private ClientResponse buildOffsetFetchClientResponseDisconnected( final NetworkClientDelegate.UnsentRequest request) { - return buildOffsetFetchClientResponse(request, Collections.emptyMap(), Errors.NONE, true); + return buildOffsetFetchClientResponse(request, new OffsetFetchResponseData.OffsetFetchResponseGroup(), true); } private ClientResponse buildOffsetCommitClientResponse(final OffsetCommitResponse commitResponse) { @@ -1686,14 +1765,12 @@ private ClientResponse mockOffsetCommitResponseDisconnected(String topic, int pa private ClientResponse buildOffsetFetchClientResponse( final NetworkClientDelegate.UnsentRequest request, - final Map topicPartitionData, - final Errors error, + final OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse, final boolean disconnected) { AbstractRequest abstractRequest = request.requestBuilder().build(); assertInstanceOf(OffsetFetchRequest.class, abstractRequest); OffsetFetchRequest offsetFetchRequest = (OffsetFetchRequest) abstractRequest; - OffsetFetchResponse response = - new OffsetFetchResponse(error, topicPartitionData); + 
OffsetFetchResponse response = new OffsetFetchResponse.Builder(groupResponse).build(ApiKeys.OFFSET_FETCH.latestVersion()); return new ClientResponse( new RequestHeader(ApiKeys.OFFSET_FETCH, offsetFetchRequest.version(), "", 1), request.handler(), diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java index 5c9e06ff90d62..a839618cf7c3e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java @@ -52,6 +52,7 @@ import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; @@ -73,7 +74,6 @@ import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.SyncGroupRequest; import org.apache.kafka.common.requests.SyncGroupResponse; @@ -163,7 +163,7 @@ public abstract class ConsumerCoordinatorTest { private final String consumerId2 = "consumer2"; private MockClient client; - private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { + private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap<>() { { put(topic1, 1); put(topic2, 1); @@ -208,7 +208,7 @@ public void setup() { this.rebalanceListener = new MockRebalanceListener(); this.mockOffsetCommitCallback = new MockCommitCallback(); this.partitionAssignor.clear(); - this.rebalanceConfig = buildRebalanceConfig(Optional.empty()); + this.rebalanceConfig = buildRebalanceConfig(Optional.empty(), null); this.coordinator = buildCoordinator(rebalanceConfig, metrics, assignors, @@ -216,15 +216,15 @@ public void setup() { subscriptions); } - private GroupRebalanceConfig buildRebalanceConfig(Optional groupInstanceId) { + private GroupRebalanceConfig buildRebalanceConfig(Optional groupInstanceId, String rackId) { return new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, + rackId, retryBackoffMs, - retryBackoffMaxMs, - groupInstanceId.isEmpty()); + retryBackoffMaxMs); } @AfterEach @@ -332,7 +332,7 @@ public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfN List> capturedTopics = topicsCaptor.getAllValues(); // expected the final group subscribed topics to be updated to "topic1" and "topic2" - Set expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1, topic2)); + Set expectedTopicsGotCalled = Set.of(topic1, topic2); assertEquals(expectedTopicsGotCalled, capturedTopics.get(1)); } } @@ -367,7 +367,7 @@ private List validateCooperativeA List metadata = new ArrayList<>(); for (Map.Entry> subscriptionEntry : memberSubscriptions.entrySet()) { - ByteBuffer buf = null; + ByteBuffer buf; if (subscriptionEntry.getKey().equals(consumerId)) { buf = 
ConsumerProtocol.serializeSubscription(subscriptionConsumer1); } else { @@ -567,13 +567,13 @@ public void testCommitAsyncWithUserAssignedType() { assertFalse(client.hasInFlightRequests()); // should try to find coordinator since we are commit async - coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), (offsets, exception) -> { - fail("Commit should not get responses, but got offsets:" + offsets + ", and exception:" + exception); - }); + coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), (offsets, exception) -> + fail("Commit should not get responses, but got offsets:" + offsets + ", and exception:" + exception) + ); coordinator.poll(time.timer(0)); assertTrue(coordinator.coordinatorUnknown()); assertTrue(client.hasInFlightRequests()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); client.respond(groupCoordinatorResponse(node, Errors.NONE)); coordinator.poll(time.timer(0)); @@ -581,7 +581,7 @@ public void testCommitAsyncWithUserAssignedType() { // after we've discovered the coordinator we should send // out the commit request immediately assertTrue(client.hasInFlightRequests()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 1); + assertEquals(1, coordinator.inFlightAsyncCommits.get()); } @Test @@ -614,19 +614,17 @@ public void testEnsureCompletingAsyncCommitsWhenSyncCommitWithoutOffsets() { Map offsets = singletonMap(tp, new OffsetAndMetadata(123)); final AtomicBoolean committed = new AtomicBoolean(); - coordinator.commitOffsetsAsync(offsets, (committedOffsets, exception) -> { - committed.set(true); - }); + coordinator.commitOffsetsAsync(offsets, (committedOffsets, exception) -> committed.set(true)); assertFalse(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(100L)), "expected sync commit to fail"); assertFalse(committed.get()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 1); + assertEquals(1, coordinator.inFlightAsyncCommits.get()); prepareOffsetCommitRequest(singletonMap(tp, 123L), Errors.NONE); assertTrue(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(Long.MAX_VALUE)), "expected sync commit to succeed"); assertTrue(committed.get(), "expected commit callback to be invoked"); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); } @Test @@ -647,13 +645,13 @@ public void testManyInFlightAsyncCommitsWithCoordinatorDisconnect() { "Unexpected exception cause type: " + (cause == null ? 
null : cause.getClass())); }); } - assertEquals(coordinator.inFlightAsyncCommits.get(), numRequests); + assertEquals(numRequests, coordinator.inFlightAsyncCommits.get()); coordinator.markCoordinatorUnknown("test cause"); consumerClient.pollNoWakeup(); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(numRequests, responses.get()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); } @Test @@ -682,8 +680,8 @@ public void testCoordinatorUnknownInUnsentCallbacksAfterCoordinatorDead() { ) ); - consumerClient.send(coordinator.checkAndGetCoordinator(), new OffsetCommitRequest.Builder(offsetCommitRequestData)) - .compose(new RequestFutureAdapter() { + consumerClient.send(coordinator.checkAndGetCoordinator(), OffsetCommitRequest.Builder.forTopicNames(offsetCommitRequestData)) + .compose(new RequestFutureAdapter<>() { @Override public void onSuccess(ClientResponse value, RequestFuture future) {} @@ -698,7 +696,7 @@ public void onFailure(RuntimeException e, RequestFuture future) { coordinator.markCoordinatorUnknown("test cause"); consumerClient.pollNoWakeup(); assertTrue(asyncCallbackInvoked.get()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); } @Test @@ -1280,7 +1278,7 @@ public void testForceMetadataRefreshForPatternSubscriptionDuringRebalance() { coordinator.poll(time.timer(Long.MAX_VALUE)); // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain two topics. - final Set updatedSubscriptionSet = new HashSet<>(Arrays.asList(topic1, topic2)); + final Set updatedSubscriptionSet = Set.of(topic1, topic2); assertEquals(updatedSubscriptionSet, subscriptions.subscription()); // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger @@ -1294,23 +1292,19 @@ public void testForceMetadataRefreshForPatternSubscriptionDuringRebalance() { public void testForceMetadataDeleteForPatternSubscriptionDuringRebalance() { try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) { subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener)); - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap() { + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap<>() { { put(topic1, 1); put(topic2, 1); } })); coordinator.maybeUpdateSubscriptionMetadata(); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), subscriptions.subscription()); + assertEquals(Set.of(topic1, topic2), subscriptions.subscription()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); - MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { - { - put(topic1, 1); - } - }); + MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, Map.of(topic1, 1)); // Instrument the test so that metadata will contain only one topic after next refresh. 
client.prepareMetadataUpdate(deletedMetadataResponse); @@ -2077,7 +2071,7 @@ public void testUpdateMetadataDuringRebalance() { coordinator.poll(time.timer(Long.MAX_VALUE)); assertFalse(coordinator.rejoinNeededOrPending()); - assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions()); + assertEquals(Set.of(tp1, tp2), subscriptions.assignedPartitions()); } /** @@ -2269,7 +2263,7 @@ public void testRejoinGroup() { // and join the group again rebalanceListener.revoked = null; rebalanceListener.assigned = null; - subscriptions.subscribe(new HashSet<>(Arrays.asList(topic1, otherTopic)), Optional.of(rebalanceListener)); + subscriptions.subscribe(Set.of(topic1, otherTopic), Optional.of(rebalanceListener)); client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(assigned, Errors.NONE)); coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE)); @@ -2355,7 +2349,7 @@ private void testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors error) MockCommitCallback secondCommitCallback = new MockCommitCallback(); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), firstCommitCallback); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), secondCommitCallback); - assertEquals(coordinator.inFlightAsyncCommits.get(), 2); + assertEquals(2, coordinator.inFlightAsyncCommits.get()); respondToOffsetCommitRequest(singletonMap(t1p, 100L), error); consumerClient.pollNoWakeup(); @@ -2365,7 +2359,7 @@ private void testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors error) assertTrue(coordinator.coordinatorUnknown()); assertInstanceOf(RetriableCommitFailedException.class, firstCommitCallback.exception); assertInstanceOf(RetriableCommitFailedException.class, secondCommitCallback.exception); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); } @Test @@ -2554,7 +2548,7 @@ public void testCommitOffsetAsyncWithDefaultCallback() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked); assertNull(mockOffsetCommitCallback.exception); @@ -2585,7 +2579,7 @@ public void testCommitAfterLeaveGroup() { coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), callback(success)); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(success.get()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); } @Test @@ -2595,7 +2589,7 @@ public void testCommitOffsetAsyncFailedWithDefaultCallback() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked); 
assertInstanceOf(RetriableCommitFailedException.class, mockOffsetCommitCallback.exception); @@ -2610,7 +2604,7 @@ public void testCommitOffsetAsyncCoordinatorNotAvailable() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2627,7 +2621,7 @@ public void testCommitOffsetAsyncNotCoordinator() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2644,7 +2638,7 @@ public void testCommitOffsetAsyncDisconnected() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequestDisconnect(singletonMap(t1p, 100L)); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2697,12 +2691,7 @@ public void testAsyncCommitCallbacksInvokedPriorToSyncCommitCompletion() throws final OffsetAndMetadata firstOffset = new OffsetAndMetadata(0L); final OffsetAndMetadata secondOffset = new OffsetAndMetadata(1L); - coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), new OffsetCommitCallback() { - @Override - public void onComplete(Map offsets, Exception exception) { - committedOffsets.add(firstOffset); - } - }); + coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), (offsets, exception) -> committedOffsets.add(firstOffset)); // Do a synchronous commit in the background so that we can send both responses at the same time Thread thread = new Thread() { @@ -2713,7 +2702,7 @@ public void run() { } }; - assertEquals(coordinator.inFlightAsyncCommits.get(), 1); + assertEquals(1, coordinator.inFlightAsyncCommits.get()); thread.start(); client.waitForRequests(2, 5000); @@ -2721,7 +2710,7 @@ public void run() { respondToOffsetCommitRequest(singletonMap(t1p, secondOffset.offset()), Errors.NONE); thread.join(); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); assertEquals(Arrays.asList(firstOffset, secondOffset), committedOffsets); } @@ -2985,7 +2974,7 @@ public void testCommitOffsetFencedInstanceWithNewGeneration() { @Test public void testCommitOffsetShouldNotSetInstanceIdIfMemberIdIsUnknown() { - rebalanceConfig = buildRebalanceConfig(groupInstanceId); + rebalanceConfig = buildRebalanceConfig(groupInstanceId, null); ConsumerCoordinator coordinator = buildCoordinator( rebalanceConfig, new Metrics(), @@ -3110,7 +3099,7 @@ public void testRefreshOffsetWithValidation() { assertEquals(Collections.emptySet(), subscriptions.initializingPartitions()); assertFalse(subscriptions.hasAllFetchPositions()); assertTrue(subscriptions.awaitingValidation(t1p)); - assertEquals(subscriptions.position(t1p).offset, 100L); + assertEquals(100L, 
subscriptions.position(t1p).offset); assertNull(subscriptions.validPosition(t1p)); } @@ -3122,10 +3111,19 @@ public void testFetchCommittedOffsets() { long offset = 500L; String metadata = "blahblah"; Optional leaderEpoch = Optional.of(15); - OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, leaderEpoch, - metadata, Errors.NONE); - client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); + client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t1p.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p.partition()) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get()) + .setMetadata(metadata) + )) + ))); + Map fetchedOffsets = coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE)); @@ -3138,10 +3136,17 @@ public void testTopicAuthorizationFailedInOffsetFetch() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); - OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(-1, Optional.empty(), - "", Errors.TOPIC_AUTHORIZATION_FAILED); + client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t1p.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p.partition()) + .setCommittedOffset(-1) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()) + )) + ))); - client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); TopicAuthorizationException exception = assertThrows(TopicAuthorizationException.class, () -> coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE))); @@ -3154,7 +3159,7 @@ public void testRefreshOffsetsGroupNotAuthorized() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); subscriptions.assignFromUser(singleton(t1p)); - client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED, Collections.emptyMap())); + client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED, List.of())); try { coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE)); fail("Expected group authorization error"); @@ -3203,7 +3208,7 @@ public void testRefreshOffsetRetriableErrorCoordinatorLookup(Errors error, boole coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); subscriptions.assignFromUser(singleton(t1p)); - client.prepareResponse(offsetFetchResponse(error, Collections.emptyMap())); + client.prepareResponse(offsetFetchResponse(error, List.of())); if (expectCoordinatorRelookup) { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); } @@ -3263,12 +3268,9 @@ public void testNoCoordinatorDiscoveryIfPartitionAwaitingReset() { public void testAuthenticationFailureInEnsureActiveGroup() { client.createPendingAuthenticationError(node, 300); - try { - coordinator.ensureActiveGroup(); - fail("Expected an authentication error."); - } catch (AuthenticationException e) { - // OK - } + assertThrows(AuthenticationException.class, + () -> coordinator.ensureActiveGroup(), + "Expected an authentication error."); } @Test @@ -3467,7 +3469,7 @@ public void testCommitOffsetRequestAsyncAlwaysReceiveFencedException() { assertThrows(FencedInstanceIdException.class, 
this::receiveFencedInstanceIdException); assertThrows(FencedInstanceIdException.class, () -> coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback())); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); assertThrows(FencedInstanceIdException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE))); } @@ -3694,7 +3696,6 @@ private void supportStableFlag(final short upperVersion, final boolean expectThr autoCommitIntervalMs, null, true, - null, Optional.empty()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); @@ -3703,14 +3704,19 @@ private void supportStableFlag(final short upperVersion, final boolean expectThr long offset = 500L; String metadata = "blahblah"; Optional leaderEpoch = Optional.of(15); - OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, leaderEpoch, - metadata, Errors.NONE); - if (upperVersion < 8) { - client.prepareResponse(new OffsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); - } else { - client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); - } + client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(t1p.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(t1p.partition()) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get()) + .setMetadata(metadata) + )) + ))); + if (expectThrows) { assertThrows(UnsupportedVersionException.class, () -> coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE))); @@ -3732,7 +3738,7 @@ private void receiveFencedInstanceIdException() { prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback()); - assertEquals(coordinator.inFlightAsyncCommits.get(), 0); + assertEquals(0, coordinator.inFlightAsyncCommits.get()); coordinator.invokeCompletedOffsetCommitCallbacks(); } @@ -3740,7 +3746,7 @@ private ConsumerCoordinator prepareCoordinatorForCloseTest(final boolean useGrou final boolean autoCommit, final Optional groupInstanceId, final boolean shouldPoll) { - rebalanceConfig = buildRebalanceConfig(groupInstanceId); + rebalanceConfig = buildRebalanceConfig(groupInstanceId, null); ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, @@ -3858,7 +3864,6 @@ private ConsumerCoordinator buildCoordinator(final GroupRebalanceConfig rebalanc autoCommitIntervalMs, null, false, - null, Optional.empty()); } @@ -3975,10 +3980,20 @@ private OffsetCommitResponse offsetCommitResponse(Map re return new OffsetCommitResponse(responseData); } - private OffsetFetchResponse offsetFetchResponse(Errors error, Map responseData) { - return new OffsetFetchResponse(throttleMs, - singletonMap(groupId, error), - singletonMap(groupId, responseData)); + private OffsetFetchResponse offsetFetchResponse( + Errors errors, + List topics + ) { + return new OffsetFetchResponse( + new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(groupId) + .setErrorCode(errors.code()) + .setTopics(topics) + )), + ApiKeys.OFFSET_FETCH.latestVersion() + ); } private OffsetFetchResponse offsetFetchResponse(TopicPartition 
tp, Errors partitionLevelError, String metadata, long offset) { @@ -3986,9 +4001,18 @@ private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partit } private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partitionLevelError, String metadata, long offset, Optional epoch) { - OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, - epoch, metadata, partitionLevelError); - return offsetFetchResponse(Errors.NONE, singletonMap(tp, data)); + return offsetFetchResponse(Errors.NONE, List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(tp.topic()) + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp.partition()) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(epoch.orElse(-1)) + .setMetadata(metadata) + .setErrorCode(partitionLevelError.code()) + )) + )); } private OffsetCommitCallback callback(final AtomicBoolean success) { @@ -4083,9 +4107,10 @@ private void createRackAwareCoordinator(String rackId, MockPartitionAssignor ass metrics = new Metrics(time); + rebalanceConfig = buildRebalanceConfig(rebalanceConfig.groupInstanceId, rackId); coordinator = new ConsumerCoordinator(rebalanceConfig, new LogContext(), consumerClient, Collections.singletonList(assignor), metadata, subscriptions, - metrics, consumerId + groupId, time, false, autoCommitIntervalMs, null, false, rackId, Optional.empty()); + metrics, consumerId + groupId, time, false, autoCommitIntervalMs, null, false, Optional.empty()); } private static MetadataResponse rackAwareMetadata(int numNodes, @@ -4164,7 +4189,6 @@ private void createMockHeartbeatThreadCoordinator() { autoCommitIntervalMs, null, false, - null, Optional.empty(), Optional.of(() -> Mockito.mock(BaseHeartbeatThread.class))); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java index 2febd2085b8f9..9063ae5ab5bf4 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java @@ -647,17 +647,6 @@ public void testUnsupportedVersionFromClient(String errorMsg) { clearInvocations(backgroundEventHandler); } - private void mockErrorResponse(Errors error, String exceptionCustomMsg) { - time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); - NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); - assertEquals(1, result.unsentRequests.size()); - - when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); - ClientResponse response = createHeartbeatResponse( - result.unsentRequests.get(0), error, exceptionCustomMsg); - result.unsentRequests.get(0).handler().onComplete(response); - } - private void mockResponseWithException(UnsupportedVersionException exception, boolean isFromBroker) { time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); @@ -1020,6 +1009,34 @@ public void testRegexInJoiningHeartbeat() { assertNull(data.subscribedTopicRegex()); } + @Test + public void testRackIdInHeartbeatLifecycle() { + heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); + createHeartbeatRequestStateWithZeroHeartbeatInterval(); + + // 
Initial heartbeat with rackId + mockJoiningMemberData(null); + when(membershipManager.rackId()).thenReturn(Optional.of("rack1")); + ConsumerGroupHeartbeatRequestData data = heartbeatState.buildRequestData(); + assertEquals("rack1", data.rackId()); + + // RackId not included in HB if member state is not JOINING + when(membershipManager.state()).thenReturn(MemberState.STABLE); + data = heartbeatState.buildRequestData(); + assertNull(data.rackId()); + + // RackId included in HB if member state changes to JOINING again + when(membershipManager.state()).thenReturn(MemberState.JOINING); + data = heartbeatState.buildRequestData(); + assertEquals("rack1", data.rackId()); + + // Empty rackId not included in HB + when(membershipManager.rackId()).thenReturn(Optional.empty()); + heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); + data = heartbeatState.buildRequestData(); + assertNull(data.rackId()); + } + private void assertHeartbeat(ConsumerHeartbeatRequestManager hrm, int nextPollMs) { NetworkClientDelegate.PollResult pollResult = hrm.poll(time.milliseconds()); assertEquals(1, pollResult.unsentRequests.size()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java index 20cf5ea59e6ac..9edf178182831 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java @@ -71,6 +71,8 @@ import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer.invokeRebalanceCallbacks; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; @@ -125,7 +127,7 @@ public void setup() { time = new MockTime(0); backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue, time, mock(AsyncConsumerMetrics.class)); metrics = new Metrics(time); - rebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics); + rebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState); when(commitRequestManager.maybeAutoCommitSyncBeforeRebalance(anyLong())).thenReturn(CompletableFuture.completedFuture(null)); } @@ -142,17 +144,20 @@ private ConsumerMembershipManager createMembershipManagerJoiningGroup(String gro private ConsumerMembershipManager createMembershipManager(String groupInstanceId) { ConsumerMembershipManager manager = spy(new ConsumerMembershipManager( - GROUP_ID, Optional.ofNullable(groupInstanceId), REBALANCE_TIMEOUT, Optional.empty(), + GROUP_ID, Optional.ofNullable(groupInstanceId), Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true)); assertMemberIdIsGenerated(manager.memberId()); return manager; } - private ConsumerMembershipManager createMembershipManagerJoiningGroup(String groupInstanceId, - String 
serverAssignor) { + private ConsumerMembershipManager createMembershipManagerJoiningGroup( + String groupInstanceId, + String serverAssignor, + String rackId + ) { ConsumerMembershipManager manager = spy(new ConsumerMembershipManager( - GROUP_ID, Optional.ofNullable(groupInstanceId), REBALANCE_TIMEOUT, + GROUP_ID, Optional.ofNullable(groupInstanceId), Optional.ofNullable(rackId), REBALANCE_TIMEOUT, Optional.ofNullable(serverAssignor), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true)); assertMemberIdIsGenerated(manager.memberId()); @@ -165,10 +170,28 @@ public void testMembershipManagerServerAssignor() { ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(); assertEquals(Optional.empty(), membershipManager.serverAssignor()); - membershipManager = createMembershipManagerJoiningGroup("instance1", "Uniform"); + membershipManager = createMembershipManagerJoiningGroup("instance1", "Uniform", null); assertEquals(Optional.of("Uniform"), membershipManager.serverAssignor()); } + @Test + public void testMembershipManagerRackId() { + ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(); + assertEquals(Optional.empty(), membershipManager.rackId()); + + membershipManager = createMembershipManagerJoiningGroup(null, null, "rack1"); + assertEquals(Optional.of("rack1"), membershipManager.rackId()); + } + + @Test + public void testAssignedPartitionCountMetricRegistered() { + MetricName metricName = metrics.metricName( + "assigned-partitions", + CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX + ); + assertNotNull(metrics.metric(metricName), "Metric assigned-partitions should have been registered"); + } + @Test public void testMembershipManagerInitSupportsEmptyGroupInstanceId() { createMembershipManagerJoiningGroup(); @@ -231,7 +254,7 @@ public void testTransitionToFatal() { @Test public void testTransitionToFailedWhenTryingToJoin() { ConsumerMembershipManager membershipManager = new ConsumerMembershipManager( - GROUP_ID, Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), + GROUP_ID, Optional.empty(), Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true); assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); @@ -425,6 +448,10 @@ private void assertTransitionToUnsubscribeOnHBSentAndWaitForResponseToCompleteLe membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(new Assignment(), membershipManager.memberId())); + assertFalse(sendLeave.isDone(), "Send leave operation should not complete until a leave response is received"); + + membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); + assertSendLeaveCompleted(membershipManager, sendLeave); } @@ -955,6 +982,9 @@ public void testHeartbeatSuccessfulResponseWhenLeavingGroupCompletesLeave() { assertFalse(leaveResult.isDone()); membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(createAssignment(true), membershipManager.memberId())); + assertFalse(leaveResult.isDone()); + + membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); assertSendLeaveCompleted(membershipManager, leaveResult); } @@ -998,16 +1028,43 @@ public void testIgnoreHeartbeatResponseWhenNotInGroup(MemberState state) { assertEquals(state, membershipManager.state()); verify(responseData, 
never()).memberId(); - verify(responseData, never()).memberEpoch(); + // In unsubscribed, we check if we received a leave group response, so we do verify member epoch. + if (state != MemberState.UNSUBSCRIBED) { + verify(responseData, never()).memberEpoch(); + } verify(responseData, never()).assignment(); } @Test - public void testLeaveGroupWhenStateIsReconciling() { - ConsumerMembershipManager membershipManager = mockJoinAndReceiveAssignment(false); - assertEquals(MemberState.RECONCILING, membershipManager.state()); + public void testIgnoreLeaveResponseWhenNotLeavingGroup() { + ConsumerMembershipManager membershipManager = createMemberInStableState(); - testLeaveGroupReleasesAssignmentAndResetsEpochToSendLeaveGroup(membershipManager); + CompletableFuture leaveResult = membershipManager.leaveGroup(); + + // Send leave request, transitioning to UNSUBSCRIBED state + membershipManager.onHeartbeatRequestGenerated(); + assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); + + // Receive a previous heartbeat response, which should be ignored + membershipManager.onHeartbeatSuccess(new ConsumerGroupHeartbeatResponse( + new ConsumerGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(membershipManager.memberId()) + .setMemberEpoch(MEMBER_EPOCH) + )); + assertFalse(leaveResult.isDone()); + + // Receive a leave heartbeat response, which should unblock the consumer + membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); + + // Consumer unblocks and updates subscription + membershipManager.onSubscriptionUpdated(); + membershipManager.onConsumerPoll(); + + membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); + + assertEquals(MemberState.JOINING, membershipManager.state()); + assertEquals(0, membershipManager.memberEpoch()); } @Test @@ -2703,7 +2760,7 @@ private ConsumerMembershipManager createMemberInStableState() { } private ConsumerMembershipManager createMemberInStableState(String groupInstanceId) { - ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(groupInstanceId, null); + ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(groupInstanceId, null, null); ConsumerGroupHeartbeatResponse heartbeatResponse = createConsumerGroupHeartbeatResponse(new Assignment(), membershipManager.memberId()); when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); when(subscriptionState.rebalanceListener()).thenReturn(Optional.empty()); @@ -2901,6 +2958,13 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse( .setAssignment(assignment)); } + private ConsumerGroupHeartbeatResponse createConsumerGroupLeaveResponse(String memberId) { + return new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(memberId) + .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH)); + } + /** * Create heartbeat response with the given assignment and a bumped epoch (incrementing by 1 * as default but could be any increment). 
This will be used to mock when a member diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java index 949bdc9aa727d..f57e93a2a15e8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.Node; @@ -100,6 +101,78 @@ private void testPatternSubscription(boolean includeInternalTopics) { assertEquals(Collections.singleton("__matching_topic"), metadata.fetch().topics()); } + @Test + public void testSubscriptionToBrokerRegexDoesNotRequestAllTopicsMetadata() { + // Subscribe to broker-side regex + subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); + + // Receive assignment from coordinator with topic IDs only + Uuid assignedTopicId = Uuid.randomUuid(); + subscription.setAssignedTopicIds(Set.of(assignedTopicId)); + + // Metadata request should only include the assigned topic IDs + try (ConsumerMetadata metadata = newConsumerMetadata(false)) { + MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); + assertFalse(builder.isAllTopics(), "Should not request all topics when using broker-side regex"); + assertEquals(List.of(assignedTopicId), builder.topicIds(), "Should only request assigned topic IDs when using broker-side regex"); + } + } + + @Test + public void testSubscriptionToBrokerRegexRetainsAssignedTopics() { + // Subscribe to broker-side regex + subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); + + // Receive assignment from coordinator with topic IDs only + Uuid assignedTopicId = Uuid.randomUuid(); + subscription.setAssignedTopicIds(Set.of(assignedTopicId)); + + // Metadata request for assigned topic IDs + try (ConsumerMetadata metadata = newConsumerMetadata(false)) { + MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); + assertEquals(List.of(assignedTopicId), builder.topicIds()); + + // Metadata response with the assigned topic ID and name + Map topicIds = Map.of("__matching_topic", assignedTopicId); + MetadataResponse response = RequestTestUtils.metadataUpdateWithIds(1, singletonMap("__matching_topic", 1), topicIds); + metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds()); + + assertEquals(Set.of("__matching_topic"), new HashSet<>(metadata.fetch().topics())); + assertEquals(Set.of("__matching_topic"), metadata.fetch().topics()); + } + } + + @Test + public void testSubscriptionToBrokerRegexAllowsTransientTopics() { + // Subscribe to broker-side regex + subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); + + // Receive assignment from coordinator with topic IDs only + Uuid assignedTopicId = Uuid.randomUuid(); + subscription.setAssignedTopicIds(Set.of(assignedTopicId)); + + // Metadata request should only include the assigned topic IDs + try (ConsumerMetadata metadata = newConsumerMetadata(false)) { + MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); + assertFalse(builder.isAllTopics()); + assertEquals(List.of(assignedTopicId), builder.topicIds()); + + // 
Call to offsets-related APIs starts. Metadata requests should move to requesting topic names temporarily. + String transientTopic = "__transient_topic"; + metadata.addTransientTopics(Set.of(transientTopic)); + builder = metadata.newMetadataRequestBuilder(); + assertFalse(builder.isAllTopics()); + // assertTrue(builder.topicIds().isEmpty()); + assertEquals(List.of(transientTopic), builder.topics()); + + // Call to offsets-related APIs ends. Metadata requests should move back to requesting topic IDs for RE2J. + metadata.clearTransientTopics(); + builder = metadata.newMetadataRequestBuilder(); + assertFalse(builder.isAllTopics()); + assertEquals(List.of(assignedTopicId), builder.topicIds()); + } + } + @Test public void testUserAssignment() { subscription.assignFromUser(Set.of( diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java index b5ab39e62c720..1f5551e7df121 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java @@ -41,7 +41,6 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestUtils; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.time.Duration; @@ -266,45 +265,34 @@ public void testMetadataFailurePropagated() { assertEquals(metadataException, exc); } - @Disabled("KAFKA-17554") @Test public void testFutureCompletionOutsidePoll() throws Exception { // Tests the scenario in which the request that is being awaited in one thread // is received and completed in another thread. 
- - final CountDownLatch t1TheardCountDownLatch = new CountDownLatch(1); - final CountDownLatch t2ThreadCountDownLatch = new CountDownLatch(2); - final RequestFuture future = consumerClient.send(node, heartbeat()); consumerClient.pollNoWakeup(); // dequeue and send the request + CountDownLatch bothThreadsReady = new CountDownLatch(2); + client.enableBlockingUntilWakeup(2); - Thread t1 = new Thread(() -> { - t1TheardCountDownLatch.countDown(); + + Thread t1 = new Thread(() -> { + bothThreadsReady.countDown(); consumerClient.pollNoWakeup(); - t2ThreadCountDownLatch.countDown(); }); - - t1.start(); Thread t2 = new Thread(() -> { - try { - t2ThreadCountDownLatch.await(); - consumerClient.poll(future); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + bothThreadsReady.countDown(); + consumerClient.poll(future); }); + + t1.start(); t2.start(); - - // Simulate a network response and return from the poll in t1 + + // Wait until both threads are blocked in poll + bothThreadsReady.await(); client.respond(heartbeatResponse(Errors.NONE)); - // Wait for t1 to block in poll - t1TheardCountDownLatch.await(); - client.wakeup(); - // while t1 is blocked in poll, t2 should be able to complete the future - t2ThreadCountDownLatch.countDown(); // Both threads should complete since t1 should wakeup t2 t1.join(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 520279fc8d454..35ccb17dfab43 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -21,6 +21,7 @@ import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -32,21 +33,21 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; import java.util.LinkedList; import java.util.List; -import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.function.Supplier; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -117,10 +118,7 @@ public void testEnsureCloseStopsRunningThread() { @ParameterizedTest @ValueSource(longs = {ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS - 1, 
ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS + 1}) public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) { - List> list = new ArrayList<>(); - list.add(Optional.of(coordinatorRequestManager)); - list.add(Optional.of(heartbeatRequestManager)); - + List list = List.of(coordinatorRequestManager, heartbeatRequestManager); when(requestManagers.entries()).thenReturn(list); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); @@ -158,16 +156,13 @@ public void testStartupAndTearDown() throws InterruptedException { @Test public void testRequestsTransferFromManagersToClientOnThreadRun() { - List> list = new ArrayList<>(); - list.add(Optional.of(coordinatorRequestManager)); - list.add(Optional.of(heartbeatRequestManager)); - list.add(Optional.of(offsetsRequestManager)); + List list = List.of(coordinatorRequestManager, heartbeatRequestManager, offsetsRequestManager); when(requestManagers.entries()).thenReturn(list); when(coordinatorRequestManager.poll(anyLong())).thenReturn(mock(NetworkClientDelegate.PollResult.class)); consumerNetworkThread.runOnce(); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).poll(anyLong()))); - requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).maximumTimeToWait(anyLong()))); + requestManagers.entries().forEach(rm -> verify(rm).poll(anyLong())); + requestManagers.entries().forEach(rm -> verify(rm).maximumTimeToWait(anyLong())); verify(networkClientDelegate).addAll(any(NetworkClientDelegate.PollResult.class)); verify(networkClientDelegate).poll(anyLong(), anyLong()); } @@ -178,7 +173,7 @@ public void testMaximumTimeToWait() { // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); - when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(heartbeatRequestManager))); + when(requestManagers.entries()).thenReturn(List.of(heartbeatRequestManager)); when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) defaultHeartbeatIntervalMs); consumerNetworkThread.runOnce(); @@ -211,10 +206,11 @@ public void testSendUnsentRequests() { verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong()); } - @Test - public void testRunOnceRecordTimeBetweenNetworkThreadPoll() { + @ParameterizedTest + @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") + public void testRunOnceRecordTimeBetweenNetworkThreadPoll(String groupName) { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( new LogContext(), time, @@ -233,22 +229,23 @@ public void testRunOnceRecordTimeBetweenNetworkThreadPoll() { assertEquals( 10, (double) metrics.metric( - metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP) + metrics.metricName("time-between-network-thread-poll-avg", groupName) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP) + metrics.metricName("time-between-network-thread-poll-max", groupName) ).metricValue() ); } } - @Test - public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime() { + 
@ParameterizedTest + @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") + public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime(String groupName) { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( new LogContext(), time, @@ -271,21 +268,74 @@ public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTi assertEquals( 0, (double) metrics.metric( - metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP) + metrics.metricName("application-event-queue-size", groupName) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP) + metrics.metricName("application-event-queue-time-avg", groupName) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP) + metrics.metricName("application-event-queue-time-max", groupName) ).metricValue() ); } } + + @Test + public void testNetworkClientDelegateInitializeResourcesError() { + Supplier networkClientDelegateSupplier = () -> { + throw new KafkaException("Injecting NetworkClientDelegate initialization failure"); + }; + Supplier requestManagersSupplier = () -> requestManagers; + testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); + } + + @Test + public void testRequestManagersInitializeResourcesError() { + Supplier networkClientDelegateSupplier = () -> networkClientDelegate; + Supplier requestManagersSupplier = () -> { + throw new KafkaException("Injecting RequestManagers initialization failure"); + }; + testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); + } + + @Test + public void testNetworkClientDelegateAndRequestManagersInitializeResourcesError() { + Supplier networkClientDelegateSupplier = () -> { + throw new KafkaException("Injecting NetworkClientDelegate initialization failure"); + }; + Supplier requestManagersSupplier = () -> { + throw new KafkaException("Injecting RequestManagers initialization failure"); + }; + testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); + } + + /** + * Tests that when an error occurs during {@link ConsumerNetworkThread#initializeResources()} that the + * logic in {@link ConsumerNetworkThread#cleanup()} will not throw errors when closing. + */ + private void testInitializeResourcesError(Supplier networkClientDelegateSupplier, + Supplier requestManagersSupplier) { + // A new ConsumerNetworkThread is created because the shared one doesn't have any issues initializing its + // resources. However, most of the mocks can be reused, so this is mostly boilerplate except for the error + // when a supplier is invoked. 
+ try (ConsumerNetworkThread thread = new ConsumerNetworkThread( + new LogContext(), + time, + applicationEventQueue, + applicationEventReaper, + () -> applicationEventProcessor, + networkClientDelegateSupplier, + requestManagersSupplier, + asyncConsumerMetrics + )) { + assertThrows(KafkaException.class, thread::initializeResources, "initializeResources should fail because one or more Supplier throws an error on get()"); + assertDoesNotThrow(thread::cleanup, "cleanup() should not cause an error because all references are checked before use"); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java index b3c7429323147..5b2f6d6f48e6f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java @@ -32,7 +32,6 @@ import java.time.Duration; import java.util.Arrays; -import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -178,7 +177,7 @@ public void testWakeup() throws Exception { try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) { final Thread waitingThread = new Thread(() -> { final Timer timer = time.timer(Duration.ofMinutes(1)); - fetchBuffer.awaitNotEmpty(timer); + fetchBuffer.awaitWakeup(timer); }); waitingThread.start(); fetchBuffer.wakeup(); @@ -204,6 +203,6 @@ private CompletedFetch completedFetch(TopicPartition tp) { * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicPartition... partitions) { - return new HashSet<>(Arrays.asList(partitions)); + return Set.of(partitions); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java index 915c9ea9cfae9..c2b4e6ca4c8da 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java @@ -53,7 +53,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Properties; @@ -721,7 +720,7 @@ private FetchCollector createFetchCollector(final SubscriptionSt * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicPartition... partitions) { - return new HashSet<>(Arrays.asList(partitions)); + return Set.of(partitions); } private void buildDependencies() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index e25bccc1892b2..1378e4b53a1e2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -382,7 +382,7 @@ public void testFetcherCloseClosesFetchSessionsInBroker() { // NOTE: by design the FetchRequestManager doesn't perform network I/O internally. That means that calling // the close() method with a Timer will NOT send out the close session requests on close. 
The network // I/O logic is handled inside ConsumerNetworkThread.runAtClose, so we need to run that logic here. - ConsumerNetworkThread.runAtClose(singletonList(Optional.of(fetcher)), networkClientDelegate, time.milliseconds()); + ConsumerNetworkThread.runAtClose(List.of(fetcher), networkClientDelegate, time.milliseconds()); // the network is polled during the last state of clean up. networkClientDelegate.poll(time.timer(1)); // validate that closing the fetcher has sent a request with final epoch. 2 requests are sent, one for the @@ -1733,7 +1733,7 @@ public void testFetchPositionAfterException() { .setPartitionIndex(tp0.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); networkClientDelegate.poll(time.timer(0)); List> allFetchedRecords = new ArrayList<>(); @@ -1794,7 +1794,7 @@ public void testCompletedFetchRemoval() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(partialRecords)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); networkClientDelegate.poll(time.timer(0)); List> fetchedRecords = new ArrayList<>(); @@ -1815,7 +1815,7 @@ public void testCompletedFetchRemoval() { assertEquals(1, oorExceptions.size()); OffsetOutOfRangeException oor = oorExceptions.get(0); assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0)); - assertEquals(oor.offsetOutOfRangePartitions().size(), 1); + assertEquals(1, oor.offsetOutOfRangePartitions().size()); fetchRecordsInto(fetchedRecords); @@ -1865,7 +1865,7 @@ public void testSeekBeforeException() { .setPartitionIndex(tp1.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); networkClientDelegate.poll(time.timer(0)); assertEquals(1, fetchRecords().get(tp0).size()); @@ -2113,7 +2113,7 @@ public void testFetchResponseMetrics() { } assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData)); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData, List.of())); networkClientDelegate.poll(time.timer(0)); Map>> fetchedRecords = fetchRecords(); @@ -2185,7 +2185,7 @@ public void testFetchResponseMetricsWithOnePartitionError() { .setLogStartOffset(0)); assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); networkClientDelegate.poll(time.timer(0)); collectFetch(); @@ -2231,7 +2231,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { .setLogStartOffset(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("val".getBytes())))); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + 
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); networkClientDelegate.poll(time.timer(0)); collectFetch(); @@ -2359,7 +2359,7 @@ public void testReturnCommittedTransactions() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); } @Test @@ -2477,9 +2477,9 @@ public void testMultipleAbortMarkers() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); List> fetchedConsumerRecords = fetchedRecords.get(tp0); - Set expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2")); + Set expectedCommittedKeys = Set.of("commit1-1", "commit1-2"); Set actuallyCommittedKeys = new HashSet<>(); for (ConsumerRecord consumerRecord : fetchedConsumerRecords) { actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8)); @@ -2741,7 +2741,7 @@ public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); - assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + assignFromUser(Set.of(tp0, tp1)); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); @@ -2758,7 +2758,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); - FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); + FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1, List.of()); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -2784,7 +2784,7 @@ public void testConsumingViaIncrementalFetchRequests() { // The second response contains no new records. 
LinkedHashMap partitions2 = new LinkedHashMap<>(); - FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); + FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2, List.of()); client.prepareResponse(resp2); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); @@ -2801,7 +2801,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); - FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); + FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3, List.of()); client.prepareResponse(resp3); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); @@ -2854,7 +2854,7 @@ public void testEmptyControlBatch() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); } private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { @@ -2939,8 +2939,8 @@ public void testSubscriptionPositionUpdatedWithEpoch() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(subscriptions.position(tp0).offset, 3L); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); + assertEquals(3L, subscriptions.position(tp0).offset); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); } @Test @@ -3110,7 +3110,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(selected.id(), 1); + assertEquals(1, selected.id()); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -3124,7 +3124,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(selected.id(), -1); + assertEquals(-1, selected.id()); } @Test @@ -3196,7 +3196,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + subscriptions.assignFromUser(Set.of(tp0, tp1)); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3246,7 +3246,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo .setLastStableOffset(FetchResponse.INVALID_LAST_STABLE_OFFSET) .setLogStartOffset(0) .setRecords(nextRecords)); - client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions), nodeId0); + client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions, List.of()), nodeId0); networkClientDelegate.poll(time.timer(0)); partitionRecords = fetchRecords(); assertFalse(partitionRecords.containsKey(tp0)); @@ -3289,7 +3289,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInform // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. 
- subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + subscriptions.assignFromUser(Set.of(tp0, tp1)); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3851,7 +3851,7 @@ private void prepareFetchResponses(Node node, Collection partiti }); client.prepareResponseFrom( - FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitionDataMap), + FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitionDataMap, List.of()), node ); } @@ -3906,7 +3906,7 @@ private FetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors .setPartitionIndex(tp.topicPartition().partition()) .setErrorCode(error.code()) .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK)); - return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records, @@ -3924,7 +3924,7 @@ private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords rec .setLogStartOffset(0) .setAbortedTransactions(abortedTransactions) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) { @@ -3950,7 +3950,7 @@ private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, Memo .setLastStableOffset(lastStableOffset) .setLogStartOffset(0) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3964,7 +3964,7 @@ private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords recor .setLogStartOffset(0) .setRecords(records) .setPreferredReadReplica(preferredReplicaId.orElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3977,7 +3977,7 @@ private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, .setLastStableOffset(lastStableOffset) .setLogStartOffset(logStartOffset) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } /** @@ -4125,6 +4125,7 @@ private void buildDependencies(MetricConfig metricConfig, properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); + 
properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConsumerConfig config = new ConsumerConfig(properties); networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index 45a747e04daa4..ee051a42ca81a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -1720,7 +1720,7 @@ public void testFetchPositionAfterException() { .setPartitionIndex(tp0.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); consumerClient.poll(time.timer(0)); List> allFetchedRecords = new ArrayList<>(); @@ -1781,7 +1781,7 @@ public void testCompletedFetchRemoval() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(partialRecords)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); consumerClient.poll(time.timer(0)); List> fetchedRecords = new ArrayList<>(); @@ -1802,7 +1802,7 @@ public void testCompletedFetchRemoval() { assertEquals(1, oorExceptions.size()); OffsetOutOfRangeException oor = oorExceptions.get(0); assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0)); - assertEquals(oor.offsetOutOfRangePartitions().size(), 1); + assertEquals(1, oor.offsetOutOfRangePartitions().size()); fetchRecordsInto(fetchedRecords); @@ -1852,7 +1852,7 @@ public void testSeekBeforeException() { .setPartitionIndex(tp1.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); consumerClient.poll(time.timer(0)); assertEquals(1, fetchRecords().get(tp0).size()); @@ -2100,7 +2100,7 @@ public void testFetchResponseMetrics() { } assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData)); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData, List.of())); consumerClient.poll(time.timer(0)); Map>> fetchedRecords = fetchRecords(); @@ -2172,7 +2172,7 @@ public void testFetchResponseMetricsWithOnePartitionError() { .setLogStartOffset(0)); assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); consumerClient.poll(time.timer(0)); collectFetch(); @@ -2218,7 +2218,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { .setLogStartOffset(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("val".getBytes())))); - 
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); consumerClient.poll(time.timer(0)); collectFetch(); @@ -2346,7 +2346,7 @@ public void testReturnCommittedTransactions() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); } @Test @@ -2464,9 +2464,9 @@ public void testMultipleAbortMarkers() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); List> fetchedConsumerRecords = fetchedRecords.get(tp0); - Set expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2")); + Set expectedCommittedKeys = Set.of("commit1-1", "commit1-2"); Set actuallyCommittedKeys = new HashSet<>(); for (ConsumerRecord consumerRecord : fetchedConsumerRecords) { actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8)); @@ -2728,7 +2728,7 @@ public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); - assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + assignFromUser(Set.of(tp0, tp1)); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); @@ -2745,7 +2745,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); - FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); + FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1, List.of()); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -2771,7 +2771,7 @@ public void testConsumingViaIncrementalFetchRequests() { // The second response contains no new records. 
LinkedHashMap partitions2 = new LinkedHashMap<>(); - FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); + FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2, List.of()); client.prepareResponse(resp2); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); @@ -2788,7 +2788,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); - FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); + FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3, List.of()); client.prepareResponse(resp3); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); @@ -2922,7 +2922,7 @@ private void verifySessionPartitions() { .setLogStartOffset(0) .setRecords(buildRecords(offset, 2, offset))); } - client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap)); + client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap, List.of())); consumerClient.poll(time.timer(0)); } } @@ -2985,7 +2985,7 @@ public void testFetcherSessionEpochUpdate() throws Exception { .setLogStartOffset(0) .setRecords(buildRecords(nextOffset, 2, nextOffset))); nextOffset += 2; - client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap)); + client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap, List.of())); consumerClient.poll(time.timer(0)); } } @@ -3054,7 +3054,7 @@ public void testEmptyControlBatch() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(fetchedRecords.get(tp0).size(), 2); + assertEquals(2, fetchedRecords.get(tp0).size()); } private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { @@ -3139,8 +3139,8 @@ public void testSubscriptionPositionUpdatedWithEpoch() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(subscriptions.position(tp0).offset, 3L); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); + assertEquals(3L, subscriptions.position(tp0).offset); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); } @Test @@ -3217,8 +3217,8 @@ public void testTruncationDetected() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(subscriptions.position(tp0).offset, 3L); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); + assertEquals(3L, subscriptions.position(tp0).offset); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); } @Test @@ -3388,7 +3388,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(selected.id(), 1); + assertEquals(1, selected.id()); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -3402,7 +3402,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(selected.id(), -1); + assertEquals(-1, selected.id()); } @Test @@ -3473,7 +3473,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo // Setup so that tp0 & tp1 are subscribed and will be fetched from. 
// Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + subscriptions.assignFromUser(Set.of(tp0, tp1)); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3523,7 +3523,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo .setLastStableOffset(FetchResponse.INVALID_LAST_STABLE_OFFSET) .setLogStartOffset(0) .setRecords(nextRecords)); - client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions), nodeId0); + client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions, List.of()), nodeId0); consumerClient.poll(time.timer(0)); partitionRecords = fetchRecords(); assertFalse(partitionRecords.containsKey(tp0)); @@ -3566,7 +3566,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInform // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + subscriptions.assignFromUser(Set.of(tp0, tp1)); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3689,7 +3689,7 @@ private FetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors .setPartitionIndex(tp.topicPartition().partition()) .setErrorCode(error.code()) .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK)); - return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records, @@ -3707,7 +3707,7 @@ private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords rec .setLogStartOffset(0) .setAbortedTransactions(abortedTransactions) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) { @@ -3733,7 +3733,7 @@ private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, Memo .setLastStableOffset(lastStableOffset) .setLogStartOffset(0) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3747,7 +3747,7 @@ private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords recor .setLogStartOffset(0) .setRecords(records) .setPreferredReadReplica(preferredReplicaId.orElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3760,7 +3760,7 @@ private 
FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, .setLastStableOffset(lastStableOffset) .setLogStartOffset(logStartOffset) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); } /** diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java index 49102da976603..2f3a878929954 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java @@ -45,9 +45,9 @@ public void setUp() { heartbeatIntervalMs, "group_id", Optional.empty(), + null, retryBackoffMs, - retryBackoffMaxMs, - true); + retryBackoffMaxMs); heartbeat = new Heartbeat(rebalanceConfig, time); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java index c75ee906e535b..7fa9f7e31f145 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java @@ -18,23 +18,27 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; +import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.Test; +import java.util.Set; + import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; class KafkaConsumerMetricsTest { private static final long METRIC_VALUE = 123L; - private static final String CONSUMER_GROUP_PREFIX = "consumer"; private static final String CONSUMER_METRIC_GROUP = "consumer-metrics"; private static final String COMMIT_SYNC_TIME_TOTAL = "commit-sync-time-ns-total"; private static final String COMMITTED_TIME_TOTAL = "committed-time-ns-total"; private final Metrics metrics = new Metrics(); private final KafkaConsumerMetrics consumerMetrics - = new KafkaConsumerMetrics(metrics, CONSUMER_GROUP_PREFIX); + = new KafkaConsumerMetrics(metrics); @Test public void shouldRecordCommitSyncTime() { @@ -64,14 +68,39 @@ public void shouldRemoveMetricsOnClose() { assertMetricRemoved(COMMITTED_TIME_TOTAL); } + @Test + public void checkMetricsAfterCreation() { + Set expectedMetrics = Set.of( + metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP), + metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP), + metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP) + ); + expectedMetrics.forEach( + metricName -> assertTrue( + metrics.metrics().containsKey(metricName), + "Missing metric: " + metricName + ) + ); + consumerMetrics.close(); + expectedMetrics.forEach( + metricName -> assertFalse( + metrics.metrics().containsKey(metricName), + "Metric 
present after close: " + metricName + ) + ); + } + private void assertMetricRemoved(final String name) { assertNull(metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP))); } private void assertMetricValue(final String name) { assertEquals( - metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP)).metricValue(), - (double) METRIC_VALUE + (double) METRIC_VALUE, + metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP)).metricValue() ); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java index 4ff967e1f021a..0347423137b57 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java @@ -40,6 +40,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.util.ArrayList; import java.util.Collections; @@ -49,11 +51,11 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -246,10 +248,11 @@ public void testPropagateMetadataErrorWithErrorEvent() { assertEquals(authException, ((ErrorEvent) event).error()); } - @Test - public void testRecordUnsentRequestsQueueTime() throws Exception { + @ParameterizedTest + @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") + public void testRecordUnsentRequestsQueueTime(String groupName) throws Exception { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false, asyncConsumerMetrics)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); networkClientDelegate.add(unsentRequest); @@ -261,19 +264,19 @@ public void testRecordUnsentRequestsQueueTime() throws Exception { assertEquals( 0, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP) + metrics.metricName("unsent-requests-queue-size", groupName) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP) + metrics.metricName("unsent-requests-queue-time-avg", groupName) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP) + 
metrics.metricName("unsent-requests-queue-time-max", groupName) ).metricValue() ); } @@ -290,6 +293,7 @@ public NetworkClientDelegate newNetworkClientDelegate(boolean notifyMetadataErro properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.put(GROUP_ID_CONFIG, GROUP_ID); properties.put(REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_MS); + properties.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new NetworkClientDelegate(time, new ConsumerConfig(properties), logContext, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java index 3b7fe70ea4cf6..182900c0207ac 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java @@ -246,7 +246,7 @@ public void testFetchOffsetErrors() { assertTrue(subscriptions.hasValidPosition(tp0)); assertFalse(subscriptions.isOffsetResetNeeded(tp0)); assertTrue(subscriptions.isFetchable(tp0)); - assertEquals(subscriptions.position(tp0).offset, 5L); + assertEquals(5L, subscriptions.position(tp0).offset); } @Test @@ -395,7 +395,7 @@ public void testListOffsetUpdateEpoch() { assertFalse(subscriptions.isOffsetResetNeeded(tp0)); assertTrue(metadata.updateRequested()); - assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals((long) epoch, 2)); + assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals(2, (long) epoch)); } @Test @@ -902,7 +902,7 @@ public void testGetOffsetsIncludesLeaderEpoch() { ListOffsetsRequest offsetRequest = (ListOffsetsRequest) body; int epoch = offsetRequest.topics().get(0).partitions().get(0).currentLeaderEpoch(); assertTrue(epoch != ListOffsetsResponse.UNKNOWN_EPOCH, "Expected Fetcher to set leader epoch in request"); - assertEquals(epoch, 99, "Expected leader epoch to match epoch from metadata update"); + assertEquals(99, epoch, "Expected leader epoch to match epoch from metadata update"); return true; } else { fail("Should have seen ListOffsetRequest"); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java index a48b32b43efb6..8a3617d61c752 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java @@ -102,8 +102,8 @@ public void testOkResponse() { assertTrue(result.partitionsToRetry().isEmpty()); assertTrue(result.endOffsets().containsKey(tp0)); assertEquals(result.endOffsets().get(tp0).errorCode(), Errors.NONE.code()); - assertEquals(result.endOffsets().get(tp0).leaderEpoch(), 1); - assertEquals(result.endOffsets().get(tp0).endOffset(), 10L); + assertEquals(1, result.endOffsets().get(tp0).leaderEpoch()); + assertEquals(10L, result.endOffsets().get(tp0).endOffset()); } @Test @@ -121,7 +121,7 @@ public void testUnauthorizedTopic() { consumerClient.pollNoWakeup(); assertTrue(future.failed()); - assertEquals(future.exception().getClass(), TopicAuthorizationException.class); + assertEquals(TopicAuthorizationException.class, future.exception().getClass()); assertTrue(((TopicAuthorizationException) future.exception()).unauthorizedTopics().contains(tp0.topic())); } diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java index cfbf13a1dab89..ed96b81790002 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java @@ -51,7 +51,6 @@ import org.mockito.ArgumentCaptor; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -774,7 +773,7 @@ public void testUpdatePositionsDoesNotResetPositionBeforeRetrievingOffsetsForNew // tp2 added to the assignment when the Offset Fetch request is already sent including tp1 only TopicPartition tp2 = new TopicPartition("topic2", 2); - Set initPartitions2 = new HashSet<>(Arrays.asList(tp1, tp2)); + Set initPartitions2 = Set.of(tp1, tp2); mockAssignedPartitionsMissingPositions(initPartitions2, initPartitions2, leaderAndEpoch); // tp2 requires a position, but shouldn't be reset after receiving the offset fetch response that will only diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java index 499c7f8f39da5..67628c513406a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java @@ -26,10 +26,13 @@ import org.junit.jupiter.api.Test; +import java.util.Map; import java.util.Optional; import java.util.Properties; +import java.util.UUID; import static org.apache.kafka.test.TestUtils.requiredConsumerConfig; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; @@ -65,8 +68,53 @@ public void testMemberStateListenerRegistered() { listener, Optional.empty() ).get(); - requestManagers.consumerMembershipManager.ifPresent( - membershipManager -> assertTrue(membershipManager.stateListeners().contains(listener)) + assertTrue(requestManagers.consumerMembershipManager.isPresent()); + assertTrue(requestManagers.streamsMembershipManager.isEmpty()); + assertTrue(requestManagers.streamsGroupHeartbeatRequestManager.isEmpty()); + + assertEquals(2, requestManagers.consumerMembershipManager.get().stateListeners().size()); + assertTrue(requestManagers.consumerMembershipManager.get().stateListeners().stream() + .anyMatch(m -> m instanceof CommitRequestManager)); + assertTrue(requestManagers.consumerMembershipManager.get().stateListeners().contains(listener)); + } + + @Test + public void testStreamMemberStateListenerRegistered() { + + final MemberStateListener listener = (memberEpoch, memberId) -> { }; + + final Properties properties = requiredConsumerConfig(); + properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroup"); + final ConsumerConfig config = new ConsumerConfig(properties); + final GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( + config, + GroupRebalanceConfig.ProtocolType.CONSUMER ); + final RequestManagers requestManagers = RequestManagers.supplier( + new MockTime(), + new LogContext(), + mock(BackgroundEventHandler.class), + mock(ConsumerMetadata.class), + mock(SubscriptionState.class), + mock(FetchBuffer.class), + config, + 
groupRebalanceConfig, + mock(ApiVersions.class), + mock(FetchMetricsManager.class), + () -> mock(NetworkClientDelegate.class), + Optional.empty(), + new Metrics(), + mock(OffsetCommitCallbackInvoker.class), + listener, + Optional.of(new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of())) + ).get(); + assertTrue(requestManagers.streamsMembershipManager.isPresent()); + assertTrue(requestManagers.streamsGroupHeartbeatRequestManager.isPresent()); + assertTrue(requestManagers.consumerMembershipManager.isEmpty()); + + assertEquals(2, requestManagers.streamsMembershipManager.get().stateListeners().size()); + assertTrue(requestManagers.streamsMembershipManager.get().stateListeners().stream() + .anyMatch(m -> m instanceof CommitRequestManager)); + assertTrue(requestManagers.streamsMembershipManager.get().stateListeners().contains(listener)); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java index 73efb010c8be0..a1814fd935c9c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java @@ -246,8 +246,8 @@ public void testCorruptedMessage() { // Record 1 then results in an empty batch batch = completedFetch.fetchRecords(deserializers, 10, false); - assertEquals(RecordDeserializationException.class, batch.getException().getClass()); - RecordDeserializationException thrown = (RecordDeserializationException) batch.getException(); + assertEquals(RecordDeserializationException.class, batch.getException().cause().getClass()); + RecordDeserializationException thrown = (RecordDeserializationException) batch.getException().cause(); assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.KEY, thrown.origin()); assertEquals(1, thrown.offset()); assertEquals(TOPIC_NAME, thrown.topicPartition().topic()); @@ -264,8 +264,8 @@ public void testCorruptedMessage() { // Record 2 then results in an empty batch, because record 1 has now been skipped batch = completedFetch.fetchRecords(deserializers, 10, false); - assertEquals(RecordDeserializationException.class, batch.getException().getClass()); - thrown = (RecordDeserializationException) batch.getException(); + assertEquals(RecordDeserializationException.class, batch.getException().cause().getClass()); + thrown = (RecordDeserializationException) batch.getException().cause(); assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, thrown.origin()); assertEquals(2L, thrown.offset()); assertEquals(TOPIC_NAME, thrown.topicPartition().topic()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index 831e64632c716..a4268b7eca0a7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -47,6 +47,7 @@ import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.RequestHeaderData; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; +import 
org.apache.kafka.common.message.ShareFetchRequestData; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; @@ -116,6 +117,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -994,12 +996,14 @@ public void testShareFetchWithSubscriptionChangeMultipleNodes() { // Verify the builder data for node0. assertEquals(1, builder1.data().topics().size()); - assertEquals(tip0.topicId(), builder1.data().topics().get(0).topicId()); - assertEquals(1, builder1.data().topics().get(0).partitions().size()); - assertEquals(0, builder1.data().topics().get(0).partitions().get(0).partitionIndex()); - assertEquals(1, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().size()); - assertEquals(0L, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).firstOffset()); - assertEquals(2L, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).lastOffset()); + ShareFetchRequestData.FetchTopic fetchTopic = builder1.data().topics().stream().findFirst().get(); + assertEquals(tip0.topicId(), fetchTopic.topicId()); + assertEquals(1, fetchTopic.partitions().size()); + ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().stream().findFirst().get(); + assertEquals(0, fetchPartition.partitionIndex()); + assertEquals(1, fetchPartition.acknowledgementBatches().size()); + assertEquals(0L, fetchPartition.acknowledgementBatches().get(0).firstOffset()); + assertEquals(2L, fetchPartition.acknowledgementBatches().get(0).lastOffset()); assertEquals(1, builder1.data().forgottenTopicsData().size()); assertEquals(tip0.topicId(), builder1.data().forgottenTopicsData().get(0).topicId()); @@ -1008,9 +1012,10 @@ public void testShareFetchWithSubscriptionChangeMultipleNodes() { // Verify the builder data for node1. 
assertEquals(1, builder2.data().topics().size()); - assertEquals(tip1.topicId(), builder2.data().topics().get(0).topicId()); - assertEquals(1, builder2.data().topics().get(0).partitions().size()); - assertEquals(1, builder2.data().topics().get(0).partitions().get(0).partitionIndex()); + fetchTopic = builder2.data().topics().stream().findFirst().get(); + assertEquals(tip1.topicId(), fetchTopic.topicId()); + assertEquals(1, fetchTopic.partitions().size()); + assertEquals(1, fetchTopic.partitions().stream().findFirst().get().partitionIndex()); } @Test @@ -1049,9 +1054,10 @@ public void testShareFetchWithSubscriptionChangeMultipleNodesEmptyAcknowledgemen ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); assertEquals(1, builder.data().topics().size()); - assertEquals(tip1.topicId(), builder.data().topics().get(0).topicId()); - assertEquals(1, builder.data().topics().get(0).partitions().size()); - assertEquals(1, builder.data().topics().get(0).partitions().get(0).partitionIndex()); + ShareFetchRequestData.FetchTopic fetchTopic = builder.data().topics().stream().findFirst().get(); + assertEquals(tip1.topicId(), fetchTopic.topicId()); + assertEquals(1, fetchTopic.partitions().size()); + assertEquals(1, fetchTopic.partitions().stream().findFirst().get().partitionIndex()); assertEquals(0, builder.data().forgottenTopicsData().size()); } @@ -1402,12 +1408,86 @@ public void testPiggybackAcknowledgementsOnInitialShareSessionError() { shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); + assertEquals(1, pollResult.unsentRequests.size()); + ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); + assertEquals(1, builder.data().topics().size()); + // We should not add the acknowledgements as part of the request. + assertEquals(0, builder.data().topics().find(tip0.topicId()).partitions().find(0).acknowledgementBatches().size()); + + assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); + assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + } + + @Test + public void testPiggybackAcknowledgementsOnInitialShareSessionErrorSubscriptionChange() { + buildRequestManager(); + shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + assignFromSubscribed(singleton(tp0)); sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + fetchRecords(); + + // Simulate a broker restart, but no leader change, this resets share session epoch to 0. + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + client.prepareResponse(fetchResponseWithTopLevelError(tip0, Errors.SHARE_SESSION_NOT_FOUND)); + networkClientDelegate.poll(time.timer(0)); + + // Simulate a metadata update with no topics in the response. + client.updateMetadata( + RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), + tp -> validLeaderEpoch, null, false)); + + // The acknowledgements for the initial fetch from tip0 are processed now and sent to the background thread. 
+ Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + + assertEquals(0, completedAcknowledgements.size()); + + // Next fetch would not include any acknowledgements. + NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); + assertEquals(0, pollResult.unsentRequests.size()); + + // We should fail any waiting acknowledgements for tip-0 as it would have a share session epoch equal to 0. assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); } + @Test + public void testPiggybackAcknowledgementsOnInitialShareSession_ShareSessionNotFound() { + buildRequestManager(); + shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + assignFromSubscribed(singleton(tp0)); + sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + + fetchRecords(); + + // The acknowledgements for the initial fetch from tip0 are processed now and sent to the background thread. + Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + + // We attempt to send the acknowledgements piggybacking on the fetch. + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + // Simulate a broker restart, but no leader change, this resets share session epoch to 0. + client.prepareResponse(fetchResponseWithTopLevelError(tip0, Errors.SHARE_SESSION_NOT_FOUND)); + networkClientDelegate.poll(time.timer(0)); + + // We would complete these acknowledgements with the error code from the response. + assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); + assertEquals(Errors.SHARE_SESSION_NOT_FOUND.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + + // Next fetch would proceed as expected and would not include any acknowledgements. 
+ NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); + assertEquals(1, pollResult.unsentRequests.size()); + ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); + assertEquals(0, builder.data().topics().find(topicId).partitions().find(0).acknowledgementBatches().size()); + } + @Test public void testInvalidDefaultRecordBatch() { buildRequestManager(); @@ -2381,6 +2461,36 @@ void testWhenLeadershipChangedAfterDisconnected() { assertEquals(1, fetchedRecords.size()); } + @Test + public void testCloseInternalClosesShareFetchMetricsManager() throws Exception { + buildRequestManager(); + + // Define all sensor names that should be created and removed + String[] sensorNames = { + "fetch-throttle-time", + "bytes-fetched", + "records-fetched", + "fetch-latency", + "sent-acknowledgements", + "failed-acknowledgements" + }; + + // Verify that sensors exist before closing + for (String sensorName : sensorNames) { + assertNotNull(metrics.getSensor(sensorName), + "Sensor " + sensorName + " should exist before closing"); + } + + // Close the request manager + shareConsumeRequestManager.close(); + + // Verify that all sensors are removed after closing + for (String sensorName : sensorNames) { + assertNull(metrics.getSensor(sensorName), + "Sensor " + sensorName + " should be removed after closing"); + } + } + private ShareFetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors error) { Map partitions = Map.of(tp, new ShareFetchResponseData.PartitionData() @@ -2573,6 +2683,7 @@ private void buildDependencies(MetricConfig metricConfig, properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); + properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConsumerConfig config = new ConsumerConfig(properties); networkClientDelegate = spy(new TestableNetworkClientDelegate( time, config, logContext, client, metadata, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java index 64c729730fbc4..5dddd0772df2f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java @@ -26,11 +26,13 @@ import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgeOnCloseEvent; +import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackEvent; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackRegistrationEvent; import org.apache.kafka.clients.consumer.internals.events.ShareFetchEvent; import org.apache.kafka.clients.consumer.internals.events.ShareSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.ShareUnsubscribeEvent; import org.apache.kafka.clients.consumer.internals.events.StopFindCoordinatorOnCloseEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.KafkaException; 
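[Illustrative aside, not part of the patch] The new tests nearby (testCloseInternalClosesShareFetchMetricsManager, testCloseRemovesAllSensors) assert that closing the metrics manager removes each named sensor from the registry. A minimal sketch of that sensor lifecycle against the public Metrics API is below; the manual removeSensor call stands in for what close() is expected to do, and uses one of the sensor names listed in those tests.

    import org.apache.kafka.common.metrics.Metrics;

    // Sketch only: register a sensor, look it up by name, remove it, and confirm it is gone,
    // mirroring the assertNotNull / close() / assertNull pattern in the tests above.
    public class SensorLifecycleSketch {
        public static void main(String[] args) {
            try (Metrics metrics = new Metrics()) {
                metrics.sensor("fetch-latency");                                  // sensor name taken from the tests
                System.out.println(metrics.getSensor("fetch-latency") != null);   // true: sensor registered
                metrics.removeSensor("fetch-latency");                            // what close() is expected to do per sensor
                System.out.println(metrics.getSensor("fetch-latency") == null);   // true: sensor removed
            }
        }
    }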
import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; @@ -41,7 +43,9 @@ import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -50,6 +54,8 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentMatchers; import org.mockito.InOrder; import org.mockito.Mockito; @@ -72,8 +78,10 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonList; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -204,11 +212,20 @@ public void testFailConstructor() { props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id"); props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class"); final ConsumerConfig config = new ConsumerConfig(props); - KafkaException ce = assertThrows( + + try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + KafkaException ce = assertThrows( KafkaException.class, () -> newConsumer(config)); - assertTrue(ce.getMessage().contains("Failed to construct Kafka share consumer"), "Unexpected exception message: " + ce.getMessage()); - assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); + assertTrue(ce.getMessage().contains("Failed to construct Kafka share consumer"), "Unexpected exception message: " + ce.getMessage()); + assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); + + boolean npeLogged = appender.getEvents().stream() + .flatMap(event -> event.getThrowableInfo().stream()) + .anyMatch(str -> str.contains("NullPointerException")); + + assertFalse(npeLogged, "Unexpected NullPointerException during consumer construction"); + } } @Test @@ -672,6 +689,32 @@ public void testEnsurePollEventSentOnConsumerPoll() { verify(applicationEventHandler).addAndGet(any(ShareAcknowledgeOnCloseEvent.class)); } + @ParameterizedTest + @EnumSource(value = Errors.class, names = {"TOPIC_AUTHORIZATION_FAILED", "GROUP_AUTHORIZATION_FAILED", "INVALID_TOPIC_EXCEPTION"}) + public void testCloseWithBackgroundQueueErrorsAfterUnsubscribe(Errors error) { + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); + consumer = newConsumer(subscriptions); + + // Complete the acknowledge on close event successfully + completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); + + // Complete the unsubscribe event successfully + completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); + + // Mock the 
applicationEventHandler to add errors to the queue after unsubscribe + doAnswer(invocation -> { + // Add errors to the queue after unsubscribe event is processed + backgroundEventQueue.add(new ErrorEvent(error.exception())); + return null; + }).when(applicationEventHandler).add(any(StopFindCoordinatorOnCloseEvent.class)); + + // Close should complete successfully despite the errors in the background queue + assertDoesNotThrow(() -> consumer.close()); + + // Verify that the background queue was processed + assertTrue(backgroundEventQueue.isEmpty(), "Background queue should be empty after close"); + } + private Properties requiredConsumerPropertiesAndGroupId(final String groupId) { final Properties props = requiredConsumerProperties(); props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); @@ -740,6 +783,22 @@ public void testProcessBackgroundEventsWithoutDelay() { assertEquals(1000, timer.remainingMs()); } + @Test + public void testRecordBackgroundEventQueueSize() { + consumer = newConsumer(); + Metrics metrics = consumer.metricsRegistry(); + AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics(); + + ShareAcknowledgementCommitCallbackEvent event = new ShareAcknowledgementCommitCallbackEvent(Map.of()); + backgroundEventQueue.add(event); + asyncConsumerMetrics.recordBackgroundEventQueueSize(1); + + assertEquals(1, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_SHARE_METRIC_GROUP)).metricValue()); + + consumer.processBackgroundEvents(); + assertEquals(0, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_SHARE_METRIC_GROUP)).metricValue()); + } + /** * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents} * handles the case where the {@link Future} does not complete within the timeout. diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java index f7039e838b775..2a06324f72a7b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java @@ -33,8 +33,6 @@ import org.junit.jupiter.api.Test; import java.time.Duration; -import java.util.Arrays; -import java.util.HashSet; import java.util.Properties; import java.util.Set; import java.util.stream.Collectors; @@ -181,6 +179,6 @@ private ShareCompletedFetch completedFetch(TopicIdPartition tp) { * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicIdPartition... 
partitions) { - return new HashSet<>(Arrays.asList(partitions)); + return Set.of(partitions); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java index 27b44966f0a8d..79b5deecadb56 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java @@ -30,8 +30,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.IOException; + import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; class ShareFetchMetricsManagerTest { private static final double EPSILON = 0.0001; @@ -114,6 +118,41 @@ public void testRecordsFetched() { assertEquals(8, (double) getMetric(shareFetchMetricsRegistry.recordsPerRequestAvg).metricValue(), EPSILON); } + @Test + public void testAcknowledgements() { + shareFetchMetricsManager.recordAcknowledgementSent(5); + shareFetchMetricsManager.recordFailedAcknowledgements(2); + + assertEquals(5, (double) getMetric(shareFetchMetricsRegistry.acknowledgementSendTotal).metricValue()); + assertEquals(2, (double) getMetric(shareFetchMetricsRegistry.acknowledgementErrorTotal).metricValue()); + } + + @Test + public void testCloseRemovesAllSensors() throws IOException { + // Define all sensor names that should be created and removed + String[] sensorNames = { + "fetch-throttle-time", + "bytes-fetched", + "records-fetched", + "fetch-latency", + "sent-acknowledgements", + "failed-acknowledgements" + }; + + // Verify that sensors exist before closing + for (String sensorName : sensorNames) { + assertNotNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should exist before closing"); + } + + // Close the metrics manager + shareFetchMetricsManager.close(); + + // Verify that all sensors are removed + for (String sensorName : sensorNames) { + assertNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should be removed after closing"); + } + } + private KafkaMetric getMetric(MetricNameTemplate name) { return metrics.metric(metrics.metricInstance(name)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java index 856cdd29493aa..8952271b250d5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java @@ -60,6 +60,7 @@ import java.util.concurrent.TimeUnit; import static org.apache.kafka.clients.consumer.internals.ShareHeartbeatRequestManager.SHARE_PROTOCOL_NOT_SUPPORTED_MSG; +import static org.apache.kafka.clients.consumer.internals.ShareHeartbeatRequestManager.SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -439,7 +440,7 @@ public void 
testUnsupportedVersionGeneratedOnTheBroker(String errorMsg) { } @ParameterizedTest - @ValueSource(strings = {SHARE_PROTOCOL_NOT_SUPPORTED_MSG}) + @ValueSource(strings = {SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG}) public void testUnsupportedVersionGeneratedOnTheClient(String errorMsg) { mockResponseWithException(new UnsupportedVersionException(errorMsg), false); @@ -692,11 +693,11 @@ private ClientResponse createHeartbeatResponse( private ClientResponse createHeartbeatResponseWithException( final NetworkClientDelegate.UnsentRequest request, final UnsupportedVersionException exception, - final boolean isFromClient + final boolean isFromBroker ) { ShareGroupHeartbeatResponse response = null; - if (!isFromClient) { - response = new ShareGroupHeartbeatResponse(null); + if (isFromBroker) { + response = new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData().setErrorCode(Errors.UNSUPPORTED_VERSION.code())); } return new ClientResponse( new RequestHeader(ApiKeys.SHARE_GROUP_HEARTBEAT, ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion(), "client-id", 1), @@ -705,7 +706,7 @@ private ClientResponse createHeartbeatResponseWithException( time.milliseconds(), time.milliseconds(), false, - exception, + isFromBroker ? null : exception, null, response); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java index 7c4c5684bcce0..e95f8dd86f26b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData.Assignment; -import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareGroupHeartbeatRequest; @@ -57,7 +56,6 @@ import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; import static org.apache.kafka.common.requests.ShareGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; @@ -314,6 +312,10 @@ private void assertTransitionToUnsubscribeOnHBSentAndWaitForResponseToCompleteLe membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData.Assignment(), membershipManager.memberId())); + assertFalse(sendLeave.isDone(), "Send leave operation should not complete until a leave response is received"); + + membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); + assertSendLeaveCompleted(membershipManager, sendLeave); } @@ -520,6 +522,9 @@ public void testHeartbeatSuccessfulResponseWhenLeavingGroupCompletesLeave() { assertFalse(leaveResult.isDone()); membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(createAssignment(true), membershipManager.memberId())); + assertFalse(leaveResult.isDone()); + + 
membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); assertSendLeaveCompleted(membershipManager, leaveResult); } @@ -563,10 +568,46 @@ public void testIgnoreHeartbeatResponseWhenNotInGroup(MemberState state) { assertEquals(state, membershipManager.state()); verify(responseData, never()).memberId(); - verify(responseData, never()).memberEpoch(); + // In unsubscribed, we check if we received a leave group response, so we do verify member epoch. + if (state != MemberState.UNSUBSCRIBED) { + verify(responseData, never()).memberEpoch(); + } verify(responseData, never()).assignment(); } + @Test + public void testIgnoreLeaveResponseWhenNotLeavingGroup() { + ShareMembershipManager membershipManager = createMemberInStableState(); + + CompletableFuture leaveResult = membershipManager.leaveGroup(); + + // Send leave request, transitioning to UNSUBSCRIBED state + membershipManager.onHeartbeatRequestGenerated(); + assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); + + // Receive a previous heartbeat response, which should be ignored + membershipManager.onHeartbeatSuccess(new ShareGroupHeartbeatResponse( + new ShareGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(membershipManager.memberId()) + .setMemberEpoch(MEMBER_EPOCH) + )); + assertFalse(leaveResult.isDone()); + + // Receive a leave heartbeat response, which should unblock the consumer + membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); + assertTrue(leaveResult.isDone()); + + // Share unblocks and updates subscription + membershipManager.onSubscriptionUpdated(); + membershipManager.onConsumerPoll(); + + membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); + + assertEquals(MemberState.JOINING, membershipManager.state()); + assertEquals(0, membershipManager.memberEpoch()); + } + @Test public void testLeaveGroupWhenStateIsReconciling() { ShareMembershipManager membershipManager = mockJoinAndReceiveAssignment(false); @@ -945,7 +986,6 @@ public void testReconcileNewPartitionsAssignedWhenNoPartitionOwned() { @Test public void testReconcileNewPartitionsAssignedWhenOtherPartitionsOwned() { - // ANDREW MANGLED THIS Uuid topicId = Uuid.randomUuid(); String topicName = "topic1"; TopicIdPartition ownedPartition = new TopicIdPartition(topicId, new TopicPartition(topicName, 0)); @@ -1228,23 +1268,6 @@ private ShareMembershipManager mockStaleMember() { return membershipManager; } - private void mockPartitionOwnedAndNewPartitionAdded(String topicName, - int partitionOwned, - int partitionAdded, - CounterConsumerRebalanceListener listener, - ShareMembershipManager membershipManager) { - Uuid topicId = Uuid.randomUuid(); - TopicPartition owned = new TopicPartition(topicName, partitionOwned); - when(subscriptionState.assignedPartitions()).thenReturn(Collections.singleton(owned)); - membershipManager.updateAssignment(Collections.singletonMap(topicId, mkSortedSet(partitionOwned))); - when(metadata.topicNames()).thenReturn(Collections.singletonMap(topicId, topicName)); - when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); - when(subscriptionState.rebalanceListener()).thenReturn(Optional.ofNullable(listener)); - - // Receive assignment adding a new partition - receiveAssignment(topicId, Arrays.asList(partitionOwned, partitionAdded), membershipManager); - } - private SortedSet topicIdPartitionsSet(Uuid topicId, String topicName, int... 
partitions) { SortedSet topicIdPartitions = new TreeSet<>(new Utils.TopicIdPartitionComparator()); @@ -1573,17 +1596,6 @@ private void mockLeaveGroup() { doNothing().when(subscriptionState).markPendingRevocation(anySet()); } - private void mockPrepareLeaving(ShareMembershipManager membershipManager) { - String topicName = "topic1"; - TopicPartition ownedPartition = new TopicPartition(topicName, 0); - - // Start leaving group, blocked waiting for callback to complete. - when(subscriptionState.assignedPartitions()).thenReturn(Collections.singleton(ownedPartition)); - when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); - doNothing().when(subscriptionState).markPendingRevocation(anySet()); - membershipManager.leaveGroup(); - } - private void testStateUpdateOnFatalFailure(ShareMembershipManager membershipManager) { String memberId = membershipManager.memberId(); int lastEpoch = membershipManager.memberEpoch(); @@ -1608,6 +1620,13 @@ private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse( .setMemberEpoch(MEMBER_EPOCH) .setAssignment(assignment)); } + + private ShareGroupHeartbeatResponse createShareGroupLeaveResponse(String memberId) { + return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(memberId) + .setMemberEpoch(ShareGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH)); + } private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponseWithError(String memberId) { return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData() @@ -1641,10 +1660,6 @@ private ShareGroupHeartbeatResponseData.Assignment createAssignment(boolean mock )); } - private KafkaMetric getMetric(final String name) { - return metrics.metrics().get(metrics.metricName(name, CONSUMER_SHARE_METRIC_GROUP_PREFIX + "-coordinator-metrics")); - } - private ShareMembershipManager memberJoinWithAssignment() { Uuid topicId = Uuid.randomUuid(); ShareMembershipManager membershipManager = mockJoinAndReceiveAssignment(true); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java index 5a52b7bc35fe1..07e490ae4c4d0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java @@ -31,6 +31,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import java.util.ArrayList; import java.util.Collections; @@ -149,15 +151,15 @@ private static LinkedHashMap topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicIds, topicNames, "foo"); + Uuid fooId = addTopicId(topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); handler.addPartitionToFetch(foo0, null); @@ -177,7 +179,7 @@ public void testShareSession() { handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Test a fetch request which adds one partition - Uuid barId = addTopicId(topicIds, topicNames, "bar"); + Uuid barId = addTopicId(topicNames, "bar"); TopicIdPartition bar0 = new TopicIdPartition(barId, 0, "bar"); handler.addPartitionToFetch(foo0, null); handler.addPartitionToFetch(foo1, null); @@ -199,7 
+201,7 @@ public void testShareSession() { handler.handleResponse(resp2, ApiKeys.SHARE_FETCH.latestVersion(true)); // A top-level error code will reset the session epoch - ShareFetchResponse resp3 = ShareFetchResponse.of(Errors.INVALID_SHARE_SESSION_EPOCH, 0, new LinkedHashMap<>(), List.of(), 0); + ShareFetchResponse resp3 = ShareFetchResponse.of(error, 0, new LinkedHashMap<>(), List.of(), 0); handler.handleResponse(resp3, ApiKeys.SHARE_FETCH.latestVersion(true)); ShareFetchRequestData requestData4 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -222,10 +224,9 @@ public void testPartitionRemoval() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicIds, topicNames, "foo"); - Uuid barId = addTopicId(topicIds, topicNames, "bar"); + Uuid fooId = addTopicId(topicNames, "foo"); + Uuid barId = addTopicId(topicNames, "bar"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); TopicIdPartition bar0 = new TopicIdPartition(barId, 0, "bar"); @@ -289,9 +290,8 @@ public void testTopicIdReplaced() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId1 = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId1 = addTopicId(topicNames, "foo"); TopicIdPartition tp = new TopicIdPartition(topicId1, 0, "foo"); handler.addPartitionToFetch(tp, null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -309,7 +309,7 @@ public void testTopicIdReplaced() { handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Try to add a new topic ID - Uuid topicId2 = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId2 = addTopicId(topicNames, "foo"); TopicIdPartition tp2 = new TopicIdPartition(topicId2, 0, "foo"); // Use the same data besides the topic ID handler.addPartitionToFetch(tp2, null); @@ -334,9 +334,8 @@ public void testPartitionForgottenOnAcknowledgeOnly() { ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); // We want to test when all topics are removed from the session - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId = addTopicId(topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(topicId, 0, "foo"); handler.addPartitionToFetch(foo0, null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -369,9 +368,8 @@ public void testForgottenPartitions() { ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); // We want to test when all topics are removed from the session - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId = addTopicId(topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(topicId, 0, "foo"); handler.addPartitionToFetch(foo0, null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -402,9 +400,8 @@ public void testAddNewIdAfterTopicRemovedFromSession() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, 
memberId); - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId = addTopicId(topicNames, "foo"); handler.addPartitionToFetch(new TopicIdPartition(topicId, 0, "foo"), null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); assertMapsEqual(reqMap(new TopicIdPartition(topicId, 0, "foo")), @@ -428,7 +425,7 @@ public void testAddNewIdAfterTopicRemovedFromSession() { handler.handleResponse(resp2, ApiKeys.SHARE_FETCH.latestVersion(true)); // After the topic is removed, add a recreated topic with a new ID - Uuid topicId2 = addTopicId(topicIds, topicNames, "foo"); + Uuid topicId2 = addTopicId(topicNames, "foo"); handler.addPartitionToFetch(new TopicIdPartition(topicId2, 0, "foo"), null); ShareFetchRequestData requestData3 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -443,9 +440,8 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); - Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicIds, topicNames, "foo"); + Uuid fooId = addTopicId(topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); Acknowledgements acknowledgements = Acknowledgements.empty(); @@ -462,7 +458,7 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { ShareFetchRequestData requestData = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); // We should have cleared the unsent acknowledgements before this ShareFetch. - assertEquals(0, requestData.topics().get(0).partitions().get(0).acknowledgementBatches().size()); + assertEquals(0, requestData.topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().size()); ArrayList expectedToSend1 = new ArrayList<>(); expectedToSend1.add(new TopicIdPartition(fooId, 1, "foo")); @@ -470,9 +466,7 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { assertEquals(memberId.toString(), requestData.memberId()); } - private Uuid addTopicId(Map topicIds, Map topicNames, String name) { - // If the same topic name is added more than once, the latest mapping will be in the - // topicIds, but all mappings will be in topicNames. This is needed in the replace tests. 
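Note: the replacement helper added in the next few lines keeps only the id-to-name map, since the tests no longer consult the name-to-latest-id map that the removed comment describes. With its generic parameters restored (they were lost in extraction here), the trimmed helper amounts to:

private Uuid addTopicId(Map<Uuid, String> topicNames, String name) {
    // Every generated id is recorded against the name, so the topic-id-replacement tests
    // can still resolve both the old and the new id for the same topic name.
    Uuid id = Uuid.randomUuid();
    topicNames.put(id, name);
    return id;
}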
+ private Uuid addTopicId(Map topicNames, String name) { Uuid id = Uuid.randomUuid(); topicNames.put(id, name); return id; diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManagerTest.java index d43bcfc7891fb..f4a2726b9e570 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManagerTest.java @@ -153,7 +153,7 @@ class StreamsGroupHeartbeatRequestManagerTest { List.of( new StreamsGroupHeartbeatResponseData.EndpointToPartitions() .setUserEndpoint(new StreamsGroupHeartbeatResponseData.Endpoint().setHost("localhost").setPort(8080)) - .setPartitions(List.of( + .setActivePartitions(List.of( new StreamsGroupHeartbeatResponseData.TopicPartition().setTopic("topic").setPartitions(List.of(0))) ) ); @@ -439,14 +439,10 @@ public void testSendingLeaveHeartbeatIfPollTimerExpired() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs); - }); + (mock, context) -> when(mock.heartbeatIntervalMs()).thenReturn(heartbeatIntervalMs)); final MockedConstruction pollTimerMockedConstruction = mockConstruction( Timer.class, - (mock, context) -> { - when(mock.isExpired()).thenReturn(true); - }); + (mock, context) -> when(mock.isExpired()).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -473,14 +469,10 @@ public void testNotSendingLeaveHeartbeatIfPollTimerExpiredAndMemberIsLeaving() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.timeToNextHeartbeatMs(time.milliseconds())).thenReturn(timeToNextHeartbeatMs); - }); + (mock, context) -> when(mock.timeToNextHeartbeatMs(time.milliseconds())).thenReturn(timeToNextHeartbeatMs)); final MockedConstruction pollTimerMockedConstruction = mockConstruction( Timer.class, - (mock, context) -> { - when(mock.isExpired()).thenReturn(true); - }); + (mock, context) -> when(mock.isExpired()).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -508,14 +500,10 @@ public void testSendingLeaveHeartbeatRequestWhenPollTimerExpired() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction pollTimerMockedConstruction = mockConstruction( Timer.class, - (mock, context) -> { - when(mock.isExpired()).thenReturn(true); - }) + (mock, context) -> when(mock.isExpired()).thenReturn(true)) ) { final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0); @@ -551,9 +539,7 @@ public void testSendingHeartbeatRequest() { try ( final MockedConstruction 
heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }) + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)) ) { final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); final HeartbeatRequestState heartbeatRequestState = heartbeatRequestStateMockedConstruction.constructed().get(0); @@ -591,9 +577,9 @@ public void testSendingHeartbeatRequest() { .get(new StreamsRebalanceData.HostInfo( ENDPOINT_TO_PARTITIONS.get(0).userEndpoint().host(), ENDPOINT_TO_PARTITIONS.get(0).userEndpoint().port()) - ); - assertEquals(ENDPOINT_TO_PARTITIONS.get(0).partitions().get(0).topic(), topicPartitions.get(0).topic()); - assertEquals(ENDPOINT_TO_PARTITIONS.get(0).partitions().get(0).partitions().get(0), topicPartitions.get(0).partition()); + ).activePartitions(); + assertEquals(ENDPOINT_TO_PARTITIONS.get(0).activePartitions().get(0).topic(), topicPartitions.get(0).topic()); + assertEquals(ENDPOINT_TO_PARTITIONS.get(0).activePartitions().get(0).partitions().get(0), topicPartitions.get(0).partition()); assertEquals( 1.0, metrics.metric(metrics.metricName("heartbeat-total", "consumer-coordinator-metrics")).metricValue() @@ -1001,9 +987,7 @@ public void testCoordinatorDisconnectFailureWhileSending() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1032,9 +1016,7 @@ public void testUnsupportedVersionFailureWhileSending() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1073,9 +1055,7 @@ public void testFatalFailureWhileSending() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1111,9 +1091,7 @@ public void testNotCoordinatorAndCoordinatorNotAvailableErrorResponse(final Erro try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1145,9 +1123,7 @@ public void testCoordinatorLoadInProgressErrorResponse() { try ( final MockedConstruction 
heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1173,9 +1149,7 @@ public void testGroupAuthorizationFailedErrorResponse() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class); final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class) @@ -1212,9 +1186,7 @@ public void testTopicAuthorizationFailedErrorResponse() { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class); final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class) @@ -1261,9 +1233,7 @@ public void testKnownFatalErrorResponse(final Errors error) { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class); final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class) @@ -1312,9 +1282,7 @@ public void testFencedMemberOrUnknownMemberIdErrorResponse(final Errors error) { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class) ) { @@ -1343,9 +1311,7 @@ public void testOtherErrorResponse(final Errors error) { try ( final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.canSendRequest(time.milliseconds())).thenReturn(true); - }); + (mock, context) -> when(mock.canSendRequest(time.milliseconds())).thenReturn(true)); final MockedConstruction heartbeatStateMockedConstruction = mockConstruction( StreamsGroupHeartbeatRequestManager.HeartbeatState.class); final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister(StreamsGroupHeartbeatRequestManager.class) @@ -1424,14 +1390,11 @@ public void 
testPollOnCloseWhenIsLeaving() { @Test public void testMaximumTimeToWaitPollTimerExpired() { try ( - final MockedConstruction timerMockedConstruction = mockConstruction(Timer.class, (mock, context) -> { - when(mock.isExpired()).thenReturn(true); - }); + final MockedConstruction timerMockedConstruction = + mockConstruction(Timer.class, (mock, context) -> when(mock.isExpired()).thenReturn(true)); final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.requestInFlight()).thenReturn(false); - }) + (mock, context) -> when(mock.requestInFlight()).thenReturn(false)) ) { final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); final Timer pollTimer = timerMockedConstruction.constructed().get(0); @@ -1450,9 +1413,7 @@ public void testMaximumTimeToWaitWhenHeartbeatShouldBeSentImmediately() { final MockedConstruction timerMockedConstruction = mockConstruction(Timer.class); final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.requestInFlight()).thenReturn(false); - }) + (mock, context) -> when(mock.requestInFlight()).thenReturn(false)) ) { final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); final Timer pollTimer = timerMockedConstruction.constructed().get(0); @@ -1473,9 +1434,8 @@ public void testMaximumTimeToWaitWhenHeartbeatShouldBeNotSentImmediately(final b final long remainingMs = 12L; final long timeToNextHeartbeatMs = 6L; try ( - final MockedConstruction timerMockedConstruction = mockConstruction(Timer.class, (mock, context) -> { - when(mock.remainingMs()).thenReturn(remainingMs); - }); + final MockedConstruction timerMockedConstruction = + mockConstruction(Timer.class, (mock, context) -> when(mock.remainingMs()).thenReturn(remainingMs)); final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, (mock, context) -> { @@ -1500,14 +1460,11 @@ public void testMaximumTimeToWaitWhenHeartbeatShouldBeNotSentImmediately(final b public void testMaximumTimeToWaitSelectingMinimumWaitTime(final long remainingMs, final long timeToNextHeartbeatMs) { try ( - final MockedConstruction timerMockedConstruction = mockConstruction(Timer.class, (mock, context) -> { - when(mock.remainingMs()).thenReturn(remainingMs); - }); + final MockedConstruction timerMockedConstruction = + mockConstruction(Timer.class, (mock, context) -> when(mock.remainingMs()).thenReturn(remainingMs)); final MockedConstruction heartbeatRequestStateMockedConstruction = mockConstruction( HeartbeatRequestState.class, - (mock, context) -> { - when(mock.timeToNextHeartbeatMs(anyLong())).thenReturn(timeToNextHeartbeatMs); - }) + (mock, context) -> when(mock.timeToNextHeartbeatMs(anyLong())).thenReturn(timeToNextHeartbeatMs)) ) { final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); final Timer pollTimer = timerMockedConstruction.constructed().get(0); @@ -1520,11 +1477,42 @@ public void testMaximumTimeToWaitSelectingMinimumWaitTime(final long remainingMs } } + @Test + public void testResetPollTimer() { + try (final MockedConstruction pollTimerMockedConstruction = mockConstruction(Timer.class)) { + final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); + final Timer pollTimer = 
pollTimerMockedConstruction.constructed().get(1); + + heartbeatRequestManager.resetPollTimer(time.milliseconds()); + verify(pollTimer).update(time.milliseconds()); + verify(pollTimer).isExpired(); + verify(pollTimer).reset(DEFAULT_MAX_POLL_INTERVAL_MS); + } + } + + @Test + public void testResetPollTimerWhenExpired() { + try (final MockedConstruction pollTimerMockedConstruction = mockConstruction(Timer.class)) { + final StreamsGroupHeartbeatRequestManager heartbeatRequestManager = createStreamsGroupHeartbeatRequestManager(); + final Timer pollTimer = pollTimerMockedConstruction.constructed().get(1); + + when(pollTimer.isExpired()).thenReturn(true); + heartbeatRequestManager.resetPollTimer(time.milliseconds()); + verify(pollTimer).update(time.milliseconds()); + verify(pollTimer).isExpired(); + verify(pollTimer).isExpiredBy(); + verify(membershipManager).memberId(); + verify(membershipManager).maybeRejoinStaleMember(); + verify(pollTimer).reset(DEFAULT_MAX_POLL_INTERVAL_MS); + } + } + private static ConsumerConfig config() { Properties prop = new Properties(); prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); prop.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS)); + prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new ConsumerConfig(prop); } @@ -1593,4 +1581,4 @@ private static void assertTaskIdsEquals(final List activeTasks = + Set.of(new StreamsRebalanceData.TaskId(SUBTOPOLOGY_ID_0, PARTITION_0)); + joining(); + reconcile(makeHeartbeatResponseWithActiveTasks(SUBTOPOLOGY_ID_0, List.of(PARTITION_0))); + final CompletableFuture onTasksAssignedCallbackExecutedSetup = + verifyOnTasksAssignedCallbackNeededEventAddedToBackgroundEventHandler(activeTasks, Set.of(), Set.of()); + acknowledging(onTasksAssignedCallbackExecutedSetup); + stable(); + + CompletableFuture leaveResult = membershipManager.leaveGroup(); + final CompletableFuture onTasksRevokedCallbackExecutedSetup = + verifyOnTasksRevokedCallbackNeededEventAddedToBackgroundEventHandler(activeTasks); + onTasksRevokedCallbackExecutedSetup.complete(null); + + // Send leave request, transitioning to UNSUBSCRIBED state + membershipManager.onHeartbeatRequestGenerated(); + assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); + + // Receive a previous heartbeat response, which should be ignored + membershipManager.onHeartbeatSuccess(new StreamsGroupHeartbeatResponse( + new StreamsGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(membershipManager.memberId()) + .setMemberEpoch(MEMBER_EPOCH) + )); + assertFalse(leaveResult.isDone()); + + // Receive a leave heartbeat response, which should unblock the consumer + membershipManager.onHeartbeatSuccess(new StreamsGroupHeartbeatResponse( + new StreamsGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(membershipManager.memberId()) + .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) + )); + assertTrue(leaveResult.isDone()); + + // Consumer unblocks and updates subscription + membershipManager.onSubscriptionUpdated(); + membershipManager.onConsumerPoll(); + + membershipManager.onHeartbeatSuccess(new StreamsGroupHeartbeatResponse( + new StreamsGroupHeartbeatResponseData() + .setErrorCode(Errors.NONE.code()) + .setMemberId(membershipManager.memberId()) + .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) + )); + + assertEquals(MemberState.JOINING, 
membershipManager.state()); + assertEquals(0, membershipManager.memberEpoch()); + } + private void testLeaveGroupWhenNotInGroup(final Supplier> leaveGroup) { final CompletableFuture future = leaveGroup.get(); @@ -1216,10 +1281,18 @@ public void testLeaveGroupWhenInGroupWithAssignment() { assertEquals(onGroupLeft, onGroupLeftAfterRevocationCallback); membershipManager.onHeartbeatRequestGenerated(); verifyInStateUnsubscribed(membershipManager); + + // Don't unblock unsubscribe if this is not a leave group response membershipManager.onHeartbeatSuccess(makeHeartbeatResponseWithActiveTasks(SUBTOPOLOGY_ID_0, List.of(PARTITION_0), MEMBER_EPOCH + 1)); + + assertFalse(onGroupLeft.isDone()); + verify(memberStateListener, never()).onMemberEpochUpdated(Optional.of(MEMBER_EPOCH + 1), membershipManager.memberId()); + + // Unblock unsubscribe when this is not a leave group response + membershipManager.onHeartbeatSuccess(makeHeartbeatResponse(List.of(), List.of(), List.of(), LEAVE_GROUP_MEMBER_EPOCH)); + assertTrue(onGroupLeft.isDone()); assertFalse(onGroupLeft.isCompletedExceptionally()); - verify(memberStateListener, never()).onMemberEpochUpdated(Optional.of(MEMBER_EPOCH + 1), membershipManager.memberId()); } @Test @@ -1252,10 +1325,18 @@ public void testLeaveGroupOnCloseWhenInGroupWithAssignment() { assertFalse(onGroupLeft.isDone()); membershipManager.onHeartbeatRequestGenerated(); verifyInStateUnsubscribed(membershipManager); + + // Don't unblock unsubscribe if this is not a leave group response membershipManager.onHeartbeatSuccess(makeHeartbeatResponseWithActiveTasks(SUBTOPOLOGY_ID_0, List.of(PARTITION_0), MEMBER_EPOCH + 1)); + + assertFalse(onGroupLeft.isDone()); + verify(memberStateListener, never()).onMemberEpochUpdated(Optional.of(MEMBER_EPOCH + 1), membershipManager.memberId()); + + // Unblock unsubscribe when this is not a leave group response + membershipManager.onHeartbeatSuccess(makeHeartbeatResponse(List.of(), List.of(), List.of(), LEAVE_GROUP_MEMBER_EPOCH)); + assertTrue(onGroupLeft.isDone()); assertFalse(onGroupLeft.isCompletedExceptionally()); - verify(memberStateListener, never()).onMemberEpochUpdated(Optional.of(MEMBER_EPOCH + 1), membershipManager.memberId()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java index 3caa298ee164b..606ba0b735027 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java @@ -62,7 +62,7 @@ public void taskIdShouldNotAcceptNulls() { public void testTaskIdCompareTo() { final StreamsRebalanceData.TaskId task = new StreamsRebalanceData.TaskId("subtopologyId1", 1); - assertTrue(task.compareTo(new StreamsRebalanceData.TaskId(task.subtopologyId(), task.partitionId())) == 0); + assertEquals(0, task.compareTo(new StreamsRebalanceData.TaskId(task.subtopologyId(), task.partitionId()))); assertTrue(task.compareTo(new StreamsRebalanceData.TaskId(task.subtopologyId() + "1", task.partitionId())) < 0); assertTrue(task.compareTo(new StreamsRebalanceData.TaskId(task.subtopologyId(), task.partitionId() + 1)) < 0); assertTrue(new StreamsRebalanceData.TaskId(task.subtopologyId() + "1", task.partitionId()).compareTo(task) > 0); @@ -90,9 +90,9 @@ public void emptyAssignmentShouldNotBeModifiable() { @Test public void assignmentShouldNotBeModifiable() { final 
StreamsRebalanceData.Assignment assignment = new StreamsRebalanceData.Assignment( - new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 1))), - new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 2))), - new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 3))) + Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 1)), + Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 2)), + Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 3)) ); assertThrows( @@ -220,8 +220,8 @@ public void subtopologyShouldNotAcceptNulls() { @Test public void subtopologyShouldNotBeModifiable() { final StreamsRebalanceData.Subtopology subtopology = new StreamsRebalanceData.Subtopology( - new HashSet<>(Set.of("sourceTopic1")), - new HashSet<>(Set.of("repartitionSinkTopic1")), + Set.of("sourceTopic1"), + Set.of("repartitionSinkTopic1"), Map.of("repartitionSourceTopic1", new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.of((short) 1), Map.of())) .entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)), @@ -420,4 +420,21 @@ public void streamsRebalanceDataShouldBeConstructedWithShutDownRequestedSetFalse assertFalse(streamsRebalanceData.shutdownRequested()); } + + @Test + public void streamsRebalanceDataShouldBeConstructedWithEmptyStatuses() { + final UUID processId = UUID.randomUUID(); + final Optional endpoint = Optional.of(new StreamsRebalanceData.HostInfo("localhost", 9090)); + final Map subtopologies = Map.of(); + final Map clientTags = Map.of("clientTag1", "clientTagValue1"); + final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData( + processId, + endpoint, + subtopologies, + clientTags + ); + + assertTrue(streamsRebalanceData.statuses().isEmpty()); + } + } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvokerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvokerTest.java new file mode 100644 index 0000000000000..749a4594ab825 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceListenerInvokerTest.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.utils.LogContext; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.STRICT_STUBS) +public class StreamsRebalanceListenerInvokerTest { + + @Mock + private StreamsRebalanceListener mockListener; + + @Mock + private StreamsRebalanceData streamsRebalanceData; + + private StreamsRebalanceListenerInvoker invoker; + private final LogContext logContext = new LogContext(); + + @BeforeEach + public void setup() { + invoker = new StreamsRebalanceListenerInvoker(logContext, streamsRebalanceData); + } + + @Test + public void testSetRebalanceListenerWithNull() { + NullPointerException exception = assertThrows(NullPointerException.class, + () -> invoker.setRebalanceListener(null)); + assertEquals("StreamsRebalanceListener cannot be null", exception.getMessage()); + } + + @Test + public void testSetRebalanceListenerOverwritesExisting() { + StreamsRebalanceListener firstListener = org.mockito.Mockito.mock(StreamsRebalanceListener.class); + StreamsRebalanceListener secondListener = org.mockito.Mockito.mock(StreamsRebalanceListener.class); + + StreamsRebalanceData.Assignment mockAssignment = createMockAssignment(); + when(streamsRebalanceData.reconciledAssignment()).thenReturn(mockAssignment); + + // Set first listener + invoker.setRebalanceListener(firstListener); + + // Overwrite with second listener + invoker.setRebalanceListener(secondListener); + + // Should use second listener + invoker.invokeAllTasksRevoked(); + verify(firstListener, never()).onTasksRevoked(any()); + verify(secondListener).onTasksRevoked(eq(mockAssignment.activeTasks())); + } + + @Test + public void testInvokeMethodsWithNoListener() { + assertNull(invoker.invokeAllTasksRevoked()); + assertNull(invoker.invokeTasksAssigned(createMockAssignment())); + assertNull(invoker.invokeTasksRevoked(createMockTasks())); + assertNull(invoker.invokeAllTasksLost()); + } + + @Test + public void testInvokeAllTasksRevokedWithListener() { + invoker.setRebalanceListener(mockListener); + + StreamsRebalanceData.Assignment mockAssignment = createMockAssignment(); + when(streamsRebalanceData.reconciledAssignment()).thenReturn(mockAssignment); + + Exception result = invoker.invokeAllTasksRevoked(); + + assertNull(result); + verify(mockListener).onTasksRevoked(eq(mockAssignment.activeTasks())); + } + + @Test + public void testInvokeTasksAssignedWithListener() { + invoker.setRebalanceListener(mockListener); + StreamsRebalanceData.Assignment assignment = createMockAssignment(); + + Exception result = invoker.invokeTasksAssigned(assignment); + + assertNull(result); + 
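Aside: the exception-path tests that follow in this new file all exercise the same contract: WakeupException and InterruptException thrown by the listener propagate to the caller, while any other exception is caught and returned so the membership manager can surface it later. A hedged sketch of the shape that implies for the invoker, ignoring the no-listener case (the real StreamsRebalanceListenerInvoker is not shown in this hunk and may differ in detail):

Exception invokeTasksAssigned(StreamsRebalanceData.Assignment assignment) {
    try {
        listener.onTasksAssigned(assignment);
    } catch (WakeupException | InterruptException e) {
        throw e;     // control-flow exceptions are rethrown unchanged
    } catch (Exception e) {
        return e;    // listener failures are reported back rather than thrown
    }
    return null;     // null signals the callback completed cleanly
}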
verify(mockListener).onTasksAssigned(eq(assignment)); + } + + @Test + public void testInvokeTasksAssignedWithWakeupException() { + invoker.setRebalanceListener(mockListener); + StreamsRebalanceData.Assignment assignment = createMockAssignment(); + WakeupException wakeupException = new WakeupException(); + doThrow(wakeupException).when(mockListener).onTasksAssigned(assignment); + + WakeupException thrownException = assertThrows(WakeupException.class, + () -> invoker.invokeTasksAssigned(assignment)); + + assertEquals(wakeupException, thrownException); + verify(mockListener).onTasksAssigned(eq(assignment)); + } + + @Test + public void testInvokeTasksAssignedWithInterruptException() { + invoker.setRebalanceListener(mockListener); + StreamsRebalanceData.Assignment assignment = createMockAssignment(); + InterruptException interruptException = new InterruptException("Test interrupt"); + doThrow(interruptException).when(mockListener).onTasksAssigned(assignment); + + InterruptException thrownException = assertThrows(InterruptException.class, + () -> invoker.invokeTasksAssigned(assignment)); + + assertEquals(interruptException, thrownException); + verify(mockListener).onTasksAssigned(eq(assignment)); + } + + @Test + public void testInvokeTasksAssignedWithOtherException() { + invoker.setRebalanceListener(mockListener); + StreamsRebalanceData.Assignment assignment = createMockAssignment(); + RuntimeException runtimeException = new RuntimeException("Test exception"); + doThrow(runtimeException).when(mockListener).onTasksAssigned(assignment); + + Exception result = invoker.invokeTasksAssigned(assignment); + + assertEquals(runtimeException, result); + verify(mockListener).onTasksAssigned(eq(assignment)); + } + + @Test + public void testInvokeTasksRevokedWithListener() { + invoker.setRebalanceListener(mockListener); + Set tasks = createMockTasks(); + + Exception result = invoker.invokeTasksRevoked(tasks); + + assertNull(result); + verify(mockListener).onTasksRevoked(eq(tasks)); + } + + @Test + public void testInvokeTasksRevokedWithWakeupException() { + invoker.setRebalanceListener(mockListener); + Set tasks = createMockTasks(); + WakeupException wakeupException = new WakeupException(); + doThrow(wakeupException).when(mockListener).onTasksRevoked(tasks); + + WakeupException thrownException = assertThrows(WakeupException.class, + () -> invoker.invokeTasksRevoked(tasks)); + + assertEquals(wakeupException, thrownException); + verify(mockListener).onTasksRevoked(eq(tasks)); + } + + @Test + public void testInvokeTasksRevokedWithInterruptException() { + invoker.setRebalanceListener(mockListener); + Set tasks = createMockTasks(); + InterruptException interruptException = new InterruptException("Test interrupt"); + doThrow(interruptException).when(mockListener).onTasksRevoked(tasks); + + InterruptException thrownException = assertThrows(InterruptException.class, + () -> invoker.invokeTasksRevoked(tasks)); + + assertEquals(interruptException, thrownException); + verify(mockListener).onTasksRevoked(eq(tasks)); + } + + @Test + public void testInvokeTasksRevokedWithOtherException() { + invoker.setRebalanceListener(mockListener); + Set tasks = createMockTasks(); + RuntimeException runtimeException = new RuntimeException("Test exception"); + doThrow(runtimeException).when(mockListener).onTasksRevoked(tasks); + + Exception result = invoker.invokeTasksRevoked(tasks); + + assertEquals(runtimeException, result); + verify(mockListener).onTasksRevoked(eq(tasks)); + } + + @Test + public void 
testInvokeAllTasksLostWithListener() { + invoker.setRebalanceListener(mockListener); + + Exception result = invoker.invokeAllTasksLost(); + + assertNull(result); + verify(mockListener).onAllTasksLost(); + } + + @Test + public void testInvokeAllTasksLostWithWakeupException() { + invoker.setRebalanceListener(mockListener); + WakeupException wakeupException = new WakeupException(); + doThrow(wakeupException).when(mockListener).onAllTasksLost(); + + WakeupException thrownException = assertThrows(WakeupException.class, + () -> invoker.invokeAllTasksLost()); + + assertEquals(wakeupException, thrownException); + verify(mockListener).onAllTasksLost(); + } + + @Test + public void testInvokeAllTasksLostWithInterruptException() { + invoker.setRebalanceListener(mockListener); + InterruptException interruptException = new InterruptException("Test interrupt"); + doThrow(interruptException).when(mockListener).onAllTasksLost(); + + InterruptException thrownException = assertThrows(InterruptException.class, + () -> invoker.invokeAllTasksLost()); + + assertEquals(interruptException, thrownException); + verify(mockListener).onAllTasksLost(); + } + + @Test + public void testInvokeAllTasksLostWithOtherException() { + invoker.setRebalanceListener(mockListener); + RuntimeException runtimeException = new RuntimeException("Test exception"); + doThrow(runtimeException).when(mockListener).onAllTasksLost(); + + Exception result = invoker.invokeAllTasksLost(); + + assertEquals(runtimeException, result); + verify(mockListener).onAllTasksLost(); + } + + private StreamsRebalanceData.Assignment createMockAssignment() { + Set activeTasks = createMockTasks(); + Set standbyTasks = Set.of(); + Set warmupTasks = Set.of(); + + return new StreamsRebalanceData.Assignment(activeTasks, standbyTasks, warmupTasks); + } + + private Set createMockTasks() { + return Set.of( + new StreamsRebalanceData.TaskId("subtopology1", 0), + new StreamsRebalanceData.TaskId("subtopology1", 1) + ); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java index f697990b54425..4d4a725d45c49 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java @@ -26,6 +26,7 @@ import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.utils.LogContext; @@ -33,16 +34,15 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; +import java.util.List; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; +import java.util.function.Predicate; import java.util.regex.Pattern; -import static java.util.Collections.singleton; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -64,14 +64,14 @@ public class SubscriptionStateTest 
{ @Test public void partitionAssignment() { - state.assignFromUser(singleton(tp0)); - assertEquals(singleton(tp0), state.assignedPartitions()); + state.assignFromUser(Set.of(tp0)); + assertEquals(Set.of(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); assertFalse(state.hasAllFetchPositions()); state.seek(tp0, 1); assertTrue(state.isFetchable(tp0)); assertEquals(1L, state.position(tp0).offset); - state.assignFromUser(Collections.emptySet()); + state.assignFromUser(Set.of()); assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); assertFalse(state.isAssigned(tp0)); @@ -80,7 +80,7 @@ public void partitionAssignment() { @Test public void partitionAssignmentChangeOnTopicSubscription() { - state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + state.assignFromUser(Set.of(tp0, tp1)); // assigned partitions should immediately change assertEquals(2, state.assignedPartitions().size()); assertEquals(2, state.numAssignedPartitions()); @@ -92,20 +92,20 @@ public void partitionAssignmentChangeOnTopicSubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(t1p0))); - state.assignFromSubscribed(singleton(t1p0)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(t1p0))); + state.assignFromSubscribed(Set.of(t1p0)); // assigned partitions should immediately change - assertEquals(singleton(t1p0), state.assignedPartitions()); + assertEquals(Set.of(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged - assertEquals(singleton(t1p0), state.assignedPartitions()); + assertEquals(Set.of(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -114,26 +114,74 @@ public void partitionAssignmentChangeOnTopicSubscription() { assertEquals(0, state.numAssignedPartitions()); } + @Test + public void testIsFetchableOnManualAssignment() { + state.assignFromUser(Set.of(tp0, tp1)); + assertAssignedPartitionIsFetchable(); + } + + @Test + public void testIsFetchableOnAutoAssignment() { + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribed(Set.of(tp0, tp1)); + assertAssignedPartitionIsFetchable(); + } + + private void assertAssignedPartitionIsFetchable() { + assertEquals(2, state.assignedPartitions().size()); + assertTrue(state.assignedPartitions().contains(tp0)); + assertTrue(state.assignedPartitions().contains(tp1)); + + assertFalse(state.isFetchable(tp0), "Should not be fetchable without a valid position"); + assertFalse(state.isFetchable(tp1), "Should not be fetchable without a valid position"); + + state.seek(tp0, 1); + state.seek(tp1, 1); + + assertTrue(state.isFetchable(tp0)); + assertTrue(state.isFetchable(tp1)); + } + + @Test + public void testIsFetchableConsidersExplicitTopicSubscription() { + state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); + state.assignFromSubscribed(Set.of(t1p0)); + state.seek(t1p0, 1); + + 
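Aside: taken together, the fetchability tests added here treat isFetchable() as a conjunction of several conditions rather than a simple assignment check. A hedged sketch of that predicate, written only from what these assertions require (the helper names below are hypothetical; the real SubscriptionState logic is not visible in this hunk):

boolean isFetchable(TopicPartition tp) {
    return isAssigned(tp)               // partition is currently assigned
        && subscriptionCovers(tp)       // hypothetical: topic still matches the active subscription
        && hasValidPosition(tp)         // hypothetical: a position was seeked or initialized
        && !isPaused(tp)                // pause() suspends fetching, see partitionPause below
        && !isPendingRevocation(tp);    // hypothetical: markPendingRevocation() blocks fetching
}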
assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertTrue(state.isFetchable(t1p0)); + + // Change subscription. Assigned partitions should remain unchanged but not fetchable. + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertFalse(state.isFetchable(t1p0), "Assigned partitions not in the subscription should not be fetchable"); + + // Unsubscribe. Assigned partitions should be cleared and not fetchable. + state.unsubscribe(); + assertTrue(state.assignedPartitions().isEmpty()); + assertFalse(state.isFetchable(t1p0)); + } + @Test public void testGroupSubscribe() { - state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); - assertEquals(singleton(topic1), state.metadataTopics()); + state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); + assertEquals(Set.of(topic1), state.metadataTopics()); - assertFalse(state.groupSubscribe(singleton(topic1))); - assertEquals(singleton(topic1), state.metadataTopics()); + assertFalse(state.groupSubscribe(Set.of(topic1))); + assertEquals(Set.of(topic1), state.metadataTopics()); assertTrue(state.groupSubscribe(Set.of(topic, topic1))); assertEquals(Set.of(topic, topic1), state.metadataTopics()); // `groupSubscribe` does not accumulate - assertFalse(state.groupSubscribe(singleton(topic1))); - assertEquals(singleton(topic1), state.metadataTopics()); + assertFalse(state.groupSubscribe(Set.of(topic1))); + assertEquals(Set.of(topic1), state.metadataTopics()); - state.subscribe(singleton("anotherTopic"), Optional.of(rebalanceListener)); + state.subscribe(Set.of("anotherTopic"), Optional.of(rebalanceListener)); assertEquals(Set.of(topic1, "anotherTopic"), state.metadataTopics()); - assertFalse(state.groupSubscribe(singleton("anotherTopic"))); - assertEquals(singleton("anotherTopic"), state.metadataTopics()); + assertFalse(state.groupSubscribe(Set.of("anotherTopic"))); + assertEquals(Set.of("anotherTopic"), state.metadataTopics()); } @Test @@ -143,44 +191,44 @@ public void partitionAssignmentChangeOnPatternSubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.subscribeFromPattern(Collections.singleton(topic)); + state.subscribeFromPattern(Set.of(topic)); // assigned partitions should remain unchanged assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); - state.assignFromSubscribed(singleton(tp1)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); + state.assignFromSubscribed(Set.of(tp1)); // assigned partitions should immediately change - assertEquals(singleton(tp1), state.assignedPartitions()); + assertEquals(Set.of(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(singleton(topic), state.subscription()); + assertEquals(Set.of(topic), state.subscription()); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(t1p0))); - state.assignFromSubscribed(singleton(t1p0)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(t1p0))); + state.assignFromSubscribed(Set.of(t1p0)); // assigned partitions should immediately change - assertEquals(singleton(t1p0), state.assignedPartitions()); + assertEquals(Set.of(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(singleton(topic), state.subscription()); + assertEquals(Set.of(topic), state.subscription()); 
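Note: these singleton-to-Set.of swaps are behavior-preserving for the assertions: java.util.Set equality is element-based, so a Set.of(...) literal compares equal to whatever set implementation SubscriptionState hands back, and both Collections.singleton and Set.of produce unmodifiable sets (Set.of is additionally null-hostile, which none of these fixtures rely on). A small standalone illustration (java.util.Collections and the JUnit assertions would need to be on the imports):

Set<String> fromSingleton = Collections.singleton("topic");
Set<String> fromFactory = Set.of("topic");
assertEquals(fromSingleton, fromFactory);   // element-based equality across implementations
assertThrows(UnsupportedOperationException.class, () -> fromFactory.add("other"));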
state.subscribe(Pattern.compile(".*t"), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged - assertEquals(singleton(t1p0), state.assignedPartitions()); + assertEquals(Set.of(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - state.subscribeFromPattern(singleton(topic)); + state.subscribeFromPattern(Set.of(topic)); // assigned partitions should remain unchanged - assertEquals(singleton(t1p0), state.assignedPartitions()); + assertEquals(Set.of(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); - state.assignFromSubscribed(singleton(tp0)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); + state.assignFromSubscribed(Set.of(tp0)); // assigned partitions should immediately change - assertEquals(singleton(tp0), state.assignedPartitions()); + assertEquals(Set.of(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(singleton(topic), state.subscription()); + assertEquals(Set.of(topic), state.subscription()); state.unsubscribe(); // assigned partitions should immediately change @@ -198,10 +246,10 @@ public void verifyAssignmentId() { state.unsubscribe(); assertEquals(2, state.assignmentId()); - assertEquals(Collections.emptySet(), state.assignedPartitions()); + assertEquals(Set.of(), state.assignedPartitions()); Set autoAssignment = Set.of(t1p0); - state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); assertTrue(state.checkAssignmentMatchedSubscription(autoAssignment)); state.assignFromSubscribed(autoAssignment); assertEquals(3, state.assignmentId()); @@ -210,7 +258,7 @@ public void verifyAssignmentId() { @Test public void partitionReset() { - state.assignFromUser(singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seek(tp0, 5); assertEquals(5L, state.position(tp0).offset); state.requestOffsetReset(tp0); @@ -226,29 +274,29 @@ public void partitionReset() { @Test public void topicSubscription() { - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); assertEquals(1, state.subscription().size()); assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); assertTrue(state.hasAutoAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); - state.assignFromSubscribed(singleton(tp0)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); + state.assignFromSubscribed(Set.of(tp0)); state.seek(tp0, 1); assertEquals(1L, state.position(tp0).offset); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); - state.assignFromSubscribed(singleton(tp1)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); + state.assignFromSubscribed(Set.of(tp1)); assertTrue(state.isAssigned(tp1)); assertFalse(state.isAssigned(tp0)); assertFalse(state.isFetchable(tp1)); - assertEquals(singleton(tp1), state.assignedPartitions()); + assertEquals(Set.of(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); } @Test public void partitionPause() { - state.assignFromUser(singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); state.pause(tp0); @@ -259,10 +307,10 @@ public void partitionPause() { @Test public void 
testMarkingPartitionPending() { - state.assignFromUser(singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); - state.markPendingRevocation(singleton(tp0)); + state.markPendingRevocation(Set.of(tp0)); assertFalse(state.isFetchable(tp0)); assertFalse(state.isPaused(tp0)); } @@ -270,16 +318,17 @@ public void testMarkingPartitionPending() { @Test public void testAssignedPartitionsAwaitingCallbackKeepPositionDefinedInCallback() { // New partition assigned. Should not be fetchable or initializing positions. - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); + assertEquals(Set.of(tp0.topic()), state.subscription()); // Simulate callback setting position to start fetching from state.seek(tp0, 100); // Callback completed. Partition should be fetchable, and should not require // initializing positions (position already defined in the callback) - state.enablePartitionsAwaitingCallback(singleton(tp0)); + state.enablePartitionsAwaitingCallback(Set.of(tp0)); assertEquals(0, state.initializingPartitions().size()); assertTrue(state.isFetchable(tp0)); assertTrue(state.hasAllFetchPositions()); @@ -289,13 +338,14 @@ public void testAssignedPartitionsAwaitingCallbackKeepPositionDefinedInCallback( @Test public void testAssignedPartitionsAwaitingCallbackInitializePositionsWhenCallbackCompletes() { // New partition assigned. Should not be fetchable or initializing positions. - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); + assertEquals(Set.of(tp0.topic()), state.subscription()); // Callback completed (without updating positions). Partition should require initializing // positions, and start fetching once a valid position is set. - state.enablePartitionsAwaitingCallback(singleton(tp0)); + state.enablePartitionsAwaitingCallback(Set.of(tp0)); assertEquals(1, state.initializingPartitions().size()); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); @@ -306,22 +356,23 @@ public void testAssignedPartitionsAwaitingCallbackInitializePositionsWhenCallbac @Test public void testAssignedPartitionsAwaitingCallbackDoesNotAffectPreviouslyOwnedPartitions() { // First partition assigned and callback completes. - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); - state.enablePartitionsAwaitingCallback(singleton(tp0)); + assertEquals(Set.of(tp0.topic()), state.subscription()); + state.enablePartitionsAwaitingCallback(Set.of(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); // New partition added to the assignment. Owned partitions should continue to be // fetchable, while the newly added should not be fetchable until callback completes. 
- state.assignFromSubscribedAwaitingCallback(Set.of(tp0, tp1), singleton(tp1)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0, tp1), Set.of(tp1)); assertTrue(state.isFetchable(tp0)); assertFalse(state.isFetchable(tp1)); assertEquals(1, state.initializingPartitions().size()); // Callback completed. Added partition be initializing positions and become fetchable when it gets one. - state.enablePartitionsAwaitingCallback(singleton(tp1)); + state.enablePartitionsAwaitingCallback(Set.of(tp1)); assertEquals(1, state.initializingPartitions().size()); assertEquals(tp1, state.initializingPartitions().iterator().next()); state.seek(tp1, 200); @@ -329,9 +380,8 @@ public void testAssignedPartitionsAwaitingCallbackDoesNotAffectPreviouslyOwnedPa } private void assertAssignmentAppliedAwaitingCallback(TopicPartition topicPartition) { - assertEquals(singleton(topicPartition), state.assignedPartitions()); + assertEquals(Set.of(topicPartition), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(singleton(topicPartition.topic()), state.subscription()); assertFalse(state.isFetchable(topicPartition)); assertEquals(1, state.initializingPartitions().size()); @@ -340,9 +390,9 @@ private void assertAssignmentAppliedAwaitingCallback(TopicPartition topicPartiti @Test public void invalidPositionUpdate() { - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); - state.assignFromSubscribed(singleton(tp0)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); + state.assignFromSubscribed(Set.of(tp0)); assertThrows(IllegalStateException.class, () -> state.position(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), leaderAndEpoch))); @@ -350,15 +400,15 @@ public void invalidPositionUpdate() { @Test public void cantAssignPartitionForUnsubscribedTopics() { - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - assertFalse(state.checkAssignmentMatchedSubscription(Collections.singletonList(t1p0))); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + assertFalse(state.checkAssignmentMatchedSubscription(List.of(t1p0))); } @Test public void cantAssignPartitionForUnmatchedPattern() { state.subscribe(Pattern.compile(".*t"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(Collections.singleton(topic)); - assertFalse(state.checkAssignmentMatchedSubscription(Collections.singletonList(t1p0))); + state.subscribeFromPattern(Set.of(topic)); + assertFalse(state.checkAssignmentMatchedSubscription(List.of(t1p0))); } @Test @@ -369,32 +419,32 @@ public void cantChangePositionForNonAssignedPartition() { @Test public void cantSubscribeTopicAndPattern() { - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); assertThrows(IllegalStateException.class, () -> state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener))); } @Test public void cantSubscribePartitionAndPattern() { - state.assignFromUser(singleton(tp0)); + state.assignFromUser(Set.of(tp0)); assertThrows(IllegalStateException.class, () -> state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener))); } @Test public void cantSubscribePatternAndTopic() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - assertThrows(IllegalStateException.class, () -> state.subscribe(singleton(topic), 
Optional.of(rebalanceListener))); + assertThrows(IllegalStateException.class, () -> state.subscribe(Set.of(topic), Optional.of(rebalanceListener))); } @Test public void cantSubscribePatternAndPartition() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - assertThrows(IllegalStateException.class, () -> state.assignFromUser(singleton(tp0))); + assertThrows(IllegalStateException.class, () -> state.assignFromUser(Set.of(tp0))); } @Test public void patternSubscription() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1))); + state.subscribeFromPattern(Set.of(topic, topic1)); assertEquals(2, state.subscription().size(), "Expected subscribed topics count is incorrect"); } @@ -404,6 +454,75 @@ public void testSubscribeToRe2JPattern() { state.subscribe(new SubscriptionPattern(pattern), Optional.of(rebalanceListener)); assertTrue(state.toString().contains("type=AUTO_PATTERN_RE2J")); assertTrue(state.toString().contains("subscribedPattern=" + pattern)); + assertTrue(state.assignedTopicIds().isEmpty()); + } + + @Test + public void testIsAssignedFromRe2j() { + assertFalse(state.isAssignedFromRe2j(null)); + Uuid assignedUuid = Uuid.randomUuid(); + assertFalse(state.isAssignedFromRe2j(assignedUuid)); + + state.subscribe(new SubscriptionPattern("foo.*"), Optional.empty()); + assertTrue(state.hasRe2JPatternSubscription()); + assertFalse(state.isAssignedFromRe2j(assignedUuid)); + + state.setAssignedTopicIds(Set.of(assignedUuid)); + assertTrue(state.isAssignedFromRe2j(assignedUuid)); + + state.unsubscribe(); + assertFalse(state.isAssignedFromRe2j(assignedUuid)); + assertFalse(state.hasRe2JPatternSubscription()); + + } + + @Test + public void testAssignedPartitionsWithTopicIdsForRe2Pattern() { + state.subscribe(new SubscriptionPattern("t.*"), Optional.of(rebalanceListener)); + assertTrue(state.assignedTopicIds().isEmpty()); + + TopicIdPartitionSet reconciledAssignmentFromRegex = new TopicIdPartitionSet(); + reconciledAssignmentFromRegex.addAll(Uuid.randomUuid(), topic, Set.of(0)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); + assertAssignmentAppliedAwaitingCallback(tp0); + + // Simulate callback setting position to start fetching from + state.seek(tp0, 100); + + // Callback completed. 
Partition should be fetchable from the position previously defined + state.enablePartitionsAwaitingCallback(Set.of(tp0)); + assertEquals(0, state.initializingPartitions().size()); + assertTrue(state.isFetchable(tp0)); + assertTrue(state.hasAllFetchPositions()); + assertEquals(100L, state.position(tp0).offset); + } + + @Test + public void testAssignedTopicIdsPreservedWhenReconciliationCompletes() { + state.subscribe(new SubscriptionPattern("t.*"), Optional.of(rebalanceListener)); + assertTrue(state.assignedTopicIds().isEmpty()); + + // First assignment received from coordinator + Uuid firstAssignedUuid = Uuid.randomUuid(); + state.setAssignedTopicIds(Set.of(firstAssignedUuid)); + + // Second assignment received from coordinator (while the 1st is still being reconciled) + Uuid secondAssignedUuid = Uuid.randomUuid(); + state.setAssignedTopicIds(Set.of(firstAssignedUuid, secondAssignedUuid)); + + // First reconciliation completes and updates the subscription state + state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); + + // First assignment should have been applied + assertAssignmentAppliedAwaitingCallback(tp0); + + // Assigned topic IDs should still have both topics (one reconciled, one not reconciled yet) + assertEquals( + Set.of(firstAssignedUuid, secondAssignedUuid), + state.assignedTopicIds(), + "Updating the subscription state when a reconciliation completes " + + "should not overwrite assigned topics that have not been reconciled yet" + ); } @Test @@ -434,29 +553,29 @@ public void testSubscriptionPattern() { @Test public void unsubscribeUserAssignment() { - state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); + state.assignFromUser(Set.of(tp0, tp1)); state.unsubscribe(); - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); - assertEquals(singleton(topic), state.subscription()); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + assertEquals(Set.of(topic), state.subscription()); } @Test public void unsubscribeUserSubscribe() { - state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); state.unsubscribe(); - state.assignFromUser(singleton(tp0)); - assertEquals(singleton(tp0), state.assignedPartitions()); + state.assignFromUser(Set.of(tp0)); + assertEquals(Set.of(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); } @Test public void unsubscription() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1))); - assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); - state.assignFromSubscribed(singleton(tp1)); + state.subscribeFromPattern(Set.of(topic, topic1)); + assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); + state.assignFromSubscribed(Set.of(tp1)); - assertEquals(singleton(tp1), state.assignedPartitions()); + assertEquals(Set.of(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -464,8 +583,8 @@ public void unsubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.assignFromUser(singleton(tp0)); - assertEquals(singleton(tp0), state.assignedPartitions()); + state.assignFromUser(Set.of(tp0)); + assertEquals(Set.of(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -476,15 +595,15 @@ public void
testPreferredReadReplicaLease() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); // Default state assertFalse(state.preferredReadReplica(tp0, 0L).isPresent()); // Set the preferred replica with lease state.updatePreferredReadReplica(tp0, 42, () -> 10L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 9L), value -> assertEquals(value.intValue(), 42)); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 10L), value -> assertEquals(value.intValue(), 42)); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 9L), value -> assertEquals(42, value.intValue())); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 10L), value -> assertEquals(42, value.intValue())); assertFalse(state.preferredReadReplica(tp0, 11L).isPresent()); // Unset the preferred replica @@ -494,20 +613,20 @@ public void testPreferredReadReplicaLease() { // Set to new preferred replica with lease state.updatePreferredReadReplica(tp0, 43, () -> 20L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 11L), value -> assertEquals(value.intValue(), 43)); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 20L), value -> assertEquals(value.intValue(), 43)); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 11L), value -> assertEquals(43, value.intValue())); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 20L), value -> assertEquals(43, value.intValue())); assertFalse(state.preferredReadReplica(tp0, 21L).isPresent()); // Set to new preferred replica without clearing first state.updatePreferredReadReplica(tp0, 44, () -> 30L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 30L), value -> assertEquals(value.intValue(), 44)); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 30L), value -> assertEquals(44, value.intValue())); assertFalse(state.preferredReadReplica(tp0, 31L).isPresent()); } @Test public void testSeekUnvalidatedWithNoOffsetEpoch() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); // Seek with no offset epoch requires no validation no matter what the current leader is state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.empty(), @@ -531,7 +650,7 @@ public void testSeekUnvalidatedWithNoOffsetEpoch() { @Test public void testSeekUnvalidatedWithNoEpochClearsAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); // Seek with no offset epoch requires no validation no matter what the current leader is state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.of(2), @@ -551,7 +670,7 @@ public void testSeekUnvalidatedWithOffsetEpoch() { ApiVersions apiVersions = new ApiVersions(); apiVersions.update(broker1.idString(), NodeApiVersions.create()); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.of(2), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(5)))); @@ -580,7 +699,7 @@ public void testSeekUnvalidatedWithOffsetEpoch() { @Test public void testSeekValidatedShouldClearAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new 
Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -598,7 +717,7 @@ public void testSeekValidatedShouldClearAwaitingValidation() { @Test public void testCompleteValidationShouldClearAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -615,7 +734,7 @@ public void testCompleteValidationShouldClearAwaitingValidation() { @Test public void testOffsetResetWhileAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -629,7 +748,7 @@ public void testOffsetResetWhileAwaitingValidation() { @Test public void testMaybeCompleteValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -656,7 +775,7 @@ public void testMaybeValidatePositionForCurrentLeader() { apiVersions.update("1", oldApis); Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -685,7 +804,7 @@ public void testMaybeValidatePositionForCurrentLeader() { @Test public void testMaybeCompleteValidationAfterPositionChange() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -714,7 +833,7 @@ public void testMaybeCompleteValidationAfterPositionChange() { @Test public void testMaybeCompleteValidationAfterOffsetReset() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -740,7 +859,7 @@ public void testMaybeCompleteValidationAfterOffsetReset() { @Test public void testTruncationDetectionWithResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -769,7 +888,7 @@ public void testTruncationDetectionWithResetPolicy() { public void testTruncationDetectionWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -799,7 +918,7 @@ public void testTruncationDetectionWithoutResetPolicy() { public void testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -824,7 +943,7 @@ public void 
testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { public void testTruncationDetectionUnknownDivergentOffsetWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -872,7 +991,7 @@ public void resetOffsetNoValidation() { // Check that offset reset works when we can't validate offsets (older brokers) Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); // Reset offsets state.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); @@ -918,7 +1037,7 @@ public void resetOffsetNoValidation() { @Test public void nullPositionLagOnNoPosition() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); assertNull(state.partitionLag(tp0, IsolationLevel.READ_UNCOMMITTED)); assertNull(state.partitionLag(tp0, IsolationLevel.READ_COMMITTED)); @@ -932,7 +1051,7 @@ public void nullPositionLagOnNoPosition() { @Test public void testPositionOrNull() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); state.seek(tp0, 5); @@ -942,7 +1061,7 @@ public void testPositionOrNull() { @Test public void testTryUpdatingHighWatermark() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long highWatermark = 10L; @@ -953,7 +1072,7 @@ public void testTryUpdatingHighWatermark() { @Test public void testTryUpdatingLogStartOffset() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long position = 25; state.seek(tp0, position); @@ -966,7 +1085,7 @@ public void testTryUpdatingLogStartOffset() { @Test public void testTryUpdatingLastStableOffset() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long lastStableOffset = 10L; @@ -977,7 +1096,7 @@ public void testTryUpdatingLastStableOffset() { @Test public void testTryUpdatingPreferredReadReplica() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final int preferredReadReplicaId = 10; @@ -990,7 +1109,7 @@ public void testTryUpdatingPreferredReadReplica() { @Test public void testRequestOffsetResetIfPartitionAssigned() { - state.assignFromUser(Collections.singleton(tp0)); + state.assignFromUser(Set.of(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); state.requestOffsetResetIfPartitionAssigned(tp0); @@ -1001,4 +1120,33 @@ public void testRequestOffsetResetIfPartitionAssigned() { assertThrows(IllegalStateException.class, () -> state.isOffsetResetNeeded(unassignedPartition)); } + + // This test ensures the "fetchablePartitions" does not run the custom predicate if the partition is not fetchable + // This func is used in the hot path for fetching, to find fetchable partitions that are not in the buffer, + // so it should avoid evaluating the predicate if not needed. 
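+    // Illustrative only (hypothetical caller, not actual consumer code): given a costly check such as
+    // state.fetchablePartitions(tp -> buffered.contains(tp)), the test below verifies that the costly
+    // predicate is evaluated only for partitions that already pass the cheap fetchability checks.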
+ @Test + public void testFetchablePartitionsPerformsCheapChecksFirst() { + // Setup fetchable partition and pause it + state.assignFromUser(Set.of(tp0)); + state.seek(tp0, 100); + assertTrue(state.isFetchable(tp0)); + state.pause(tp0); + + // Retrieve fetchable partitions with custom predicate. + AtomicBoolean predicateEvaluated = new AtomicBoolean(false); + Predicate isBuffered = tp -> { + predicateEvaluated.set(true); + return true; + }; + List fetchablePartitions = state.fetchablePartitions(isBuffered); + assertTrue(fetchablePartitions.isEmpty()); + assertFalse(predicateEvaluated.get(), "Custom predicate should not be evaluated when partitions are not fetchable"); + + // Resume partition and retrieve fetchable again + state.resume(tp0); + predicateEvaluated.set(false); + fetchablePartitions = state.fetchablePartitions(isBuffered); + assertTrue(predicateEvaluated.get()); + assertEquals(tp0, fetchablePartitions.get(0)); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSetTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSetTest.java new file mode 100644 index 0000000000000..f14e911578acc --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSetTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class TopicIdPartitionSetTest { + + private TopicIdPartitionSet topicIdPartitionSet; + + + @BeforeEach + public void setUp() { + topicIdPartitionSet = new TopicIdPartitionSet(); + } + + @Test + public void testIsEmpty() { + assertTrue(topicIdPartitionSet.isEmpty()); + + TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + topicIdPartitionSet.add(topicIdPartition); + + assertFalse(topicIdPartitionSet.isEmpty()); + } + + @Test + public void testRetrieveTopicPartitions() { + TopicPartition tp1 = new TopicPartition("foo", 0); + TopicPartition tp2 = new TopicPartition("foo", 1); + TopicPartition tp3 = new TopicPartition("bar", 0); + Uuid topicId1 = Uuid.randomUuid(); + Uuid topicId2 = Uuid.randomUuid(); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, tp1)); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, tp2)); + topicIdPartitionSet.add(new TopicIdPartition(topicId2, tp3)); + + Set topicPartitionSet = topicIdPartitionSet.topicPartitions(); + assertEquals(3, topicPartitionSet.size()); + assertTrue(topicPartitionSet.contains(tp1)); + assertTrue(topicPartitionSet.contains(tp2)); + assertTrue(topicPartitionSet.contains(tp3)); + } + + @Test + public void testRetrieveTopicIds() { + Uuid topicId1 = Uuid.randomUuid(); + Uuid topicId2 = Uuid.randomUuid(); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, new TopicPartition("foo", 0))); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, new TopicPartition("foo", 1))); + topicIdPartitionSet.add(new TopicIdPartition(topicId2, new TopicPartition("bar", 0))); + + Set topicIds = topicIdPartitionSet.topicIds(); + assertEquals(2, topicIds.size()); + assertTrue(topicIds.contains(topicId1)); + assertTrue(topicIds.contains(topicId2)); + } + + @Test + public void testRetrieveTopicNames() { + String topic1 = "foo"; + String topic2 = "bar"; + Uuid topicId1 = Uuid.randomUuid(); + Uuid topicId2 = Uuid.randomUuid(); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, new TopicPartition(topic1, 0))); + topicIdPartitionSet.add(new TopicIdPartition(topicId1, new TopicPartition(topic1, 1))); + topicIdPartitionSet.add(new TopicIdPartition(topicId2, new TopicPartition(topic2, 0))); + + Set topicNames = topicIdPartitionSet.topicNames(); + assertEquals(2, topicNames.size()); + assertTrue(topicNames.contains(topic1)); + assertTrue(topicNames.contains(topic2)); + } + + @Test + public void testRetrievedTopicNamesAreSorted() { + LinkedHashSet expectedOrderedTopicPartitions = new LinkedHashSet<>(); + expectedOrderedTopicPartitions.add(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic-z", 1))); + expectedOrderedTopicPartitions.add(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic-z", 0))); + expectedOrderedTopicPartitions.add(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic-a", 0))); + expectedOrderedTopicPartitions.add(new 
TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic-a", 1))); + + List reversed = new ArrayList<>(expectedOrderedTopicPartitions); + Collections.reverse(reversed); + reversed.forEach(tp -> topicIdPartitionSet.add(tp)); + + List topicPartitions = new ArrayList<>(topicIdPartitionSet.toTopicNamePartitionSet()); + + assertEquals(4, topicPartitions.size()); + assertEquals(new TopicPartition("topic-a", 0), topicPartitions.get(0)); + assertEquals(new TopicPartition("topic-a", 1), topicPartitions.get(1)); + assertEquals(new TopicPartition("topic-z", 0), topicPartitions.get(2)); + assertEquals(new TopicPartition("topic-z", 1), topicPartitions.get(3)); + } + + @Test + public void testToString() { + Uuid topicId1 = Uuid.randomUuid(); + TopicIdPartition tp1 = new TopicIdPartition(topicId1, new TopicPartition("topic-a", 0)); + TopicIdPartition tp2 = new TopicIdPartition(topicId1, new TopicPartition("topic-a", 1)); + TopicIdPartition tp3 = new TopicIdPartition(topicId1, new TopicPartition("topic-b", 0)); + topicIdPartitionSet.add(tp1); + topicIdPartitionSet.add(tp2); + topicIdPartitionSet.add(tp3); + + String toString = topicIdPartitionSet.toString(); + assertEquals(List.of(tp1, tp2, tp3).toString(), toString); + } + + @Test + public void testToStringSorted() { + Uuid topicId1 = Uuid.randomUuid(); + TopicIdPartition tp1 = new TopicIdPartition(topicId1, new TopicPartition("topic-a", 0)); + TopicIdPartition tpz1 = new TopicIdPartition(topicId1, new TopicPartition("topic-z", 0)); + TopicIdPartition tpz2 = new TopicIdPartition(topicId1, new TopicPartition("topic-z", 1)); + topicIdPartitionSet.add(tpz2); + topicIdPartitionSet.add(tpz1); + topicIdPartitionSet.add(tp1); + + String toString = topicIdPartitionSet.toString(); + assertEquals(List.of(tp1, tpz1, tpz2).toString(), toString); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java index c977c3ebf3da8..f8cc3ee0cccde 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java @@ -73,6 +73,7 @@ public void setup() { props.put(ALLOW_AUTO_CREATE_TOPICS_CONFIG, false); props.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); this.topicMetadataRequestManager = spy(new TopicMetadataRequestManager( new LogContext(), time, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java index 9cd306a9be157..dde3f567132fc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java @@ -31,15 +31,19 @@ import org.apache.kafka.clients.consumer.internals.NetworkClientDelegate; import org.apache.kafka.clients.consumer.internals.OffsetsRequestManager; import org.apache.kafka.clients.consumer.internals.RequestManagers; +import org.apache.kafka.clients.consumer.internals.StreamsGroupHeartbeatRequestManager; +import 
org.apache.kafka.clients.consumer.internals.StreamsMembershipManager; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.clients.consumer.internals.TopicMetadataRequestManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -86,6 +90,8 @@ public class ApplicationEventProcessorTest { private final OffsetsRequestManager offsetsRequestManager = mock(OffsetsRequestManager.class); private SubscriptionState subscriptionState = mock(SubscriptionState.class); private final ConsumerMetadata metadata = mock(ConsumerMetadata.class); + private final StreamsGroupHeartbeatRequestManager streamsGroupHeartbeatRequestManager = mock(StreamsGroupHeartbeatRequestManager.class); + private final StreamsMembershipManager streamsMembershipManager = mock(StreamsMembershipManager.class); private ApplicationEventProcessor processor; private void setupProcessor(boolean withGroupId) { @@ -109,6 +115,27 @@ private void setupProcessor(boolean withGroupId) { ); } + private void setupStreamProcessor(boolean withGroupId) { + RequestManagers requestManagers = new RequestManagers( + new LogContext(), + offsetsRequestManager, + mock(TopicMetadataRequestManager.class), + mock(FetchRequestManager.class), + withGroupId ? Optional.of(mock(CoordinatorRequestManager.class)) : Optional.empty(), + withGroupId ? Optional.of(commitRequestManager) : Optional.empty(), + withGroupId ? Optional.of(heartbeatRequestManager) : Optional.empty(), + Optional.empty(), + withGroupId ? Optional.of(streamsGroupHeartbeatRequestManager) : Optional.empty(), + withGroupId ? 
Optional.of(streamsMembershipManager) : Optional.empty() + ); + processor = new ApplicationEventProcessor( + new LogContext(), + requestManagers, + metadata, + subscriptionState + ); + } + @Test public void testPrepClosingCommitEvents() { setupProcessor(true); @@ -556,6 +583,78 @@ public void testAsyncCommitEventWithException() { assertFutureThrows(IllegalStateException.class, event.future()); } + @Test + public void testStreamsOnTasksRevokedCallbackCompletedEvent() { + setupStreamProcessor(true); + StreamsOnTasksRevokedCallbackCompletedEvent event = + new StreamsOnTasksRevokedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + processor.process(event); + verify(streamsMembershipManager).onTasksRevokedCallbackCompleted(event); + } + + @Test + public void testStreamsOnTasksRevokedCallbackCompletedEventWithoutStreamsMembershipManager() { + setupStreamProcessor(false); + StreamsOnTasksRevokedCallbackCompletedEvent event = + new StreamsOnTasksRevokedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { + logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); + processor.process(event); + assertTrue(logAppender.getMessages().stream().anyMatch(e -> + e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + + "of the onTasksRevoked callback execution could not be sent"))); + verify(streamsMembershipManager, never()).onTasksRevokedCallbackCompleted(event); + } + } + + @Test + public void testStreamsOnTasksAssignedCallbackCompletedEvent() { + setupStreamProcessor(true); + StreamsOnTasksAssignedCallbackCompletedEvent event = + new StreamsOnTasksAssignedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + processor.process(event); + verify(streamsMembershipManager).onTasksAssignedCallbackCompleted(event); + } + + @Test + public void testStreamsOnTasksAssignedCallbackCompletedEventWithoutStreamsMembershipManager() { + setupStreamProcessor(false); + StreamsOnTasksAssignedCallbackCompletedEvent event = + new StreamsOnTasksAssignedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { + logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); + processor.process(event); + assertTrue(logAppender.getMessages().stream().anyMatch(e -> + e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + + "of the onTasksAssigned callback execution could not be sent"))); + verify(streamsMembershipManager, never()).onTasksAssignedCallbackCompleted(event); + } + } + + @Test + public void testStreamsOnAllTasksLostCallbackCompletedEvent() { + setupStreamProcessor(true); + StreamsOnAllTasksLostCallbackCompletedEvent event = + new StreamsOnAllTasksLostCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + processor.process(event); + verify(streamsMembershipManager).onAllTasksLostCallbackCompleted(event); + } + + @Test + public void testStreamsOnAllTasksLostCallbackCompletedEventWithoutStreamsMembershipManager() { + setupStreamProcessor(false); + StreamsOnAllTasksLostCallbackCompletedEvent event = + new StreamsOnAllTasksLostCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); + try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { + 
logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); + processor.process(event); + assertTrue(logAppender.getMessages().stream().anyMatch(e -> + e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + + "of the onAllTasksLost callback execution could not be sent"))); + verify(streamsMembershipManager, never()).onAllTasksLostCallbackCompleted(event); + } + } + private List mockCommitResults() { return Collections.singletonList(mock(NetworkClientDelegate.UnsentRequest.class)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java index 2913bcfad70f1..876bc3ffa12da 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java @@ -20,12 +20,14 @@ import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Set; +import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -36,6 +38,13 @@ public class AsyncConsumerMetricsTest { private final Metrics metrics = new Metrics(); private AsyncConsumerMetrics consumerMetrics; + public static Stream groupNameProvider() { + return Stream.of( + CONSUMER_METRIC_GROUP, + CONSUMER_SHARE_METRIC_GROUP + ); + } + @AfterEach public void tearDown() { if (consumerMetrics != null) { @@ -44,43 +53,29 @@ public void tearDown() { metrics.close(); } - @Test - public void shouldMetricNames() { + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldMetricNames(String groupName) { // create - consumerMetrics = new AsyncConsumerMetrics(metrics); - HashSet expectedMetrics = new HashSet<>(Arrays.asList( - metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP), - metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP), - metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP), - metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP) - )); - expectedMetrics.forEach( - metricName -> assertTrue( - metrics.metrics().containsKey(metricName), - "Missing metric: " + metricName - ) + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + Set expectedMetrics = Set.of( + metrics.metricName("time-between-network-thread-poll-avg", groupName), + metrics.metricName("time-between-network-thread-poll-max", groupName), + metrics.metricName("application-event-queue-size", groupName), + metrics.metricName("application-event-queue-time-avg", groupName), + metrics.metricName("application-event-queue-time-max", groupName), + metrics.metricName("application-event-queue-processing-time-avg", 
groupName), + metrics.metricName("application-event-queue-processing-time-max", groupName), + metrics.metricName("unsent-requests-queue-size", groupName), + metrics.metricName("unsent-requests-queue-time-avg", groupName), + metrics.metricName("unsent-requests-queue-time-max", groupName), + metrics.metricName("background-event-queue-size", groupName), + metrics.metricName("background-event-queue-time-avg", groupName), + metrics.metricName("background-event-queue-time-max", groupName), + metrics.metricName("background-event-queue-processing-time-avg", groupName), + metrics.metricName("background-event-queue-processing-time-max", groupName) ); - - HashSet expectedConsumerMetrics = new HashSet<>(Arrays.asList( - metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP), - metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP), - metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP), - metrics.metricName("application-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("application-event-queue-processing-time-max", CONSUMER_METRIC_GROUP), - metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP), - metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP), - metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP), - metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP), - metrics.metricName("background-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("background-event-queue-processing-time-max", CONSUMER_METRIC_GROUP) - )); - expectedConsumerMetrics.forEach( + expectedMetrics.forEach( metricName -> assertTrue( metrics.metrics().containsKey(metricName), "Missing metric: " + metricName @@ -95,143 +90,146 @@ public void shouldMetricNames() { "Metric present after close: " + metricName ) ); - expectedConsumerMetrics.forEach( - metricName -> assertFalse( - metrics.metrics().containsKey(metricName), - "Metric present after close: " + metricName - ) - ); } - @Test - public void shouldRecordTimeBetweenNetworkThreadPoll() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordTimeBetweenNetworkThreadPoll(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordTimeBetweenNetworkThreadPoll(METRIC_VALUE); // Then: - assertMetricValue("time-between-network-thread-poll-avg"); - assertMetricValue("time-between-network-thread-poll-max"); + assertMetricValue("time-between-network-thread-poll-avg", groupName); + assertMetricValue("time-between-network-thread-poll-max", groupName); } - @Test - public void shouldRecordApplicationEventQueueSize() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordApplicationEventQueueSize(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordApplicationEventQueueSize(10); // Then: assertEquals( + (double) 10, metrics.metric( metrics.metricName( "application-event-queue-size", 
- CONSUMER_METRIC_GROUP + groupName ) - ).metricValue(), - (double) 10 + ).metricValue() ); } - @Test - public void shouldRecordApplicationEventQueueTime() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordApplicationEventQueueTime(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordApplicationEventQueueTime(METRIC_VALUE); // Then: - assertMetricValue("application-event-queue-time-avg"); - assertMetricValue("application-event-queue-time-max"); + assertMetricValue("application-event-queue-time-avg", groupName); + assertMetricValue("application-event-queue-time-max", groupName); } - @Test - public void shouldRecordApplicationEventQueueProcessingTime() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordApplicationEventQueueProcessingTime(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordApplicationEventQueueProcessingTime(METRIC_VALUE); // Then: - assertMetricValue("application-event-queue-processing-time-avg"); - assertMetricValue("application-event-queue-processing-time-max"); + assertMetricValue("application-event-queue-processing-time-avg", groupName); + assertMetricValue("application-event-queue-processing-time-max", groupName); } - @Test - public void shouldRecordUnsentRequestsQueueSize() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordUnsentRequestsQueueSize(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordUnsentRequestsQueueSize(10, 100); // Then: assertEquals( + (double) 10, metrics.metric( metrics.metricName( "unsent-requests-queue-size", - CONSUMER_METRIC_GROUP + groupName ) - ).metricValue(), - (double) 10 + ).metricValue() ); } - @Test - public void shouldRecordUnsentRequestsQueueTime() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordUnsentRequestsQueueTime(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordUnsentRequestsQueueTime(METRIC_VALUE); // Then: - assertMetricValue("unsent-requests-queue-time-avg"); - assertMetricValue("unsent-requests-queue-time-max"); + assertMetricValue("unsent-requests-queue-time-avg", groupName); + assertMetricValue("unsent-requests-queue-time-max", groupName); } - @Test - public void shouldRecordBackgroundEventQueueSize() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordBackgroundEventQueueSize(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordBackgroundEventQueueSize(10); // Then: assertEquals( + (double) 10, metrics.metric( metrics.metricName( "background-event-queue-size", - CONSUMER_METRIC_GROUP + groupName ) - ).metricValue(), - (double) 10 + ).metricValue() ); } - @Test - public void shouldRecordBackgroundEventQueueTime() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordBackgroundEventQueueTime(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, 
groupName); // When: consumerMetrics.recordBackgroundEventQueueTime(METRIC_VALUE); // Then: - assertMetricValue("background-event-queue-time-avg"); - assertMetricValue("background-event-queue-time-max"); + assertMetricValue("background-event-queue-time-avg", groupName); + assertMetricValue("background-event-queue-time-max", groupName); } - @Test - public void shouldRecordBackgroundEventQueueProcessingTime() { - consumerMetrics = new AsyncConsumerMetrics(metrics); + @ParameterizedTest + @MethodSource("groupNameProvider") + public void shouldRecordBackgroundEventQueueProcessingTime(String groupName) { + consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); // When: consumerMetrics.recordBackgroundEventQueueProcessingTime(METRIC_VALUE); // Then: - assertMetricValue("background-event-queue-processing-time-avg"); - assertMetricValue("background-event-queue-processing-time-avg"); + assertMetricValue("background-event-queue-processing-time-avg", groupName); + assertMetricValue("background-event-queue-processing-time-max", groupName); } - private void assertMetricValue(final String name) { + private void assertMetricValue(final String name, final String groupName) { assertEquals( + (double) METRIC_VALUE, metrics.metric( metrics.metricName( name, - CONSUMER_METRIC_GROUP + groupName ) - ).metricValue(), - (double) METRIC_VALUE + ).metricValue() ); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManagerTest.java new file mode 100644 index 0000000000000..a5c52fac09093 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManagerTest.java @@ -0,0 +1,341 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals.metrics; + +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.SubscriptionState; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.metrics.MetricConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Optional; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + + +class ConsumerRebalanceMetricsManagerTest { + + private Time time; + private Metrics metrics; + private SubscriptionState subscriptionState; + private ConsumerRebalanceMetricsManager metricsManager; + private MetricConfig metricConfig; + private long windowSizeMs; + private int numSamples; + + @BeforeEach + public void setUp() { + time = new MockTime(); + // Use MetricConfig with its default values + windowSizeMs = 30000; // 30 seconds - default value + numSamples = 2; // default value + metricConfig = new MetricConfig() + .samples(numSamples) + .timeWindow(windowSizeMs, java.util.concurrent.TimeUnit.MILLISECONDS); + metrics = new Metrics(metricConfig, time); + subscriptionState = new SubscriptionState(mock(LogContext.class), AutoOffsetResetStrategy.EARLIEST); + metricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState); + } + + @AfterEach + public void tearDown() { + metrics.close(); + } + + @Test + public void testAssignedPartitionCountMetric() { + assertNotNull(metrics.metric(metricsManager.assignedPartitionsCount), "Metric assigned-partitions has not been registered as expected"); + + // Check for manually assigned partitions + subscriptionState.assignFromUser(Set.of(new TopicPartition("topic", 0), new TopicPartition("topic", 1))); + assertEquals(2.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue()); + subscriptionState.assignFromUser(Set.of()); + assertEquals(0.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue()); + + subscriptionState.unsubscribe(); + assertEquals(0.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue()); + + // Check for automatically assigned partitions + subscriptionState.subscribe(Set.of("topic"), Optional.empty()); + subscriptionState.assignFromSubscribed(Set.of(new TopicPartition("topic", 0))); + assertEquals(1.0d, metrics.metric(metricsManager.assignedPartitionsCount).metricValue()); + } + + @Test + public void testRebalanceTimingMetrics() { + + // Verify timing metrics are registered + assertNotNull(metrics.metric(metricsManager.rebalanceLatencyAvg)); + assertNotNull(metrics.metric(metricsManager.rebalanceLatencyMax)); + assertNotNull(metrics.metric(metricsManager.rebalanceLatencyTotal)); + assertNotNull(metrics.metric(metricsManager.rebalanceTotal)); + + // Record first rebalance (10ms duration) + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + // Verify metrics after first rebalance + assertEquals(10.0d, 
metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue()); + assertEquals(10.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue()); + assertEquals(10.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue()); + assertEquals(1.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue()); + + // Record second rebalance (30ms duration) + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(30); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + // Verify metrics after second rebalance + assertEquals(20.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(), + "Average latency should be (10 + 30) / 2 = 20ms"); + assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(), + "Max latency should be max(10, 30) = 30ms"); + assertEquals(40.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(), + "Total latency should be 10 + 30 = 40ms"); + assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue()); + + // Record third rebalance (50ms duration) + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(50); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + // Verify metrics after third rebalance + assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(), + "Average latency should be (10 + 30 + 50) / 3 = 30ms"); + assertEquals(50.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(), + "Max latency should be max(10, 30, 50) = 50ms"); + assertEquals(90.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(), + "Total latency should be 10 + 30 + 50 = 90ms"); + assertEquals(3.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue()); + } + + @Test + public void testRebalanceRateMetric() { + + // Verify rate metric is registered + assertNotNull(metrics.metric(metricsManager.rebalanceRatePerHour)); + + // Record 3 rebalances within 30ms total (3 x 10ms) + int rebalanceCount = 3; + long startTime = time.milliseconds(); + for (int i = 0; i < rebalanceCount; i++) { + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + metricsManager.recordRebalanceEnded(time.milliseconds()); + } + long endTime = time.milliseconds(); + long actualElapsedMs = endTime - startTime; + + double ratePerHour = (Double) metrics.metric(metricsManager.rebalanceRatePerHour).metricValue(); + + // The Rate metric calculation: + // - Uses elapsed time from the oldest sample + // - Ensures minimum window size of (numSamples - 1) * windowSizeMs + // - With default config: minWindow = (2-1) * 30000 = 30000ms + long minWindowMs = (numSamples - 1) * windowSizeMs; // (2-1) * 30000 = 30000ms + + // Since actualElapsedMs (30ms) is much less than minWindowMs (30000ms), + // the rate calculation will use minWindowMs as the window + // Rate per hour = count / (windowMs / 1000) * 3600 + double expectedRatePerHour = (double) rebalanceCount / (minWindowMs / 1000.0) * 3600.0; + + assertEquals(expectedRatePerHour, ratePerHour, 1.0, + String.format("With %d rebalances in %dms, min window %dms: expecting %.1f rebalances/hour", + rebalanceCount, actualElapsedMs, minWindowMs, expectedRatePerHour)); + } + + @Test + public void testFailedRebalanceMetrics() { + + // Verify failed rebalance metrics are registered + assertNotNull(metrics.metric(metricsManager.failedRebalanceTotal)); + assertNotNull(metrics.metric(metricsManager.failedRebalanceRate)); + + assertEquals(0.0d, 
metrics.metric(metricsManager.failedRebalanceTotal).metricValue(), + "Initially, there should be no failed rebalances"); + + // Start a rebalance but don't complete it + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + + metricsManager.maybeRecordRebalanceFailed(); + assertEquals(1.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(), + "Failed rebalance count should increment to 1 after recording failure"); + + // Complete a successful rebalance + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + metricsManager.maybeRecordRebalanceFailed(); + assertEquals(1.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(), + "Failed count should not increment after successful rebalance completes"); + + // Start another rebalance, don't complete it, then record failure + time.sleep(10); + metricsManager.recordRebalanceStarted(time.milliseconds()); + assertTrue(metricsManager.rebalanceStarted(), "Rebalance should be in progress"); + time.sleep(10); + // Don't call recordRebalanceEnded() to simulate an incomplete rebalance + metricsManager.maybeRecordRebalanceFailed(); + assertEquals(2.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue()); + + double failedRate = (Double) metrics.metric(metricsManager.failedRebalanceRate).metricValue(); + + // Calculate expected failed rate based on Rate metric behavior + // We had 2 failures over ~40ms, but minimum window is (numSamples - 1) * windowSizeMs + long minWindowMs = (numSamples - 1) * windowSizeMs; // (2-1) * 30000 = 30000ms + double expectedFailedRatePerHour = 2.0 / (minWindowMs / 1000.0) * 3600.0; + + assertEquals(expectedFailedRatePerHour, failedRate, 1.0, + String.format("With 2 failures, min window %dms: expecting %.1f failures/hour", + minWindowMs, expectedFailedRatePerHour)); + } + + @Test + public void testLastRebalanceSecondsAgoMetric() { + + // Verify metric is registered + assertNotNull(metrics.metric(metricsManager.lastRebalanceSecondsAgo)); + + assertEquals(-1.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(), + "Should return -1 when no rebalance has occurred"); + + // Complete a rebalance + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + assertEquals(0.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(), + "Should return 0 immediately after rebalance completes"); + + // Advance time by 5 seconds + time.sleep(5000); + assertEquals(5.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue()); + + // Advance time by another 10 seconds + time.sleep(10000); + assertEquals(15.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue()); + + // Complete another rebalance + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(20); + metricsManager.recordRebalanceEnded(time.milliseconds()); + + assertEquals(0.0d, metrics.metric(metricsManager.lastRebalanceSecondsAgo).metricValue(), + "Should reset to 0 after a new rebalance completes"); + } + + @Test + public void testRebalanceStartedFlag() { + + assertFalse(metricsManager.rebalanceStarted(), + "Initially, no rebalance should be in progress"); + + metricsManager.recordRebalanceStarted(time.milliseconds()); + assertTrue(metricsManager.rebalanceStarted(), + "Rebalance should be marked as started after recordRebalanceStarted()"); + + time.sleep(10); + 
metricsManager.recordRebalanceEnded(time.milliseconds()); + assertFalse(metricsManager.rebalanceStarted(), + "Rebalance should not be in progress after recordRebalanceEnded()"); + + // Start another rebalance - advance time first + time.sleep(100); + metricsManager.recordRebalanceStarted(time.milliseconds()); + assertTrue(metricsManager.rebalanceStarted(), + "New rebalance should be marked as started"); + } + + @Test + public void testMultipleConsecutiveFailures() { + + // Record multiple consecutive failures + for (int i = 0; i < 5; i++) { + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(10); + metricsManager.maybeRecordRebalanceFailed(); + } + + assertEquals(5.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(), + "Should have recorded 5 consecutive failed rebalances"); + + assertEquals(0.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue(), + "Successful rebalance count should remain 0 when only failures occur"); + } + + @Test + public void testMixedSuccessAndFailureScenarios() { + + // Success -> Failure -> Success -> Failure pattern + // First success + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(20); + metricsManager.recordRebalanceEnded(time.milliseconds()); + assertEquals(1.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue()); + + // First failure + time.sleep(10); + metricsManager.recordRebalanceStarted(time.milliseconds()); + assertTrue(metricsManager.rebalanceStarted(), "First failure rebalance should be in progress"); + time.sleep(30); + metricsManager.maybeRecordRebalanceFailed(); + + double failedAfterFirst = (Double) metrics.metric(metricsManager.failedRebalanceTotal).metricValue(); + assertEquals(1.0d, failedAfterFirst, "Should have recorded one failed rebalance after first failure"); + + // Second success + time.sleep(10); + metricsManager.recordRebalanceStarted(time.milliseconds()); + time.sleep(40); + metricsManager.recordRebalanceEnded(time.milliseconds()); + assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue()); + + // Second failure + time.sleep(10); + metricsManager.recordRebalanceStarted(time.milliseconds()); + assertTrue(metricsManager.rebalanceStarted(), "Second failure rebalance should be in progress"); + time.sleep(50); + metricsManager.maybeRecordRebalanceFailed(); + + assertEquals(2.0d, metrics.metric(metricsManager.rebalanceTotal).metricValue(), + "Should have 2 successful rebalances in mixed scenario"); + assertEquals(2.0d, metrics.metric(metricsManager.failedRebalanceTotal).metricValue(), + "Should have 2 failed rebalances in mixed scenario"); + + assertEquals(30.0d, metrics.metric(metricsManager.rebalanceLatencyAvg).metricValue(), + "Average latency should only include successful rebalances: (20 + 40) / 2 = 30ms"); + assertEquals(40.0d, metrics.metric(metricsManager.rebalanceLatencyMax).metricValue(), + "Max latency should be 40ms from successful rebalances only"); + assertEquals(60.0d, metrics.metric(metricsManager.rebalanceLatencyTotal).metricValue(), + "Total latency should only include successful rebalances: 20 + 40 = 60ms"); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index fbb3484a03f7f..9d8aa35f8fe90 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -32,12 +32,14 @@ 
import org.apache.kafka.clients.producer.internals.RecordAccumulator; import org.apache.kafka.clients.producer.internals.Sender; import org.apache.kafka.clients.producer.internals.TransactionManager; +import org.apache.kafka.clients.producer.internals.TransactionalRequestResult; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigException; @@ -45,10 +47,13 @@ import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; +import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeader; +import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; import org.apache.kafka.common.message.ApiVersionsResponseData; @@ -73,6 +78,7 @@ import org.apache.kafka.common.requests.EndTxnResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; +import org.apache.kafka.common.requests.InitProducerIdRequest; import org.apache.kafka.common.requests.InitProducerIdResponse; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.MetadataResponse; @@ -85,10 +91,10 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetrySender; -import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.utils.Time; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.MockPartitioner; @@ -102,6 +108,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.MockedStatic; import org.mockito.Mockito; @@ -158,6 +165,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.notNull; import static org.mockito.Mockito.atMostOnce; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.never; @@ -211,7 +219,7 @@ public void setup(TestInfo testInfo) { } @AfterEach - public void detectLeaks() { + public void detectLeaks() throws InterruptedException { // Assert no thread leakage of Kafka producer. 
TestUtils.assertNoLeakedThreadsWithNameAndDaemonStatus(NETWORK_THREAD_PREFIX, Boolean.TRUE); } @@ -227,21 +235,14 @@ public void testOverwriteAcksAndRetriesForIdempotentProducers() { ProducerConfig config = new ProducerConfig(props); assertTrue(config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)); assertTrue(Stream.of("-1", "all").anyMatch(each -> each.equalsIgnoreCase(config.getString(ProducerConfig.ACKS_CONFIG)))); - assertEquals((int) config.getInt(ProducerConfig.RETRIES_CONFIG), Integer.MAX_VALUE); + assertEquals(Integer.MAX_VALUE, (int) config.getInt(ProducerConfig.RETRIES_CONFIG)); assertTrue(config.getString(ProducerConfig.CLIENT_ID_CONFIG).equalsIgnoreCase("producer-" + config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG))); } @Test public void testAcksAndIdempotenceForIdempotentProducers() { - Properties baseProps = new Properties() {{ - setProperty( - ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - setProperty( - ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - setProperty( - ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - }}; + Properties baseProps = baseProperties(); Properties validProps = new Properties() {{ putAll(baseProps); @@ -344,11 +345,7 @@ public void testAcksAndIdempotenceForIdempotentProducers() { @Test public void testRetriesAndIdempotenceForIdempotentProducers() { - Properties baseProps = new Properties() {{ - setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - }}; + Properties baseProps = baseProperties(); Properties validProps = new Properties() {{ putAll(baseProps); @@ -410,13 +407,17 @@ public void testRetriesAndIdempotenceForIdempotentProducers() { "Must set retries to non-zero when using the transactional producer."); } + private Properties baseProperties() { + Properties baseProps = new Properties(); + baseProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + baseProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + baseProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + return baseProps; + } + @Test public void testInflightRequestsAndIdempotenceForIdempotentProducers() { - Properties baseProps = new Properties() {{ - setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - }}; + Properties baseProps = baseProperties(); Properties validProps = new Properties() {{ putAll(baseProps); @@ -578,7 +579,7 @@ public void testConstructorWithNotStringKey() { ConfigException ce = assertThrows( ConfigException.class, () -> new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())); - assertTrue(ce.getMessage().contains("not string key"), "Unexpected exception message: " + ce.getMessage()); + assertTrue(ce.getMessage().contains("One or more keys is not a string."), "Unexpected exception message: " + ce.getMessage()); } @Test @@ -603,12 +604,11 @@ public void testSerializerClose() { final int oldInitCount = MockSerializer.INIT_COUNT.get(); final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); - KafkaProducer producer = new 
KafkaProducer<>( - configs, new MockSerializer(), new MockSerializer()); - assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); - assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); + try (var ignored = new KafkaProducer<>(configs, new MockSerializer(), new MockSerializer())) { + assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); + assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); + } - producer.close(); assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get()); } @@ -637,15 +637,15 @@ props, new StringSerializer(), new StringSerializer())) { MockProducerInterceptor.resetCounters(); } } + @Test public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemainingInstances() { - final int targetInterceptor = 3; + final int targetInterceptor = 1; try { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, org.apache.kafka.test.MockProducerInterceptor.class.getName() + ", " - + org.apache.kafka.test.MockProducerInterceptor.class.getName() + ", " - + org.apache.kafka.test.MockProducerInterceptor.class.getName()); + props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, + CloseInterceptor.class.getName() + "," + MockProducerInterceptor.class.getName()); props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something"); MockProducerInterceptor.setThrowOnConfigExceptionThreshold(targetInterceptor); @@ -654,13 +654,16 @@ public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemai new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()) ); - assertEquals(3, MockProducerInterceptor.CONFIG_COUNT.get()); - assertEquals(3, MockProducerInterceptor.CLOSE_COUNT.get()); + assertEquals(1, MockProducerInterceptor.CONFIG_COUNT.get()); + assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get()); + assertEquals(1, CloseInterceptor.CLOSE_COUNT.get()); } finally { MockProducerInterceptor.resetCounters(); + CloseInterceptor.resetCounters(); } } + @Test public void testPartitionerClose() { try { @@ -1084,13 +1087,14 @@ public void testTopicExpiryInMetadata() throws InterruptedException { @SuppressWarnings("unchecked") @Test - public void testHeaders() { + public void testHeadersSuccess() { doTestHeaders(Serializer.class); } private > void doTestHeaders(Class serializerClassToMock) { Map configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + configs.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorForHeaders.class.getName()); Serializer keySerializer = mock(serializerClassToMock); Serializer valueSerializer = mock(serializerClassToMock); @@ -1119,7 +1123,9 @@ private > void doTestHeaders(Class serializerCla producer.send(record, null); //ensure headers are closed and cannot be mutated post send - assertThrows(IllegalStateException.class, () -> record.headers().add(new RecordHeader("test", "test".getBytes()))); + RecordHeaders recordHeaders = (RecordHeaders) record.headers(); + assertTrue(recordHeaders.isReadOnly()); + assertThrows(IllegalStateException.class, () -> recordHeaders.add(new RecordHeader("test", "test".getBytes()))); //ensure existing headers are not changed, and last header for key is still original value assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes()); @@ -1130,6 +1136,28 @@ private > void 
doTestHeaders(Class serializerCla producer.close(Duration.ofMillis(0)); } + @Test + public void testHeadersFailure() { + Properties props = new Properties(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5); + props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorForHeaders.class.getName()); + Serializer keySerializer = mock(StringSerializer.class); + Serializer valueSerializer = mock(StringSerializer.class); + + KafkaProducer producer = new KafkaProducer<>(props, keySerializer, valueSerializer); + ProducerRecord record = new ProducerRecord<>("topic", "key", "value"); + Future future = producer.send(record, (recordMetadata, exception) -> { }); + try { + TestUtils.assertFutureThrows(TimeoutException.class, future); + //ensure headers are writable if send failure + RecordHeaders recordHeaders = (RecordHeaders) record.headers(); + assertFalse(recordHeaders.isReadOnly()); + } finally { + producer.close(Duration.ofMillis(0)); + } + } + @Test public void closeShouldBeIdempotent() { Properties producerProps = new Properties(); @@ -1289,7 +1317,7 @@ public void testInitTransactionsResponseAfterTimeout() throws Exception { ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE)); - Future future = executor.submit(producer::initTransactions); + Future future = executor.submit(() -> producer.initTransactions()); TestUtils.waitForCondition(client::hasInFlightRequests, "Timed out while waiting for expected `InitProducerId` request to be sent"); @@ -1364,6 +1392,297 @@ public void testInitTransactionWhileThrottled() { } } + @ParameterizedTest + @CsvSource({ + "true, false", + "true, true", + "false, true" + }) + public void testInitTransactionsWithKeepPreparedTxnAndTwoPhaseCommit(boolean keepPreparedTxn, boolean enable2PC) { + Map configs = new HashMap<>(); + configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-txn-id"); + configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000); + configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); + if (enable2PC) { + configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, true); + } + + Time time = new MockTime(1); + MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)); + ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE); + MockClient client = new MockClient(time, metadata); + client.updateMetadata(initialUpdateResponse); + + // Capture flags from the InitProducerIdRequest + boolean[] requestFlags = new boolean[2]; // [keepPreparedTxn, enable2Pc] + + client.prepareResponse( + request -> request instanceof FindCoordinatorRequest && + ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), + FindCoordinatorResponse.prepareResponse(Errors.NONE, "test-txn-id", NODE)); + + client.prepareResponse( + request -> { + if (request instanceof InitProducerIdRequest) { + InitProducerIdRequest initRequest = (InitProducerIdRequest) request; + requestFlags[0] = initRequest.data().keepPreparedTxn(); + requestFlags[1] = initRequest.data().enable2Pc(); + return true; + } + return false; + }, + initProducerIdResponse(1L, (short) 5, Errors.NONE)); + + try (Producer producer = kafkaProducer(configs, new StringSerializer(), + new StringSerializer(), metadata, client, null, time)) { + 
producer.initTransactions(keepPreparedTxn); + + // Verify request flags match expected values + assertEquals(keepPreparedTxn, requestFlags[0], + "keepPreparedTxn flag should match input parameter"); + assertEquals(enable2PC, requestFlags[1], + "enable2Pc flag should match producer configuration"); + } + } + + @Test + public void testPrepareTransactionSuccess() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + when(ctx.transactionManager.isTransactionV2Enabled()).thenReturn(true); + when(ctx.transactionManager.is2PCEnabled()).thenReturn(true); + when(ctx.sender.isRunning()).thenReturn(true); + + doNothing().when(ctx.transactionManager).prepareTransaction(); + + long expectedProducerId = 12345L; + short expectedEpoch = 5; + ProducerIdAndEpoch expectedProducerIdAndEpoch = new ProducerIdAndEpoch(expectedProducerId, expectedEpoch); + when(ctx.transactionManager.preparedTransactionState()).thenReturn(expectedProducerIdAndEpoch); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + PreparedTxnState returned = producer.prepareTransaction(); + assertEquals(expectedProducerId, returned.producerId()); + assertEquals(expectedEpoch, returned.epoch()); + + verify(ctx.transactionManager).prepareTransaction(); + verify(ctx.accumulator).beginFlush(); + verify(ctx.accumulator).awaitFlushCompletion(); + } + } + + @Test + public void testSendNotAllowedInPreparedTransactionState() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + String topic = "foo"; + Cluster cluster = TestUtils.singletonCluster(topic, 1); + + when(ctx.sender.isRunning()).thenReturn(true); + when(ctx.metadata.fetch()).thenReturn(cluster); + + // Mock transaction manager to simulate being in a prepared state + when(ctx.transactionManager.isTransactional()).thenReturn(true); + when(ctx.transactionManager.isPrepared()).thenReturn(true); + + // Create record to send + long timestamp = ctx.time.milliseconds(); + ProducerRecord record = new ProducerRecord<>(topic, 0, timestamp, "key", "value"); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + // Verify that sending a record throws IllegalStateException with the correct message + IllegalStateException exception = assertThrows( + IllegalStateException.class, + () -> producer.send(record) + ); + + assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); + + // Verify transactionManager methods were called + verify(ctx.transactionManager).isTransactional(); + verify(ctx.transactionManager).isPrepared(); + + // Verify that no message was actually sent (accumulator was not called) + verify(ctx.accumulator, never()).append( + eq(topic), + anyInt(), + anyLong(), + any(), + any(), + any(), + any(), + anyLong(), + anyLong(), + any() + ); + } + } + + @Test + public void testSendOffsetsNotAllowedInPreparedTransactionState() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + String topic = "foo"; + Cluster cluster = TestUtils.singletonCluster(topic, 1); + + when(ctx.sender.isRunning()).thenReturn(true); + when(ctx.metadata.fetch()).thenReturn(cluster); + + // Mock transaction manager to simulate being in a prepared state + when(ctx.transactionManager.isTransactional()).thenReturn(true); + 
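// ----------------------------------------------------------------------------
// Editorial aside, not part of the patch: the prepare/complete tests in this
// hunk exercise the two-phase-commit additions to KafkaProducer. The sketch
// below strings those calls together the way an external transaction
// coordinator might, based only on the behaviour these tests assert; the class
// and helper names are hypothetical, and a real run needs a producer configured
// with ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG set to true
// (otherwise prepareTransaction() fails with InvalidTxnStateException, as tested below).
package org.apache.kafka.clients.producer;

class TwoPhaseCommitSketch {

    // Phase 1: produce, then prepare and hand the returned state to an external
    // store. While prepared, send(), sendOffsetsToTransaction() and
    // beginTransaction() are rejected with IllegalStateException.
    static PreparedTxnState prepare(KafkaProducer<String, String> producer) {
        producer.initTransactions(false);      // keepPreparedTxn = false on a fresh start
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("topic", "key", "value"));
        return producer.prepareTransaction();  // flushes, then returns the producerId/epoch state
    }

    // Phase 2, possibly after a restart that called initTransactions(true) to keep
    // the prepared transaction: completeTransaction() commits when the recovered
    // state matches the broker-side prepared state, and aborts when it does not.
    static void complete(KafkaProducer<String, String> producer, PreparedTxnState recovered) {
        producer.completeTransaction(recovered);
    }
}
// ----------------------------------------------------------------------------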
when(ctx.transactionManager.isPrepared()).thenReturn(true); + + // Create consumer group metadata + String groupId = "test-group"; + Map offsets = new HashMap<>(); + offsets.put(new TopicPartition(topic, 0), new OffsetAndMetadata(100L)); + ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata(groupId); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + // Verify that sending offsets throws IllegalStateException with the correct message + IllegalStateException exception = assertThrows( + IllegalStateException.class, + () -> producer.sendOffsetsToTransaction(offsets, groupMetadata) + ); + + assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); + + // Verify transactionManager methods were called + verify(ctx.transactionManager).isTransactional(); + verify(ctx.transactionManager).isPrepared(); + + // Verify that no offsets were actually sent + verify(ctx.transactionManager, never()).sendOffsetsToTransaction( + eq(offsets), + eq(groupMetadata) + ); + } + } + + @Test + public void testBeginTransactionNotAllowedInPreparedTransactionState() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + when(ctx.sender.isRunning()).thenReturn(true); + + // Mock transaction manager to simulate being in a prepared state + when(ctx.transactionManager.isTransactional()).thenReturn(true); + when(ctx.transactionManager.isPrepared()).thenReturn(true); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + // Verify that calling beginTransaction throws IllegalStateException with the correct message + IllegalStateException exception = assertThrows( + IllegalStateException.class, + producer::beginTransaction + ); + + assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); + + // Verify transactionManager methods were called + verify(ctx.transactionManager).isTransactional(); + verify(ctx.transactionManager).isPrepared(); + } + } + + @Test + public void testPrepareTransactionFailsWhen2PCDisabled() { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + // Disable 2PC + when(ctx.transactionManager.isTransactionV2Enabled()).thenReturn(true); + when(ctx.transactionManager.is2PCEnabled()).thenReturn(false); + when(ctx.sender.isRunning()).thenReturn(true); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + assertThrows( + InvalidTxnStateException.class, + producer::prepareTransaction, + "prepareTransaction() should fail if 2PC is disabled" + ); + } + } + + @Test + public void testCompleteTransactionWithMatchingState() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + when(ctx.transactionManager.isPrepared()).thenReturn(true); + when(ctx.sender.isRunning()).thenReturn(true); + + // Create prepared states with matching values + long producerId = 12345L; + short epoch = 5; + PreparedTxnState inputState = new PreparedTxnState(producerId, epoch); + ProducerIdAndEpoch currentProducerIdAndEpoch = new ProducerIdAndEpoch(producerId, epoch); + + // Set up the transaction manager to return the prepared state + when(ctx.transactionManager.preparedTransactionState()).thenReturn(currentProducerIdAndEpoch); + + // Should trigger commit when states match + 
TransactionalRequestResult commitResult = mock(TransactionalRequestResult.class); + when(ctx.transactionManager.beginCommit()).thenReturn(commitResult); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + // Call completeTransaction with the matching state + producer.completeTransaction(inputState); + + // Verify methods called in order + verify(ctx.transactionManager).isPrepared(); + verify(ctx.transactionManager).preparedTransactionState(); + verify(ctx.transactionManager).beginCommit(); + + // Verify abort was never called + verify(ctx.transactionManager, never()).beginAbort(); + + // Verify sender was woken up + verify(ctx.sender).wakeup(); + } + } + + @Test + public void testCompleteTransactionWithNonMatchingState() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + when(ctx.transactionManager.isPrepared()).thenReturn(true); + when(ctx.sender.isRunning()).thenReturn(true); + + // Create txn prepared states with different values + long producerId = 12345L; + short epoch = 5; + PreparedTxnState inputState = new PreparedTxnState(producerId + 1, epoch); + ProducerIdAndEpoch currentProducerIdAndEpoch = new ProducerIdAndEpoch(producerId, epoch); + + // Set up the transaction manager to return the prepared state + when(ctx.transactionManager.preparedTransactionState()).thenReturn(currentProducerIdAndEpoch); + + // Should trigger abort when states don't match + TransactionalRequestResult abortResult = mock(TransactionalRequestResult.class); + when(ctx.transactionManager.beginAbort()).thenReturn(abortResult); + + try (KafkaProducer producer = ctx.newKafkaProducer()) { + // Call completeTransaction with the non-matching state + producer.completeTransaction(inputState); + + // Verify methods called in order + verify(ctx.transactionManager).isPrepared(); + verify(ctx.transactionManager).preparedTransactionState(); + verify(ctx.transactionManager).beginAbort(); + + // Verify commit was never called + verify(ctx.transactionManager, never()).beginCommit(); + + // Verify sender was woken up + verify(ctx.sender).wakeup(); + } + } + @Test public void testClusterAuthorizationFailure() throws Exception { int maxBlockMs = 500; @@ -1424,7 +1743,7 @@ public void testTransactionV2ProduceWithConcurrentTransactionError() throws Exce KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); String topic = "foo"; - TopicPartition topicPartition = new TopicPartition(topic, 0); + TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, topic); Cluster cluster = TestUtils.singletonCluster(topic, 1); when(ctx.sender.isRunning()).thenReturn(true); @@ -1463,8 +1782,8 @@ public void testTransactionV2ProduceWithConcurrentTransactionError() throws Exce client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some-txn", NODE)); client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); - client.prepareResponse(produceResponse(topicPartition, 1L, Errors.CONCURRENT_TRANSACTIONS, 0, 1)); - client.prepareResponse(produceResponse(topicPartition, 1L, Errors.NONE, 0, 1)); + client.prepareResponse(produceResponse(topicIdPartition, 1L, Errors.CONCURRENT_TRANSACTIONS, 0, 1)); + client.prepareResponse(produceResponse(topicIdPartition, 1L, Errors.NONE, 0, 1)); client.prepareResponse(endTxnResponse(Errors.NONE)); try (KafkaProducer producer = new KafkaProducer<>( @@ -1508,7 +1827,7 @@ public void 
testMeasureAbortTransactionDuration() { } @Test - public void testCommitTransactionWithRecordTooLargeException() throws Exception { + public void testCommitTransactionWithRecordTooLargeException() { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); @@ -1538,7 +1857,7 @@ public void testCommitTransactionWithRecordTooLargeException() throws Exception } @Test - public void testCommitTransactionWithMetadataTimeoutForMissingTopic() throws Exception { + public void testCommitTransactionWithMetadataTimeoutForMissingTopic() { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); @@ -1575,7 +1894,7 @@ public void testCommitTransactionWithMetadataTimeoutForMissingTopic() throws Exc } @Test - public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() throws Exception { + public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); @@ -1612,7 +1931,7 @@ public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() thr } @Test - public void testCommitTransactionWithSendToInvalidTopic() throws Exception { + public void testCommitTransactionWithSendToInvalidTopic() { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); @@ -1750,6 +2069,7 @@ public void testTransactionV2Produce() throws Exception { KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); String topic = "foo"; + Uuid topicId = Uuid.fromString("klZ9sa2rSvig6QpgGXzALT"); TopicPartition topicPartition = new TopicPartition(topic, 0); Cluster cluster = TestUtils.singletonCluster(topic, 1); @@ -1789,7 +2109,7 @@ public void testTransactionV2Produce() throws Exception { client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some-txn", NODE)); client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); - client.prepareResponse(produceResponse(topicPartition, 1L, Errors.NONE, 0, 1)); + client.prepareResponse(produceResponse(new TopicIdPartition(topicId, topicPartition), 1L, Errors.NONE, 0, 1)); client.prepareResponse(endTxnResponse(Errors.NONE)); try (KafkaProducer producer = new KafkaProducer<>( @@ -2048,7 +2368,7 @@ public void testOnlyCanExecuteCloseAfterInitTransactionsTimeout() { } @Test - public void testSendToInvalidTopic() throws Exception { + public void testSendToInvalidTopic() { Map configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000"); @@ -2500,6 +2820,29 @@ public void configure(Map configs) { } } + public static class ProducerInterceptorForHeaders implements ProducerInterceptor { + + @Override + public ProducerRecord onSend(ProducerRecord record) { + return record; + } + + @Override + public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { + RecordHeaders recordHeaders = (RecordHeaders) headers; + // Ensure that the headers are read-only, no matter send success or send failure + assertTrue(recordHeaders.isReadOnly()); + } + + @Override + public void close() { + } + + @Override + 
public void configure(Map configs) { + } + } + public static class ProducerInterceptorForClientId implements ProducerInterceptor { @Override @@ -2542,7 +2885,7 @@ private static class KafkaProducerTestContext { private final Map configs; private final Serializer serializer; private final Partitioner partitioner = mock(Partitioner.class); - private final KafkaThread ioThread = mock(KafkaThread.class); + private final Sender.SenderThread senderThread = mock(Sender.SenderThread.class); private final List> interceptors = new ArrayList<>(); private ProducerMetadata metadata = mock(ProducerMetadata.class); private RecordAccumulator accumulator = mock(RecordAccumulator.class); @@ -2623,7 +2966,7 @@ public KafkaProducer newKafkaProducer() { interceptors, partitioner, time, - ioThread, + senderThread, Optional.empty() ); } @@ -2649,9 +2992,9 @@ void testDeliveryTimeoutAndLingerMsConfig() { } @SuppressWarnings("deprecation") - private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, int logStartOffset) { + private ProduceResponse produceResponse(TopicIdPartition topicIdPartition, long offset, Errors error, int throttleTimeMs, int logStartOffset) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); - Map partResp = singletonMap(tp, resp); + Map partResp = singletonMap(topicIdPartition, resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -2831,9 +3174,13 @@ private MetricName expectedMetricName(String clientId, String config, Class c private static final String NAME = "name"; private static final String DESCRIPTION = "description"; - private static final Map TAGS = Collections.singletonMap("k", "v"); + private static final LinkedHashMap TAGS = new LinkedHashMap<>(); private static final double VALUE = 123.0; + static { + TAGS.put("t1", "v1"); + } + public static class MonitorableSerializer extends MockSerializer implements Monitorable { @Override @@ -2860,4 +3207,38 @@ public void withPluginMetrics(PluginMetrics metrics) { metrics.addMetric(name, (Measurable) (config, now) -> VALUE); } } + + public static class CloseInterceptor implements ProducerInterceptor { + + public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); + + @Override + public ProducerRecord onSend(ProducerRecord record) { + return null; + } + + @Override + public void onAcknowledgement(RecordMetadata metadata, Exception exception) { + ProducerInterceptor.super.onAcknowledgement(metadata, exception); + } + + @Override + public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { + ProducerInterceptor.super.onAcknowledgement(metadata, exception, headers); + } + + @Override + public void close() { + CLOSE_COUNT.incrementAndGet(); + } + + @Override + public void configure(Map configs) { + // no-op + } + + public static void resetCounters() { + CLOSE_COUNT.set(0); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java index 6ec8164c26805..e66dcca504438 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java @@ -53,9 +53,9 @@ public class MockProducerTest { private final String topic = "topic"; - private MockProducer producer; - private final ProducerRecord record1 = new ProducerRecord<>(topic, 
"key1".getBytes(), "value1".getBytes()); - private final ProducerRecord record2 = new ProducerRecord<>(topic, "key2".getBytes(), "value2".getBytes()); + private MockProducer producer; + private final ProducerRecord record1 = new ProducerRecord<>(topic, "key1", "value1"); + private final ProducerRecord record2 = new ProducerRecord<>(topic, "key2", "value2"); private final String groupId = "group"; private void buildMockProducer(boolean autoComplete) { @@ -318,7 +318,7 @@ public void shouldPublishMessagesOnlyAfterCommitIfTransactionsAreEnabled() { producer.commitTransaction(); - List> expectedResult = new ArrayList<>(); + List> expectedResult = new ArrayList<>(); expectedResult.add(record1); expectedResult.add(record2); @@ -385,7 +385,7 @@ public void shouldPreserveCommittedMessagesOnAbortIfTransactionsAreEnabled() { producer.beginTransaction(); producer.abortTransaction(); - List> expectedResult = new ArrayList<>(); + List> expectedResult = new ArrayList<>(); expectedResult.add(record1); expectedResult.add(record2); @@ -724,10 +724,10 @@ public void testMetadataOnException() throws InterruptedException { buildMockProducer(false); Future metadata = producer.send(record2, (md, exception) -> { assertNotNull(md); - assertEquals(md.offset(), -1L, "Invalid offset"); - assertEquals(md.timestamp(), RecordBatch.NO_TIMESTAMP, "Invalid timestamp"); - assertEquals(md.serializedKeySize(), -1L, "Invalid Serialized Key size"); - assertEquals(md.serializedValueSize(), -1L, "Invalid Serialized value size"); + assertEquals(-1L, md.offset(), "Invalid offset"); + assertEquals(RecordBatch.NO_TIMESTAMP, md.timestamp(), "Invalid timestamp"); + assertEquals(-1L, md.serializedKeySize(), "Invalid Serialized Key size"); + assertEquals(-1L, md.serializedValueSize(), "Invalid Serialized value size"); }); IllegalArgumentException e = new IllegalArgumentException("dummy exception"); assertTrue(producer.errorNext(e), "Complete the second request with an error"); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/PreparedTxnStateTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/PreparedTxnStateTest.java new file mode 100644 index 0000000000000..f0d94434d4795 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/producer/PreparedTxnStateTest.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.producer; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for the PreparedTxnState class part of the #KafkaProducer class. + */ +public class PreparedTxnStateTest { + + @Test + public void testDefaultConstructor() { + PreparedTxnState state = new PreparedTxnState(); + assertEquals("", state.toString(), "Empty state should serialize to an empty string"); + assertEquals(-1L, state.producerId(), "Default producerId should be -1"); + assertEquals((short) -1, state.epoch(), "Default epoch should be -1"); + assertFalse(state.hasTransaction(), "Default state should not have a transaction"); + } + + @Test + public void testParameterizedConstructor() { + long producerId = 123L; + short epoch = 45; + PreparedTxnState state = new PreparedTxnState(producerId, epoch); + assertEquals(producerId, state.producerId(), "ProducerId should match"); + assertEquals(epoch, state.epoch(), "Epoch should match"); + assertTrue(state.hasTransaction(), "State should have a transaction"); + assertEquals("123:45", state.toString(), "Serialized form should match expected format"); + } + + @Test + public void testDeserializationFromString() { + String serialized = "123:45"; + PreparedTxnState state = new PreparedTxnState(serialized); + assertEquals(serialized, state.toString(), "Deserialized state should match the original serialized string"); + assertEquals(123L, state.producerId(), "Deserialized producerId should match"); + assertEquals((short) 45, state.epoch(), "Deserialized epoch should match"); + assertTrue(state.hasTransaction(), "Deserialized state should have a transaction"); + } + + @Test + public void testRoundTripSerialization() { + // Create initialized state from string, then convert back to string + String original = "9876:54"; + PreparedTxnState state = new PreparedTxnState(original); + String serialized = state.toString(); + assertEquals(original, serialized, "Round-trip serialization should preserve values"); + + // Deserialize again to verify + PreparedTxnState stateAgain = new PreparedTxnState(serialized); + assertEquals(original, stateAgain.toString(), "Re-deserialized state should match original"); + assertEquals(state.producerId(), stateAgain.producerId(), "Producer IDs should match"); + assertEquals(state.epoch(), stateAgain.epoch(), "Epochs should match"); + + // Test round trip for uninitialized state (empty string) + String emptyString = ""; + PreparedTxnState emptyState = new PreparedTxnState(emptyString); + String emptyStateSerialized = emptyState.toString(); + assertEquals(emptyString, emptyStateSerialized, "Round-trip of empty string should remain empty"); + assertEquals(-1L, emptyState.producerId(), "Empty string should result in producerId -1"); + assertEquals((short) -1, emptyState.epoch(), "Empty string should result in epoch -1"); + + // Deserialize empty state again to verify + PreparedTxnState emptyStateAgain = new PreparedTxnState(emptyStateSerialized); + assertEquals(emptyString, emptyStateAgain.toString(), "Re-deserialized empty state should still be empty"); + assertEquals(-1L, emptyStateAgain.producerId(), "Empty string should result in producerId -1"); + assertEquals((short) -1, emptyStateAgain.epoch(), "Empty string should 
result in epoch -1"); + } + + @Test + public void testHandlingOfNullOrEmptyString() { + PreparedTxnState stateWithNull = new PreparedTxnState(null); + assertEquals("", stateWithNull.toString(), "Null string should result in empty state"); + assertFalse(stateWithNull.hasTransaction(), "State from null string should not have a transaction"); + + PreparedTxnState stateWithEmpty = new PreparedTxnState(""); + assertEquals("", stateWithEmpty.toString(), "Empty string should result in empty state"); + assertFalse(stateWithEmpty.hasTransaction(), "State from empty string should not have a transaction"); + } + + @Test + public void testMaxValues() { + // Test with maximum possible values for producer ID and epoch + String maxValues = Long.MAX_VALUE + ":" + Short.MAX_VALUE; + PreparedTxnState state = new PreparedTxnState(maxValues); + assertEquals(maxValues, state.toString(), "Max values should be handled correctly"); + assertEquals(Long.MAX_VALUE, state.producerId(), "Max producer ID should be handled correctly"); + assertEquals(Short.MAX_VALUE, state.epoch(), "Max epoch should be handled correctly"); + assertTrue(state.hasTransaction(), "State with max values should have a transaction"); + } + + @Test + public void testEqualsAndHashCode() { + PreparedTxnState state1 = new PreparedTxnState(123L, (short) 45); + PreparedTxnState state2 = new PreparedTxnState(123L, (short) 45); + PreparedTxnState state3 = new PreparedTxnState(456L, (short) 78); + PreparedTxnState state4 = new PreparedTxnState(123L, (short) 46); + + // Test equals + assertEquals(state1, state2, "Equal states should be equal"); + assertNotEquals(state1, state3, "States with different producer IDs should not be equal"); + assertNotEquals(state1, state4, "States with different epochs should not be equal"); + assertNotEquals(null, state1, "State should not equal null"); + + // Test hashCode + assertEquals(state1.hashCode(), state2.hashCode(), "Equal states should have same hash code"); + assertNotEquals(state1.hashCode(), state3.hashCode(), "Different states should have different hash codes"); + } + + @Test + public void testHasTransaction() { + // State with transaction (producer ID >= 0) + PreparedTxnState stateWithTransaction = new PreparedTxnState(0L, (short) 0); + assertTrue(stateWithTransaction.hasTransaction(), "State with producerId 0 should have a transaction"); + + // State without transaction (producer ID = -1) + PreparedTxnState stateWithoutTransaction = new PreparedTxnState(-1L, (short) -1); + assertFalse(stateWithoutTransaction.hasTransaction(), "State with producerId -1 should not have a transaction"); + } + + @Test + public void testInvalidFormatThrowsException() { + // Test with invalid format - missing epoch + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("123"), + "String with missing epoch should throw IllegalArgumentException"); + + // Test with invalid format - too many parts + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("123:45:67"), + "String with extra parts should throw IllegalArgumentException"); + + // Test with non-numeric producer ID + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("abc:45"), + "Non-numeric producer ID should throw IllegalArgumentException"); + + // Test with non-numeric epoch + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("123:xyz"), + "Non-numeric epoch should throw IllegalArgumentException"); + } + + @Test + public void testInvalidProducerIdEpochCombinations() { + // Valid 
combinations: both >= 0 + new PreparedTxnState("0:0"); + new PreparedTxnState("123:45"); + + // Invalid: producerId >= 0, epoch < 0 + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("123:-2"), + "Positive producerId with negative epoch (not -1) should throw IllegalArgumentException"); + + // Invalid: producerId < 0 (not -1), epoch >= 0 + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("-2:45"), + "Negative producerId (not -1) with positive epoch should throw IllegalArgumentException"); + + // Invalid: producerId < 0 (not -1), epoch < 0 (not -1) + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("-2:-2"), + "Negative producerId and epoch (not -1) should throw IllegalArgumentException"); + + // Invalid: producerId = -1, epoch >= 0 + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("-1:45"), + "ProducerId -1 with positive epoch should throw IllegalArgumentException"); + + // Invalid: producerId >= 0, epoch = -1 + assertThrows(IllegalArgumentException.class, + () -> new PreparedTxnState("123:-1"), + "Positive producerId with epoch -1 should throw IllegalArgumentException"); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java index 830711c0e5449..5fd9ab727e046 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.MetadataRecoveryStrategy; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.serialization.ByteArraySerializer; @@ -26,14 +27,18 @@ import org.junit.jupiter.api.Test; +import java.io.FileInputStream; +import java.io.InputStream; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Properties; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class ProducerConfigTest { @@ -88,6 +93,7 @@ public void testInvalidCompressionType() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "abc"); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); } @@ -97,6 +103,7 @@ public void testInvalidSecurityProtocol() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc"); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -106,6 +113,7 @@ public void testDefaultMetadataRecoveryStrategy() { Map 
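// ----------------------------------------------------------------------------
// Editorial aside, not part of the patch: PreparedTxnStateTest above pins down
// the textual form used to carry a prepared transaction's identity. This is a
// minimal round-trip sketch using only the constructors and accessors that test
// exercises; the surrounding class is hypothetical and placed in the same
// package as the test to keep it self-contained.
package org.apache.kafka.clients.producer;

class PreparedTxnStateRoundTrip {
    public static void main(String[] args) {
        // Serialized form is "<producerId>:<epoch>", e.g. "123:45".
        String stored = new PreparedTxnState(123L, (short) 45).toString();

        // Both fields must be >= 0; the "no transaction" case is written as the
        // empty string, and other combinations make the String constructor throw
        // IllegalArgumentException.
        PreparedTxnState recovered = new PreparedTxnState(stored);
        System.out.println(recovered.producerId());      // 123
        System.out.println(recovered.epoch());           // 45
        System.out.println(recovered.hasTransaction());  // true

        // The empty string stands for "no prepared transaction".
        System.out.println(new PreparedTxnState("").hasTransaction()); // false
    }
}
// ----------------------------------------------------------------------------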
configs = new HashMap<>(); configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ProducerConfig producerConfig = new ProducerConfig(configs); assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, producerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -116,6 +124,7 @@ public void testInvalidMetadataRecoveryStrategy() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "abc"); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -127,6 +136,7 @@ public void testCaseInsensitiveSecurityProtocol() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ProducerConfig producerConfig = new ProducerConfig(configs); assertEquals(saslSslLowerCase, producerConfig.originals().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -138,6 +148,7 @@ void testUpperboundCheckOfEnableIdempotence() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, inFlightConnection); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " must be set to at most 5. 
Current value is " + inFlightConnection + ".", configException.getMessage()); @@ -145,4 +156,50 @@ void testUpperboundCheckOfEnableIdempotence() { configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); assertDoesNotThrow(() -> new ProducerConfig(configs)); } + + @Test + void testTwoPhaseCommitIncompatibleWithTransactionTimeout() { + Map configs = new HashMap<>(); + configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); + configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); + configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); + configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-txn-id"); + configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, true); + configs.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60000); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + + ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); + assertTrue(ce.getMessage().contains(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)); + assertTrue(ce.getMessage().contains(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG)); + + // Verify that setting one but not the other is valid + configs.remove(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG); + assertDoesNotThrow(() -> new ProducerConfig(configs)); + + configs.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60000); + configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, false); + assertDoesNotThrow(() -> new ProducerConfig(configs)); + } + + /** + * Validates config/producer.properties file to avoid getting out of sync with ProducerConfig. + */ + @Test + public void testValidateConfigPropertiesFile() { + Properties props = new Properties(); + + try (InputStream inputStream = new FileInputStream(System.getProperty("user.dir") + "/../config/producer.properties")) { + props.load(inputStream); + } catch (Exception e) { + fail("Failed to load config/producer.properties file: " + e.getMessage()); + } + + ProducerConfig config = new ProducerConfig(props); + + for (String key : config.originals().keySet()) { + if (!ProducerConfig.configDef().configKeys().containsKey(key)) { + fail("Invalid configuration key: " + key); + } + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java index 128e15ed6c60a..727368e8edd1b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java @@ -219,7 +219,7 @@ public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception t1.join(); t2.join(); // both the allocate() called by threads t1 and t2 should have been interrupted and the waiters queue should be empty - assertEquals(pool.queued(), 0); + assertEquals(0, pool.queued()); } @Test @@ -332,7 +332,7 @@ protected ByteBuffer allocateByteBuffer(int size) { } - assertEquals(bufferPool.availableMemory(), 1024); + assertEquals(1024, bufferPool.availableMemory()); } public static class StressTestThread extends Thread { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java index 46d1ed329eee2..383aa82ee2d88 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java @@ -121,8 +121,8 @@ private void assertMetricRemoved(final String name) { private void assertMetricValue(final String name) { assertEquals( - metrics.metric(metrics.metricName(name, KafkaProducerMetrics.GROUP)).metricValue(), - (double) METRIC_VALUE + (double) METRIC_VALUE, + metrics.metric(metrics.metricName(name, KafkaProducerMetrics.GROUP)).metricValue() ); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java index 853b27b255114..c81de1d74fc52 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.header.Headers; import org.junit.jupiter.api.Test; @@ -68,9 +69,10 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { onAckCount++; if (exception != null) { onErrorAckCount++; - // the length check is just to call topic() method and let it throw an exception - // if RecordMetadata.TopicPartition is null - if (metadata != null && metadata.topic().length() >= 0) { + if (metadata != null) { + if (metadata.topic() == null) { + throw new NullPointerException("Topic is null"); + } onErrorAckWithTopicSetCount++; if (metadata.partition() >= 0) onErrorAckWithTopicPartitionSetCount++; @@ -95,13 +97,69 @@ public void injectOnAcknowledgementError(boolean on) { } } + private class AppendNewProducerInterceptor implements ProducerInterceptor { + private final String appendStr; + private boolean throwExceptionOnSend = false; + private boolean throwExceptionOnAck = false; + + public AppendNewProducerInterceptor(String appendStr) { + this.appendStr = appendStr; + } + + @Override + public void configure(Map configs) { + } + + @Override + public ProducerRecord onSend(ProducerRecord record) { + onSendCount++; + if (throwExceptionOnSend) + throw new KafkaException("Injected exception in AppendNewProducerInterceptor.onSend"); + + return new ProducerRecord<>( + record.topic(), record.partition(), record.key(), record.value().concat(appendStr)); + } + + @Override + public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { + onAckCount++; + if (exception != null) { + onErrorAckCount++; + if (metadata != null) { + if (metadata.topic() == null) { + throw new NullPointerException("Topic is null"); + } + onErrorAckWithTopicSetCount++; + if (metadata.partition() >= 0) + onErrorAckWithTopicPartitionSetCount++; + } + } + if (throwExceptionOnAck) + throw new KafkaException("Injected exception in AppendNewProducerInterceptor.onAcknowledgement"); + } + + @Override + public void close() { + } + + // if 'on' is true, onSend will always throw an exception + public void injectOnSendError(boolean on) { + throwExceptionOnSend = on; + } + + // if 'on' is true, onAcknowledgement will always throw an exception + public void injectOnAcknowledgementError(boolean on) { + throwExceptionOnAck = on; + } + } + @Test public void testOnSendChain() { 
List> interceptorList = new ArrayList<>(); // we are testing two different interceptors by configuring the same interceptor differently, which is not // how it would be done in KafkaProducer, but ok for testing interceptor callbacks AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); + AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); @@ -139,23 +197,23 @@ public void testOnAcknowledgementChain() { // we are testing two different interceptors by configuring the same interceptor differently, which is not // how it would be done in KafkaProducer, but ok for testing interceptor callbacks AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); + AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); // verify onAck is called on all interceptors RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, 0, 0); - interceptors.onAcknowledgement(meta, null); + interceptors.onAcknowledgement(meta, null, null); assertEquals(2, onAckCount); // verify that onAcknowledgement exceptions do not propagate interceptor1.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null); + interceptors.onAcknowledgement(meta, null, null); assertEquals(4, onAckCount); interceptor2.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null); + interceptors.onAcknowledgement(meta, null, null); assertEquals(6, onAckCount); interceptors.close(); @@ -165,27 +223,29 @@ public void testOnAcknowledgementChain() { public void testOnAcknowledgementWithErrorChain() { List> interceptorList = new ArrayList<>(); AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); + AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); interceptorList.add(interceptor1); + interceptorList.add(interceptor2); ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); // verify that metadata contains both topic and partition interceptors.onSendError(producerRecord, new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test")); - assertEquals(1, onErrorAckCount); - assertEquals(1, onErrorAckWithTopicPartitionSetCount); + assertEquals(2, onErrorAckCount); + assertEquals(2, onErrorAckWithTopicPartitionSetCount); // verify that metadata contains both topic and partition (because record already contains partition) interceptors.onSendError(producerRecord, null, new KafkaException("Test")); - assertEquals(2, onErrorAckCount); - assertEquals(2, onErrorAckWithTopicPartitionSetCount); + assertEquals(4, onErrorAckCount); + assertEquals(4, onErrorAckWithTopicPartitionSetCount); // if producer record does not contain partition, interceptor should get partition == -1 ProducerRecord record2 = new ProducerRecord<>("test2", null, 1, "value"); interceptors.onSendError(record2, null, new KafkaException("Test")); - assertEquals(3, onErrorAckCount); - assertEquals(3, onErrorAckWithTopicSetCount); - assertEquals(2, 
onErrorAckWithTopicPartitionSetCount); + assertEquals(6, onErrorAckCount); + assertEquals(6, onErrorAckWithTopicSetCount); + assertEquals(4, onErrorAckWithTopicPartitionSetCount); // if producer record does not contain partition, but topic/partition is passed to // onSendError, then interceptor should get valid partition @@ -193,15 +253,15 @@ public void testOnAcknowledgementWithErrorChain() { interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test")); - assertEquals(4, onErrorAckCount); - assertEquals(4, onErrorAckWithTopicSetCount); - assertEquals(3, onErrorAckWithTopicPartitionSetCount); + assertEquals(8, onErrorAckCount); + assertEquals(8, onErrorAckWithTopicSetCount); + assertEquals(6, onErrorAckWithTopicPartitionSetCount); // if both record and topic/partition are null, interceptor should not receive metadata interceptors.onSendError(null, null, new KafkaException("Test")); - assertEquals(5, onErrorAckCount); - assertEquals(4, onErrorAckWithTopicSetCount); - assertEquals(3, onErrorAckWithTopicPartitionSetCount); + assertEquals(10, onErrorAckCount); + assertEquals(8, onErrorAckWithTopicSetCount); + assertEquals(6, onErrorAckWithTopicPartitionSetCount); interceptors.close(); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java index 64a1b41a14ec2..bec0eb2fcff30 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java @@ -29,10 +29,8 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -257,14 +255,14 @@ public void testMetadataPartialUpdate() { assertTrue(metadata.updateRequested()); assertEquals(0, metadata.timeToNextUpdate(now)); - assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3))); - assertEquals(metadata.newTopics(), new HashSet<>(Arrays.asList(topic2, topic3))); + assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3)); + assertEquals(metadata.newTopics(), Set.of(topic2, topic3)); // Perform the partial update for a subset of the new topics. now += 1000; assertTrue(metadata.updateRequested()); metadata.updateWithCurrentRequestVersion(responseWithTopics(Collections.singleton(topic2)), true, now); - assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3))); + assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3)); assertEquals(metadata.newTopics(), Collections.singleton(topic3)); } @@ -302,7 +300,7 @@ public void testRequestUpdateForTopic() { // Perform the full update. This should clear the update request. 
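import java.util.Map;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Headers;

// A minimal sketch (not part of this patch) of a standalone interceptor implementing the new
// three-argument onAcknowledgement(RecordMetadata, Exception, Headers) callback that
// AppendNewProducerInterceptor exercises in ProducerInterceptorsTest above. The class name and
// the callback body are illustrative assumptions.
public class HeaderAwareInterceptorSketch implements ProducerInterceptor<String, String> {

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) {
        // The acknowledged record's headers are now visible to the interceptor.
        if (exception == null && headers != null) {
            headers.forEach(header -> System.out.println("acked header: " + header.key()));
        }
    }

    @Override
    public void close() {
    }
}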
now += 1000; - metadata.updateWithCurrentRequestVersion(responseWithTopics(new HashSet<>(Arrays.asList(topic1, topic2))), false, now); + metadata.updateWithCurrentRequestVersion(responseWithTopics(Set.of(topic1, topic2)), false, now); assertFalse(metadata.updateRequested()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java index cac3d2648b1e9..750440d2595a5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java @@ -58,13 +58,15 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.Deque; -import java.util.HashSet; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; +import java.util.PriorityQueue; import java.util.Random; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -78,6 +80,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -167,7 +170,7 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // drain batches from 2 nodes: node1 => tp1, node2 => tp3, because the max request size is full after the first batch drained - Map> batches1 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); + Map> batches1 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); verifyTopicPartitionInBatches(batches1, tp1, tp3); // add record for tp1, tp3 @@ -176,11 +179,11 @@ public void testDrainBatches() throws Exception { // drain batches from 2 nodes: node1 => tp2, node2 => tp4, because the max request size is full after the first batch drained // The drain index should start from next topic partition, that is, node1 => tp2, node2 => tp4 - Map> batches2 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); + Map> batches2 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); verifyTopicPartitionInBatches(batches2, tp2, tp4); // make sure in next run, the drain index will start from the beginning - Map> batches3 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); + Map> batches3 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); verifyTopicPartitionInBatches(batches3, tp1, tp3); // add record for tp2, tp3, tp4 and mute the tp4 @@ -189,7 +192,7 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.mutePartition(tp4); // drain batches from 2 nodes: node1 => tp2, node2 => tp3 (because tp4 is muted) - Map> batches4 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); + Map> batches4 = accum.drain(metadataCache, Set.of(node1, node2), (int) 
batchSize, 0); verifyTopicPartitionInBatches(batches4, tp2, tp3); // add record for tp1, tp2, tp3, and unmute tp4 @@ -198,12 +201,12 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.unmutePartition(tp4); // set maxSize as a max value, so that the all partitions in 2 nodes should be drained: node1 => [tp1, tp2], node2 => [tp3, tp4] - Map> batches5 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), Integer.MAX_VALUE, 0); + Map> batches5 = accum.drain(metadataCache, Set.of(node1, node2), Integer.MAX_VALUE, 0); verifyTopicPartitionInBatches(batches5, tp1, tp2, tp3, tp4); } private void verifyTopicPartitionInBatches(Map> nodeBatches, TopicPartition... tp) { - int allTpBatchCount = (int) nodeBatches.values().stream().flatMap(Collection::stream).count(); + int allTpBatchCount = (int) nodeBatches.values().stream().mapToLong(Collection::size).sum(); assertEquals(tp.length, allTpBatchCount); List topicPartitionsInBatch = new ArrayList<>(); for (Map.Entry> entry : nodeBatches.entrySet()) { @@ -889,7 +892,7 @@ public void testExpiredBatches() throws InterruptedException { readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); Map> drained = accum.drain(metadataCache, readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals(drained.get(node1.id()).size(), 1, "There should be only one batch."); + assertEquals(1, drained.get(node1.id()).size(), "There should be only one batch."); time.sleep(1000L); accum.reenqueue(drained.get(node1.id()).get(0), time.milliseconds()); @@ -1430,7 +1433,7 @@ public void testReadyAndDrainWhenABatchIsBeingRetried() throws InterruptedExcept // Try to drain from node1, it should return no batches. Map> batches = accum.drain(metadataCache, - new HashSet<>(Collections.singletonList(node1)), 999999 /* maxSize */, now); + Set.of(node1), 999999 /* maxSize */, now); assertTrue(batches.containsKey(node1.id()) && batches.get(node1.id()).isEmpty(), "No batches ready to be drained on Node1"); } @@ -1511,7 +1514,7 @@ public void testDrainWithANodeThatDoesntHostAnyPartitions() { // Drain for node2, it should return 0 batches, Map> batches = accum.drain(metadataCache, - new HashSet<>(Collections.singletonList(node2)), 999999 /* maxSize */, time.milliseconds()); + Set.of(node2), 999999 /* maxSize */, time.milliseconds()); assertTrue(batches.get(node2.id()).isEmpty()); } @@ -1610,22 +1613,6 @@ private int expectedNumAppends(int batchSize) { } } - /** - * Return the offset delta when there is no key. - */ - private int expectedNumAppendsNoKey(int batchSize) { - int size = 0; - int offsetDelta = 0; - while (true) { - int recordSize = DefaultRecord.sizeInBytes(offsetDelta, 0, 0, value.length, - Record.EMPTY_HEADERS); - if (size + recordSize > batchSize) - return offsetDelta; - offsetDelta += 1; - size += recordSize; - } - } - private RecordAccumulator createTestRecordAccumulator(int batchSize, long totalSize, Compression compression, int lingerMs) { int deliveryTimeoutMs = 3200; return createTestRecordAccumulator(deliveryTimeoutMs, batchSize, totalSize, compression, lingerMs); @@ -1682,4 +1669,125 @@ int randomPartition() { return mockRandom == null ? 
super.randomPartition() : mockRandom.getAndIncrement(); } } + + /** + * This test verifies that RecordAccumulator's batch splitting functionality + * correctly handles oversized batches + * by splitting them down to individual records when necessary. It ensures that: + * 1. The splitting process can reduce batches to single-record size + * 2. The process does not enter infinite recursion loops + * 3. No records are lost or duplicated during splitting + * 4. The correct batch state is maintained throughout the process + */ + @Test + public void testSplitAndReenqueuePreventInfiniteRecursion() throws InterruptedException { + // Initialize test environment with a large batch size + long now = time.milliseconds(); + int batchSize = 1024 * 1024; // 1MB batch size + RecordAccumulator accum = createTestRecordAccumulator(batchSize, 10 * batchSize, Compression.gzip().build(), + 10); + + // Create a large producer batch manually (bypassing the accumulator's normal + // append process) + ByteBuffer buffer = ByteBuffer.allocate(batchSize); + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 0L); + ProducerBatch bigBatch = new ProducerBatch(tp1, builder, now, true); + + // Populate the batch with multiple records (100 records of 1KB each) + byte[] largeValue = new byte[1024]; // Each record is 1KB + for (int i = 0; i < 100; i++) { + ByteBuffer keyBytes = ByteBuffer.allocate(4); + keyBytes.putInt(i); // Use the loop counter as the key for verification later + FutureRecordMetadata result = bigBatch.tryAppend(time.milliseconds(), keyBytes.array(), largeValue, + Record.EMPTY_HEADERS, null, time.milliseconds()); + assertNotNull(result); + } + bigBatch.close(); + + time.sleep(101L); // Ensure the batch has time to become ready for processing + + // Add the batch to the accumulator for splitting + accum.reenqueue(bigBatch, time.milliseconds()); + + // Iteratively split batches until we find single-record batches + // This section tests the core batch splitting functionality + int splitOperations = 0; + int maxSplitOperations = 100; // Safety limit to prevent infinite recursion + boolean foundSingleRecordBatch = false; + + // Use a comparator that puts the batch with the most records first + Comparator reverseComparator = (batch1, batch2) -> Integer.compare(batch2.recordCount, + batch1.recordCount); + + while (splitOperations < maxSplitOperations && !foundSingleRecordBatch) { + // Get the current batches for this topic-partition + Deque tp1Deque = accum.getDeque(tp1); + if (tp1Deque.isEmpty()) { + break; + } + + // Find the batch with the most records + PriorityQueue tp1PriorityQue = new PriorityQueue<>(reverseComparator); + tp1PriorityQue.addAll(tp1Deque); + ProducerBatch batch = tp1PriorityQue.poll(); + if (batch == null) { + break; + } + + // If we've found a batch with only one record, we've reached our goal + if (batch.recordCount == 1) { + foundSingleRecordBatch = true; + break; + } + + // Remove the batch from the deque before splitting it + tp1Deque.remove(batch); + + // Split the batch and track the operation + int numSplitBatches = accum.splitAndReenqueue(batch); + splitOperations++; + + // If splitting produced no new batches (shouldn't happen with multi-record + // batches) + // mark the batch as complete + if (numSplitBatches == 0) { + assertEquals(1, batch.recordCount, "Unsplittable batch should have only 1 record"); + batch.complete(0L, 0L); + foundSingleRecordBatch = true; + } + } + + // Verification section: Check that the splitting 
process worked as expected + + // Verify that we found a single-record batch, proving that splitting can reach + // that level + assertTrue(foundSingleRecordBatch, "Should eventually produce batches with single records"); + + // Verify we didn't hit our safety limit, which would indicate potential + // infinite recursion + assertTrue(splitOperations < maxSplitOperations, + "Should not hit the safety limit, indicating no infinite recursion"); + + // Verify all remaining batches have at most one record + Deque finalDeque = accum.getDeque(tp1); + + Map keyFoundMap = new HashMap<>(); + // Check each batch and verify record integrity + for (ProducerBatch batch : finalDeque) { + assertTrue(batch.recordCount <= 1, "All remaining batches should have at most 1 record"); + + // Extract the record and its key + MemoryRecords batchRecords = batch.records(); + Iterator recordIterator = batchRecords.records().iterator(); + Record singleRecord = recordIterator.next(); + + // Track keys to ensure no duplicates (putIfAbsent returns null if the key + // wasn't present) + assertNull(keyFoundMap.putIfAbsent(singleRecord.key().getInt(), true), + "Each key should appear exactly once in the split batches"); + } + + // Verify all original records are accounted for (no data loss) + assertEquals(100, keyFoundMap.size(), "All original 100 records should be present after splitting"); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java index e67be76eb9baf..cd984ac2a343e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java @@ -33,7 +33,9 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InvalidRequestException; @@ -94,6 +96,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import org.mockito.InOrder; import java.nio.ByteBuffer; @@ -153,9 +157,15 @@ public class SenderTest { private static final int DELIVERY_TIMEOUT_MS = 1500; private static final long TOPIC_IDLE_MS = 60 * 1000; - private final TopicPartition tp0 = new TopicPartition("test", 0); - private final TopicPartition tp1 = new TopicPartition("test", 1); - private final TopicPartition tp2 = new TopicPartition("test", 2); + private static final String TOPIC_NAME = "test"; + private static final Uuid TOPIC_ID = Uuid.fromString("MKXx1fIkQy2J9jXHhK8m1w"); + private static final Map TOPIC_IDS = Map.of( + TOPIC_NAME, TOPIC_ID, + "testSplitBatchAndSend", Uuid.fromString("2J9hK8m1wHMKjXfIkQyXx1") + ); + private final TopicPartition tp0 = new TopicPartition(TOPIC_NAME, 0); + private final TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 1); + private final TopicPartition tp2 = new TopicPartition(TOPIC_NAME, 2); private MockTime time = new MockTime(); private final int batchSize = 16 * 1024; private final ProducerMetadata metadata = new ProducerMetadata(0, 0, 
Long.MAX_VALUE, TOPIC_IDLE_MS, @@ -171,6 +181,11 @@ public class SenderTest { @BeforeEach public void setup() { setupWithTransactionState(null); + apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); + this.client.updateMetadata( + RequestTestUtils.metadataUpdateWithIds(1, + Collections.singletonMap(TOPIC_NAME, 3), + TOPIC_IDS)); } @AfterEach @@ -181,12 +196,18 @@ public void tearDown() { private static Map partitionRecords(ProduceRequest request) { Map partitionRecords = new HashMap<>(); request.data().topicData().forEach(tpData -> tpData.partitionData().forEach(p -> { - TopicPartition tp = new TopicPartition(tpData.name(), p.index()); + String topicName = tpData.name(); + + if (request.version() >= 13 && tpData.topicId() != Uuid.ZERO_UUID) { + topicName = TOPIC_IDS.entrySet().stream().filter(e -> e.getValue() == tpData.topicId()).map(Map.Entry::getKey).findFirst().get(); + } + + TopicPartition tp = new TopicPartition(topicName, p.index()); partitionRecords.put(tp, (MemoryRecords) p.records()); })); return Collections.unmodifiableMap(partitionRecords); } - + @Test public void testSimple() throws Exception { long offset = 0; @@ -213,7 +234,7 @@ public void testSimple() throws Exception { public void testQuotaMetrics() { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry); - Cluster cluster = TestUtils.singletonCluster("test", 1); + Cluster cluster = TestUtils.singletonCluster(TOPIC_NAME, 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, 10 * 1000, 127 * 1000, @@ -353,11 +374,11 @@ public void testSendInOrder() throws Exception { Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); // Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1 - MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("test", 2)); + MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWithIds(2, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS); client.prepareMetadataUpdate(metadataUpdate1); // Send the first message. - TopicPartition tp2 = new TopicPartition("test", 1); + TopicPartition tp2 = new TopicPartition(TOPIC_NAME, 1); appendToAccumulator(tp2, 0L, "key1", "value1"); sender.runOnce(); // connect sender.runOnce(); // send produce request @@ -374,7 +395,7 @@ public void testSendInOrder() throws Exception { appendToAccumulator(tp2, 0L, "key2", "value2"); // Update metadata before sender receives response from broker 0. Now partition 2 moves to broker 0 - MetadataResponse metadataUpdate2 = RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2)); + MetadataResponse metadataUpdate2 = RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS); client.prepareMetadataUpdate(metadataUpdate2); // Sender should not send the second message to node 0. 
assertEquals(1, sender.inFlightBatches(tp2).size()); @@ -449,12 +470,12 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { @Test public void testMetadataTopicExpiry() throws Exception { long offset = 0; - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); + client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); Future future = appendToAccumulator(tp0); sender.runOnce(); assertTrue(metadata.containsTopic(tp0.topic()), "Topic not added to metadata"); - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); + client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); sender.runOnce(); // send produce request client.respond(produceResponse(tp0, offset, Errors.NONE, 0)); sender.runOnce(); @@ -466,12 +487,12 @@ public void testMetadataTopicExpiry() throws Exception { assertTrue(metadata.containsTopic(tp0.topic()), "Topic not retained in metadata list"); time.sleep(TOPIC_IDLE_MS); - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); + client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); assertFalse(metadata.containsTopic(tp0.topic()), "Unused topic has not been expired"); future = appendToAccumulator(tp0); sender.runOnce(); assertTrue(metadata.containsTopic(tp0.topic()), "Topic not added to metadata"); - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); + client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); sender.runOnce(); // send produce request client.respond(produceResponse(tp0, offset + 1, Errors.NONE, 0)); sender.runOnce(); @@ -490,7 +511,7 @@ public void senderThreadShouldNotGetStuckWhenThrottledAndAddingPartitionsToTxn() ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -503,7 +524,7 @@ public void senderThreadShouldNotGetStuckWhenThrottledAndAddingPartitionsToTxn() // Verify node is throttled a little bit. In real-life Apache Kafka, we observe that this can happen // as done above by throttling or with a disconnect / backoff. 
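import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.producer.internals.TransactionManager;
import org.apache.kafka.common.utils.LogContext;

// Sketch only: the shape of the updated internal TransactionManager constructor used throughout
// these tests. The trailing boolean is the argument added by this change; TransactionManagerTest
// further down plumbs it through as a parameter named enable2pc. The helper class and the
// argument values here are illustrative, not code from the patch.
final class TransactionManagerConstructionSketch {
    static TransactionManager create(boolean enable2pc) {
        return new TransactionManager(
                new LogContext(),
                "example-transactional-id", // transactional.id
                60000,                      // transaction timeout (ms)
                100L,                       // retry backoff (ms)
                new ApiVersions(),
                enable2pc);                 // new trailing flag (two-phase commit participation)
    }
}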
long currentPollDelay = client.pollDelayMs(nodeToThrottle, startTime); - assertEquals(currentPollDelay, throttleTimeMs); + assertEquals(throttleTimeMs, currentPollDelay); txnManager.beginTransaction(); txnManager.maybeAddPartition(tp0); @@ -534,6 +555,8 @@ public void testNodeLatencyStats() throws Exception { new BufferPool(totalSize, batchSize, m, time, "producer-internal-metrics")); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); + apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); + Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1, senderMetrics, time, REQUEST_TIMEOUT, 1000L, null); @@ -616,15 +639,15 @@ public void testInitProducerIdWithMaxInFlightOne() { // Initialize transaction manager. InitProducerId will be queued up until metadata response // is processed and FindCoordinator can be sent to `leastLoadedNode`. TransactionManager transactionManager = new TransactionManager(new LogContext(), "testInitProducerIdWithPendingMetadataRequest", - 60000, 100L, new ApiVersions()); + 60000, 100L, new ApiVersions(), false); setupWithTransactionState(transactionManager, false, null, false); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0); - transactionManager.initializeTransactions(); + transactionManager.initializeTransactions(false); sender.runOnce(); // Process metadata response, prepare FindCoordinator and InitProducerId responses. // Verify producerId after the sender is run to process responses. - MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap()); + MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), Collections.emptyMap()); client.respond(metadataUpdate); prepareFindCoordinatorResponse(Errors.NONE, "testInitProducerIdWithPendingMetadataRequest"); prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); @@ -649,7 +672,7 @@ public void testIdempotentInitProducerIdWithMaxInFlightOne() { // Process metadata and InitProducerId responses. // Verify producerId after the sender is run to process responses. 
- MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap()); + MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), Collections.emptyMap()); client.respond(metadataUpdate); sender.runOnce(); sender.runOnce(); @@ -668,10 +691,10 @@ public void testNodeNotReady() { client = new MockClient(time, metadata); TransactionManager transactionManager = new TransactionManager(new LogContext(), "testNodeNotReady", - 60000, 100L, new ApiVersions()); + 60000, 100L, new ApiVersions(), false); setupWithTransactionState(transactionManager, false, null, true); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0); - transactionManager.initializeTransactions(); + transactionManager.initializeTransactions(false); sender.runOnce(); Node node = metadata.fetch().nodes().get(0); @@ -1510,7 +1533,7 @@ public void testExpiryOfFirstBatchShouldCauseEpochBumpIfFutureBatchesFail() thro public void testUnresolvedSequencesAreNotFatal() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -1795,7 +1818,7 @@ public void testCorrectHandlingOfDuplicateSequenceError() throws Exception { @Test public void testTransactionalUnknownProducerHandlingWhenRetentionLimitReached() throws Exception { final long producerId = 343434L; - TransactionManager transactionManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); + TransactionManager transactionManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); setupWithTransactionState(transactionManager); doInitTransactions(transactionManager, new ProducerIdAndEpoch(producerId, (short) 0)); @@ -2339,38 +2362,44 @@ public void testBumpEpochWhenOutOfOrderSequenceReceived() throws InterruptedExce @Test public void testIdempotentSplitBatchAndSend() throws Exception { - TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1); + TopicIdPartition tpId = new TopicIdPartition( + TOPIC_IDS.getOrDefault("testSplitBatchAndSend", Uuid.ZERO_UUID), + new TopicPartition("testSplitBatchAndSend", 1)); TransactionManager txnManager = createTransactionManager(); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); setupWithTransactionState(txnManager); prepareAndReceiveInitProducerId(123456L, Errors.NONE); assertTrue(txnManager.hasProducerId()); - testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp); + testSplitBatchAndSend(txnManager, producerIdAndEpoch, tpId); } @Test public void testTransactionalSplitBatchAndSend() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1); - TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions); + TopicIdPartition tpId = new TopicIdPartition( + TOPIC_IDS.getOrDefault("testSplitBatchAndSend", Uuid.ZERO_UUID), + new TopicPartition("testSplitBatchAndSend", 1)); + + TransactionManager 
txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); txnManager.beginTransaction(); - txnManager.maybeAddPartition(tp); - client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp, Errors.NONE))); + txnManager.maybeAddPartition(tpId.topicPartition()); + apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); + client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tpId.topicPartition(), Errors.NONE))); sender.runOnce(); - testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp); + testSplitBatchAndSend(txnManager, producerIdAndEpoch, tpId); } @SuppressWarnings("deprecation") private void testSplitBatchAndSend(TransactionManager txnManager, ProducerIdAndEpoch producerIdAndEpoch, - TopicPartition tp) throws Exception { + TopicIdPartition tpId) throws Exception { int maxRetries = 1; - String topic = tp.topic(); + String topic = tpId.topic(); int deliveryTimeoutMs = 3000; long totalSize = 1024 * 1024; String metricGrpName = "producer-metrics"; @@ -2384,35 +2413,39 @@ private void testSplitBatchAndSend(TransactionManager txnManager, Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager); // Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1 - MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap(topic, 2)); + MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWithIds(2, Collections.singletonMap(topic, 2), TOPIC_IDS); client.prepareMetadataUpdate(metadataUpdate1); + metadataUpdate1.brokers().forEach(node -> + apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) + ); + // Send the first message. 
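import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ProduceResponse;

// Sketch only: the response-keying pattern these tests now follow. Produce responses are built
// against TopicIdPartition (topic id plus partition) rather than TopicPartition alone, matching
// the topic-id-aware PRODUCE path exercised above. The helper class and method name are
// illustrative assumptions.
final class ProduceResponseSketch {
    static ProduceResponse successFor(Uuid topicId, TopicPartition tp) {
        Map<TopicIdPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
        responseMap.put(new TopicIdPartition(topicId, tp),
                new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
        return new ProduceResponse(responseMap);
    }
}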
long nowMs = time.milliseconds(); Cluster cluster = TestUtils.singletonCluster(); Future f1 = - accumulator.append(tp.topic(), tp.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; + accumulator.append(tpId.topic(), tpId.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; Future f2 = - accumulator.append(tp.topic(), tp.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; + accumulator.append(tpId.topic(), tpId.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; sender.runOnce(); // connect sender.runOnce(); // send produce request - assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence should be 2"); + assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence should be 2"); String id = client.requests().peek().destination(); assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey()); Node node = new Node(Integer.parseInt(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - Map responseMap = new HashMap<>(); - responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE)); + Map responseMap = new HashMap<>(); + responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE)); client.respond(new ProduceResponse(responseMap)); sender.runOnce(); // split and reenqueue - assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence should be 2"); + assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence should be 2"); // The compression ratio should have been improved once. 
assertEquals(CompressionType.GZIP.rate - CompressionRatioEstimator.COMPRESSION_RATIO_IMPROVING_STEP, CompressionRatioEstimator.estimation(topic, CompressionType.GZIP), 0.01); sender.runOnce(); // send the first produce request - assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should be 2"); + assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should be 2"); assertFalse(f1.isDone(), "The future shouldn't have been done."); assertFalse(f2.isDone(), "The future shouldn't have been done."); id = client.requests().peek().destination(); @@ -2421,14 +2454,14 @@ private void testSplitBatchAndSend(TransactionManager txnManager, assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); - client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), + responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + client.respond(produceRequestMatcher(tpId.topicPartition(), producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap)); sender.runOnce(); // receive assertTrue(f1.isDone(), "The future should have been done."); - assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should still be 2"); - assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 0"); + assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should still be 2"); + assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tpId.topicPartition()), "The last ack'd sequence number should be 0"); assertFalse(f2.isDone(), "The future shouldn't have been done."); assertEquals(0L, f1.get().offset(), "Offset of the first message should be 0"); sender.runOnce(); // send the second produce request @@ -2438,16 +2471,16 @@ private void testSplitBatchAndSend(TransactionManager txnManager, assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L)); - client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), + responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L)); + client.respond(produceRequestMatcher(tpId.topicPartition(), producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap)); sender.runOnce(); // receive assertTrue(f2.isDone(), "The future should have been done."); - assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should be 2"); - assertEquals(OptionalInt.of(1), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 1"); + assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should be 2"); + assertEquals(OptionalInt.of(1), txnManager.lastAckedSequence(tpId.topicPartition()), "The last ack'd sequence number should be 1"); assertEquals(1L, f2.get().offset(), "Offset of the first message should be 1"); - assertTrue(accumulator.getDeque(tp).isEmpty(), "There should be no batch in the accumulator"); + assertTrue(accumulator.getDeque(tpId.topicPartition()).isEmpty(), "There should be no batch in the accumulator"); assertTrue((Double) 
(m.metrics().get(senderMetrics.batchSplitRate).metricValue()) > 0, "There should be a split"); } } @@ -2491,8 +2524,8 @@ public void testInflightBatchesExpireOnDeliveryTimeout() throws InterruptedExcep assertEquals(1, client.inFlightRequestCount()); assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator"); - Map responseMap = new HashMap<>(); - responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + Map responseMap = new HashMap<>(); + responseMap.put(new TopicIdPartition(TOPIC_ID, tp0), new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); client.respond(new ProduceResponse(responseMap)); time.sleep(deliveryTimeoutMs); @@ -2671,8 +2704,8 @@ public void testExpiredBatchesInMultiplePartitions() throws Exception { assertEquals(1, client.inFlightRequestCount()); assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator"); - Map responseMap = new HashMap<>(); - responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + Map responseMap = new HashMap<>(); + responseMap.put(new TopicIdPartition(TOPIC_ID, tp0), new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); client.respond(new ProduceResponse(responseMap)); // Successfully expire both batches. @@ -2694,7 +2727,7 @@ public void testTransactionalRequestsSentOnShutdown() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions, false); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); @@ -2727,7 +2760,7 @@ public void testRecordsFlushedImmediatelyOnTransactionCompletion() throws Except int lingerMs = 50; SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); - TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions, false); setupWithTransactionState(txnManager, lingerMs); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, @@ -2784,7 +2817,7 @@ public void testAwaitPendingRecordsBeforeCommittingTransaction() throws Exceptio try (Metrics m = new Metrics()) { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); - TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions, false); setupWithTransactionState(txnManager); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, @@ -2855,7 +2888,7 @@ public void testIncompleteTransactionAbortOnShutdown() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions, 
false); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); @@ -2889,7 +2922,7 @@ public void testForceShutdownWithIncompleteTransaction() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testForceShutdownWithIncompleteTransaction", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testForceShutdownWithIncompleteTransaction", 6000, 100, apiVersions, false); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); @@ -2919,7 +2952,7 @@ public void testForceShutdownWithIncompleteTransaction() { @Test public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager, false, null); doInitTransactions(txnManager, producerIdAndEpoch); @@ -2945,7 +2978,7 @@ public void testTransactionAbortedExceptionOnAbortWithoutError() throws Interrup public void testDoNotPollWhenNoRequestSent() { client = spy(new MockClient(time, metadata)); - TransactionManager txnManager = new TransactionManager(logContext, "testDoNotPollWhenNoRequestSent", 6000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testDoNotPollWhenNoRequestSent", 6000, 100, apiVersions, false); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -2957,7 +2990,7 @@ public void testDoNotPollWhenNoRequestSent() { @Test public void testTooLargeBatchesAreSafelyRemoved() throws InterruptedException { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager, false, null); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3000,8 +3033,62 @@ public void testCustomErrorMessage() throws Exception { verifyErrorMessage(produceResponse(tp0, 0L, Errors.INVALID_REQUEST, 0, -1, errorMessage), errorMessage); } + @ParameterizedTest + @EnumSource(value = Errors.class, names = {"COORDINATOR_LOAD_IN_PROGRESS", "INVALID_TXN_STATE"}) + public void testTransactionShouldTransitionToAbortableForSenderAPI(Errors error) throws InterruptedException { + ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); + TransactionManager txnManager = new TransactionManager( + logContext, + "testRetriableException", + 60000, + RETRY_BACKOFF_MS, + apiVersions, + false + ); + + // Setup with transaction state and initialize transactions with single retry + setupWithTransactionState(txnManager, false, null, 
1); + doInitTransactions(txnManager, producerIdAndEpoch); + + // Begin transaction and add partition + txnManager.beginTransaction(); + txnManager.maybeAddPartition(tp0); + client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp0, Errors.NONE))); + sender.runOnce(); + + // First produce request + appendToAccumulator(tp0); + client.prepareResponse(produceResponse(tp0, -1, error, -1)); + sender.runOnce(); + + // Sleep for retry backoff + time.sleep(RETRY_BACKOFF_MS); + + // Second attempt to process record - PREPARE the response before sending + client.prepareResponse(produceResponse(tp0, -1, error, -1)); + sender.runOnce(); + + // Now transaction should be in abortable state after retry is exhausted + assertTrue(txnManager.hasAbortableError()); + + // Second produce request - should fail with TransactionAbortableException + Future future2 = appendToAccumulator(tp0); + client.prepareResponse(produceResponse(tp0, -1, Errors.NONE, -1)); + // Sender will try to send and fail with TransactionAbortableException instead of COORDINATOR_LOAD_IN_PROGRESS, because we're in abortable state + sender.runOnce(); + assertFutureFailure(future2, TransactionAbortableException.class); + + // Verify transaction API requests also fail with TransactionAbortableException + try { + txnManager.beginCommit(); + fail("Expected beginCommit() to fail with TransactionAbortableException when in abortable error state"); + } catch (KafkaException e) { + assertEquals(TransactionAbortableException.class, e.getCause().getClass()); + } + } + @Test - public void testSenderShouldRetryWithBackoffOnRetriableError() { + public void testSenderShouldRetryWithBackoffOnRetriableError() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); @@ -3026,7 +3113,7 @@ public void testSenderShouldRetryWithBackoffOnRetriableError() { public void testReceiveFailedBatchTwiceWithTransactions() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testFailTwice", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testFailTwice", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3076,7 +3163,7 @@ public void testReceiveFailedBatchTwiceWithTransactions() throws Exception { public void testInvalidTxnStateIsAnAbortableError() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testInvalidTxnState", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "testInvalidTxnState", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3115,7 +3202,7 @@ public void testInvalidTxnStateIsAnAbortableError() throws Exception { public void testTransactionAbortableExceptionIsAnAbortableError() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", 
NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "textTransactionAbortableException", 60000, 100, apiVersions); + TransactionManager txnManager = new TransactionManager(logContext, "textTransactionAbortableException", 60000, 100, apiVersions, false); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3149,6 +3236,61 @@ public void testTransactionAbortableExceptionIsAnAbortableError() throws Excepti txnManager.beginTransaction(); } + + @Test + public void testAbortableErrorIsConvertedToFatalErrorDuringAbort() throws Exception { + + // Initialize and begin transaction + TransactionManager transactionManager = new TransactionManager(logContext, "testAbortableErrorIsConvertedToFatalErrorDuringAbort", 60000, 100, apiVersions, false); + setupWithTransactionState(transactionManager); + doInitTransactions(transactionManager, new ProducerIdAndEpoch(1L, (short) 0)); + transactionManager.beginTransaction(); + + // Add partition and send record + TopicPartition tp = new TopicPartition("test", 0); + addPartitionToTxn(sender, transactionManager, tp); + appendToAccumulator(tp); + + // Send record and get response + sender.runOnce(); + sendIdempotentProducerResponse(0, tp, Errors.NONE, 0); + sender.runOnce(); + + // Commit API with TRANSACTION_ABORTABLE error should set TM to Abortable state + client.prepareResponse(new EndTxnResponse(new EndTxnResponseData() + .setErrorCode(Errors.TRANSACTION_ABORTABLE.code()))); + + // Attempt to commit transaction + TransactionalRequestResult commitResult = transactionManager.beginCommit(); + sender.runOnce(); + try { + commitResult.await(1000, TimeUnit.MILLISECONDS); + fail("Expected abortable error to be thrown for commit"); + } catch (KafkaException e) { + assertTrue(transactionManager.hasAbortableError()); + assertEquals(TransactionAbortableException.class, commitResult.error().getClass()); + } + + // Abort API with TRANSACTION_ABORTABLE error should convert to Fatal error i.e. 
KafkaException + client.prepareResponse(new EndTxnResponse(new EndTxnResponseData() + .setErrorCode(Errors.TRANSACTION_ABORTABLE.code()))); + + // Attempt to abort transaction + TransactionalRequestResult abortResult = transactionManager.beginAbort(); + sender.runOnce(); + + // Verify the error is converted to KafkaException (not TransactionAbortableException) + try { + abortResult.await(1000, TimeUnit.MILLISECONDS); + fail("Expected KafkaException to be thrown"); + } catch (KafkaException e) { + // Verify TM is in FATAL_ERROR state + assertTrue(transactionManager.hasFatalError()); + assertFalse(e instanceof TransactionAbortableException); + assertEquals(KafkaException.class, abortResult.error().getClass()); + } + } + @Test public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exception { Metrics m = new Metrics(); @@ -3171,7 +3313,8 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio int tp0LeaderEpoch = 100; int epoch = tp0LeaderEpoch; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2), + RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), + new TopicIdPartition(TOPIC_ID, tp1)), tp -> { if (tp0.equals(tp)) { return epoch; @@ -3198,7 +3341,8 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio // Update leader epoch for tp0 int newEpoch = ++tp0LeaderEpoch; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2), + RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), + new TopicIdPartition(TOPIC_ID, tp1)), tp -> { if (tp0.equals(tp)) { return newEpoch; @@ -3285,7 +3429,8 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorButNoNewLead int tp1LeaderEpoch = 200; int tp2LeaderEpoch = 300; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 3), + RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), + new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)), tp -> { if (tp0.equals(tp)) { return tp0LeaderEpoch; @@ -3364,7 +3509,8 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader int tp1LeaderEpoch = 200; int tp2LeaderEpoch = 300; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 3), + RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), + new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)), tp -> { if (tp0.equals(tp)) { return tp0LeaderEpoch; @@ -3377,6 +3523,9 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader } })); Cluster startingMetadataCluster = metadata.fetch(); + startingMetadataCluster.nodes().forEach(node -> + apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) + ); // Produce to tp0/1/2, where NO_LEADER_OR_FOLLOWER with new leader info is returned for tp0/1, and tp2 is returned without errors. 
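import org.apache.kafka.common.protocol.Errors;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

// Illustration only: the JUnit 5 parameterization pattern used by
// testTransactionShouldTransitionToAbortableForSenderAPI above. The class and the empty test
// body are hypothetical; only the annotation usage mirrors the patch.
class AbortableErrorParameterizationSketch {
    @ParameterizedTest
    @EnumSource(value = Errors.class, names = {"COORDINATOR_LOAD_IN_PROGRESS", "INVALID_TXN_STATE"})
    void runsOncePerNamedError(Errors error) {
        // JUnit injects COORDINATOR_LOAD_IN_PROGRESS and then INVALID_TXN_STATE in turn.
    }
}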
Future futureIsProducedTp0 = appendToAccumulator(tp0, 0L, "key", "value"); @@ -3397,7 +3546,9 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader responses.put(tp0, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER)); responses.put(tp1, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER)); responses.put(tp2, new OffsetAndError(100, Errors.NONE)); - + newNodes.forEach(node -> + apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) + ); Map partitionLeaderInfo = new HashMap<>(); ProduceResponseData.LeaderIdAndEpoch tp0LeaderInfo = new ProduceResponseData.LeaderIdAndEpoch(); tp0LeaderInfo.setLeaderEpoch(tp0LeaderEpoch + 1); @@ -3561,7 +3712,7 @@ private FutureRecordMetadata appendToAccumulator(TopicPartition tp, long timesta private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, long logStartOffset, String errorMessage) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset, Collections.emptyList(), errorMessage); - Map partResp = Collections.singletonMap(tp, resp); + Map partResp = Collections.singletonMap(new TopicIdPartition(TOPIC_ID, tp), resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -3577,9 +3728,11 @@ private ProduceResponse produceResponse(Map resp for (Map.Entry entry : responses.entrySet()) { TopicPartition topicPartition = entry.getKey(); - ProduceResponseData.TopicProduceResponse topicData = data.responses().find(topicPartition.topic()); + ProduceResponseData.TopicProduceResponse topicData = data.responses().find(topicPartition.topic(), TOPIC_ID); if (topicData == null) { - topicData = new ProduceResponseData.TopicProduceResponse().setName(topicPartition.topic()); + topicData = new ProduceResponseData.TopicProduceResponse() + .setTopicId(TOPIC_ID) + .setName(topicPartition.topic()); data.responses().add(topicData); } @@ -3617,7 +3770,7 @@ private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors e } private TransactionManager createTransactionManager() { - return new TransactionManager(new LogContext(), null, 0, RETRY_BACKOFF_MS, new ApiVersions()); + return new TransactionManager(new LogContext(), null, 0, RETRY_BACKOFF_MS, new ApiVersions(), false); } private void setupWithTransactionState(TransactionManager transactionManager) { @@ -3632,6 +3785,10 @@ private void setupWithTransactionState(TransactionManager transactionManager, bo setupWithTransactionState(transactionManager, guaranteeOrder, customPool, true, Integer.MAX_VALUE, 0); } + private void setupWithTransactionState(TransactionManager transactionManager, boolean guaranteeOrder, BufferPool customPool, int retries) { + setupWithTransactionState(transactionManager, guaranteeOrder, customPool, true, retries, 0); + } + private void setupWithTransactionState( TransactionManager transactionManager, boolean guaranteeOrder, @@ -3661,9 +3818,9 @@ private void setupWithTransactionState( this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, guaranteeOrder, MAX_REQUEST_SIZE, ACKS_ALL, retries, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); - metadata.add("test", time.milliseconds()); + metadata.add(TOPIC_NAME, time.milliseconds()); if (updateMetadata) - this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); + 
this.client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); } private void assertSuccessfulSend() throws InterruptedException { @@ -3719,7 +3876,7 @@ private InitProducerIdResponse initProducerIdResponse(long producerId, short pro } private void doInitTransactions(TransactionManager transactionManager, ProducerIdAndEpoch producerIdAndEpoch) { - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, transactionManager.transactionalId()); sender.runOnce(); sender.runOnce(); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index 8b4decfb9598c..7815b751d8004 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -26,7 +26,9 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -131,10 +133,13 @@ public class TransactionManagerTest { private final int transactionTimeoutMs = 1121; private final String topic = "test"; + private static final Uuid TOPIC_ID = Uuid.fromString("y2J9jXHhfIkQ1wK8mMKXx1"); private final TopicPartition tp0 = new TopicPartition(topic, 0); private final TopicPartition tp1 = new TopicPartition(topic, 1); private final long producerId = 13131L; private final short epoch = 1; + private final long ongoingProducerId = 999L; + private final short bumpedOngoingEpoch = 11; private final String consumerGroupId = "myConsumerGroup"; private final String memberId = "member"; private final int generationId = 5; @@ -149,7 +154,7 @@ public class TransactionManagerTest { private RecordAccumulator accumulator = null; private Sender sender = null; - private TransactionManager transactionManager = null; + private TestableTransactionManager transactionManager = null; private Node brokerNode = null; private long finalizedFeaturesEpoch = 0; @@ -159,17 +164,28 @@ public void setup() { this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("test", 2))); this.brokerNode = new Node(0, "localhost", 2211); - initializeTransactionManager(Optional.of(transactionalId), false); + initializeTransactionManager(Optional.of(transactionalId), false, false); + } + + private void initializeTransactionManager( + Optional transactionalId, + boolean transactionV2Enabled + ) { + initializeTransactionManager(transactionalId, transactionV2Enabled, false); } - private void initializeTransactionManager(Optional transactionalId, boolean transactionV2Enabled) { + private void initializeTransactionManager( + Optional transactionalId, + boolean transactionV2Enabled, + boolean enable2pc + ) { Metrics metrics = new Metrics(time); apiVersions.update("0", new NodeApiVersions(Arrays.asList( new ApiVersion() .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) .setMinVersion((short) 0) - 
.setMaxVersion((short) 3), + .setMaxVersion((short) 6), new ApiVersion() .setApiKey(ApiKeys.PRODUCE.id) .setMinVersion((short) 0) @@ -188,8 +204,9 @@ private void initializeTransactionManager(Optional transactionalId, bool .setMinVersionLevel(transactionV2Enabled ? (short) 2 : (short) 1)), finalizedFeaturesEpoch)); finalizedFeaturesEpoch += 1; - this.transactionManager = new TransactionManager(logContext, transactionalId.orElse(null), - transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions); + this.transactionManager = new TestableTransactionManager(logContext, transactionalId.orElse(null), + transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions, enable2pc); + int batchSize = 16 * 1024; int deliveryTimeoutMs = 3000; @@ -606,9 +623,9 @@ public void testIsSendToPartitionAllowedWithPartitionNotAdded() { @ValueSource(booleans = {true, false}) public void testDefaultSequenceNumber(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); - assertEquals(transactionManager.sequenceNumber(tp0), 0); + assertEquals(0, transactionManager.sequenceNumber(tp0)); transactionManager.incrementSequenceNumber(tp0, 3); - assertEquals(transactionManager.sequenceNumber(tp0), 3); + assertEquals(3, transactionManager.sequenceNumber(tp0)); } @ParameterizedTest @@ -832,13 +849,13 @@ private ProducerBatch batchWithValue(TopicPartition tp, String value) { @ValueSource(booleans = {true, false}) public void testSequenceNumberOverflow(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); - assertEquals(transactionManager.sequenceNumber(tp0), 0); + assertEquals(0, transactionManager.sequenceNumber(tp0)); transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE); - assertEquals(transactionManager.sequenceNumber(tp0), Integer.MAX_VALUE); + assertEquals(Integer.MAX_VALUE, transactionManager.sequenceNumber(tp0)); transactionManager.incrementSequenceNumber(tp0, 100); - assertEquals(transactionManager.sequenceNumber(tp0), 99); + assertEquals(99, transactionManager.sequenceNumber(tp0)); transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE); - assertEquals(transactionManager.sequenceNumber(tp0), 98); + assertEquals(98, transactionManager.sequenceNumber(tp0)); } @ParameterizedTest @@ -846,17 +863,17 @@ public void testSequenceNumberOverflow(boolean transactionV2Enabled) { public void testProducerIdReset(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); initializeIdempotentProducerId(15L, Short.MAX_VALUE); - assertEquals(transactionManager.sequenceNumber(tp0), 0); - assertEquals(transactionManager.sequenceNumber(tp1), 0); + assertEquals(0, transactionManager.sequenceNumber(tp0)); + assertEquals(0, transactionManager.sequenceNumber(tp1)); transactionManager.incrementSequenceNumber(tp0, 3); - assertEquals(transactionManager.sequenceNumber(tp0), 3); + assertEquals(3, transactionManager.sequenceNumber(tp0)); transactionManager.incrementSequenceNumber(tp1, 3); - assertEquals(transactionManager.sequenceNumber(tp1), 3); + assertEquals(3, transactionManager.sequenceNumber(tp1)); transactionManager.requestIdempotentEpochBumpForPartition(tp0); transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); - assertEquals(transactionManager.sequenceNumber(tp0), 0); - assertEquals(transactionManager.sequenceNumber(tp1), 3); + assertEquals(0, transactionManager.sequenceNumber(tp0)); + assertEquals(3, transactionManager.sequenceNumber(tp1)); } @Test @@ -1038,8 +1055,8 @@ 
public void testTransactionManagerDisablesV2() { .setMaxVersionLevel((short) 1) .setMinVersionLevel((short) 1)), 0)); - this.transactionManager = new TransactionManager(logContext, transactionalId, - transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions); + this.transactionManager = new TestableTransactionManager(logContext, transactionalId, + transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions, false); int batchSize = 16 * 1024; int deliveryTimeoutMs = 3000; @@ -1063,7 +1080,7 @@ MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), thi public void testDisconnectAndRetry() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - transactionManager.initializeTransactions(); + transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, true, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) == null); @@ -1076,15 +1093,15 @@ public void testDisconnectAndRetry() { public void testInitializeTransactionsTwiceRaisesError() { doInitTransactions(producerId, epoch); assertTrue(transactionManager.hasProducerId()); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions()); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); } @Test public void testUnsupportedFindCoordinator() { - transactionManager.initializeTransactions(); + transactionManager.initializeTransactions(false); client.prepareUnsupportedVersionResponse(body -> { FindCoordinatorRequest findCoordinatorRequest = (FindCoordinatorRequest) body; - assertEquals(CoordinatorType.forId(findCoordinatorRequest.data().keyType()), CoordinatorType.TRANSACTION); + assertEquals(CoordinatorType.TRANSACTION, CoordinatorType.forId(findCoordinatorRequest.data().keyType())); assertTrue(findCoordinatorRequest.data().key().isEmpty()); assertEquals(1, findCoordinatorRequest.data().coordinatorKeys().size()); assertTrue(findCoordinatorRequest.data().coordinatorKeys().contains(transactionalId)); @@ -1098,7 +1115,7 @@ public void testUnsupportedFindCoordinator() { @Test public void testUnsupportedInitTransactions() { - transactionManager.initializeTransactions(); + transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertFalse(transactionManager.hasError()); @@ -1243,7 +1260,7 @@ public void testIllegalGenerationInTxnOffsetCommitByGroupMetadata() { public void testLookupCoordinatorOnDisconnectAfterSend() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. 
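Every call site in this test class moves from `initializeTransactions()` to `initializeTransactions(false)`, matching the new `keepPreparedTxn` parameter, and the `TransactionManager` constructor gains a trailing `enable2Pc` flag. A minimal sketch of that call shape, using only signatures visible in this patch; these are producer internals, so treat it as illustrative rather than application-facing API:

```java
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.producer.internals.TransactionManager;
import org.apache.kafka.common.utils.LogContext;

public class InitTransactionsSketch {
    public static void main(String[] args) {
        boolean enable2Pc = false; // true opts the producer into two-phase commit (KIP-939)

        TransactionManager txnManager = new TransactionManager(
            new LogContext(),
            "my-transactional-id",
            60_000,           // transaction timeout ms
            100L,             // retry backoff ms
            new ApiVersions(),
            enable2Pc);

        // keepPreparedTxn = false: behave as before, any transaction left over from a previous
        //                          session is fenced or aborted during initialization.
        // keepPreparedTxn = true : keep a previously prepared transaction so it can still be
        //                          committed or aborted after a restart (what the new tests exercise).
        txnManager.initializeTransactions(false);
        // In the real producer the Sender thread drives the resulting request to completion.
    }
}
```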
- TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1275,7 +1292,7 @@ public void testLookupCoordinatorOnDisconnectAfterSend() { public void testLookupCoordinatorOnDisconnectBeforeSend() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1306,7 +1323,7 @@ public void testLookupCoordinatorOnDisconnectBeforeSend() { public void testLookupCoordinatorOnNotCoordinatorError() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1331,7 +1348,7 @@ public void testLookupCoordinatorOnNotCoordinatorError() { @Test public void testTransactionalIdAuthorizationFailureInFindCoordinator() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, false, CoordinatorType.TRANSACTION, transactionalId); @@ -1346,7 +1363,7 @@ public void testTransactionalIdAuthorizationFailureInFindCoordinator() { @Test public void testTransactionalIdAuthorizationFailureInInitProducerId() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1646,7 +1663,7 @@ public void testRetryAbortTransactionAfterTimeout() throws Exception { assertFalse(result.isAcked()); assertFalse(transactionManager.hasOngoingTransaction()); - assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); assertThrows(IllegalStateException.class, transactionManager::beginTransaction); assertThrows(IllegalStateException.class, 
transactionManager::beginCommit); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); @@ -1680,7 +1697,7 @@ public void testRetryCommitTransactionAfterTimeout() throws Exception { assertFalse(result.isAcked()); assertFalse(transactionManager.hasOngoingTransaction()); - assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); assertThrows(IllegalStateException.class, transactionManager::beginTransaction); assertThrows(IllegalStateException.class, transactionManager::beginAbort); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); @@ -1694,7 +1711,7 @@ public void testRetryCommitTransactionAfterTimeout() throws Exception { @Test public void testRetryInitTransactionsAfterTimeout() { - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1715,10 +1732,10 @@ public void testRetryInitTransactionsAfterTimeout() { assertThrows(IllegalStateException.class, transactionManager::beginCommit); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); - assertSame(result, transactionManager.initializeTransactions()); + assertSame(result, transactionManager.initializeTransactions(false)); result.await(); assertTrue(result.isAcked()); - assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); transactionManager.beginTransaction(); assertTrue(transactionManager.hasOngoingTransaction()); @@ -1960,7 +1977,7 @@ public void testMultipleAddPartitionsPerForOneProduce() throws InterruptedExcept }) public void testRetriableErrors(Errors error) { // Ensure FindCoordinator retries. - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(error, false, CoordinatorType.TRANSACTION, transactionalId); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); @@ -1994,7 +2011,7 @@ public void testRetriableErrors(Errors error) { @Test public void testCoordinatorNotAvailable() { // Ensure FindCoordinator with COORDINATOR_NOT_AVAILABLE error retries. 
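Several assertions in these hunks are flipped into the JUnit `assertEquals(expected, actual)` argument order. The order only affects the failure message, as this small standalone example (not from the patch) shows:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class AssertEqualsOrderTest {

    private static int answer() {
        return 41; // deliberately wrong so the two failure messages can be compared
    }

    @Test
    void conventionalOrder() {
        // Fails with "expected: <42> but was: <41>", which matches what actually happened.
        assertEquals(42, answer());
    }

    @Test
    void swappedOrder() {
        // Fails with "expected: <41> but was: <42>", which misleadingly claims 42 was observed.
        assertEquals(answer(), 42);
    }
}
```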
- TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, false, CoordinatorType.TRANSACTION, transactionalId); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); @@ -2017,7 +2034,7 @@ public void testInvalidProducerEpochConvertToProducerFencedInInitProducerId() { } private void verifyProducerFencedForInitProducerId(Errors error) { - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -2940,7 +2957,7 @@ public void testDropCommitOnBatchExpiry() throws InterruptedException { "Expected to get a TimeoutException since the queued ProducerBatch should have been expired"); runUntil(commitResult::isCompleted); // the commit shouldn't be completed without being sent since the produce request failed. assertFalse(commitResult.isSuccessful()); // the commit shouldn't succeed since the produce request failed. - assertThrows(TimeoutException.class, commitResult::await); + assertInstanceOf(TimeoutException.class, assertThrows(TransactionAbortableException.class, commitResult::await).getCause()); assertTrue(transactionManager.hasAbortableError()); assertTrue(transactionManager.hasOngoingTransaction()); @@ -3138,7 +3155,7 @@ public void testEpochBumpAfterLastInFlightBatchFailsIdempotentProducer(boolean t @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testMaybeResolveSequencesTransactionalProducer(boolean transactionV2Enabled) throws Exception { + public void testMaybeResolveSequencesTransactionalProducer(boolean transactionV2Enabled) { initializeTransactionManager(Optional.of(transactionalId), transactionV2Enabled); // Initialize transaction with initial producer ID and epoch. @@ -3802,7 +3819,7 @@ public void testBackgroundInvalidStateTransitionIsFatal() { doInitTransactions(); assertTrue(transactionManager.isTransactional()); - transactionManager.setPoisonStateOnInvalidTransition(true); + transactionManager.setShouldPoisonStateOnInvalidTransitionOverride(true); // Intentionally perform an operation that will cause an invalid state transition. 
The detection of this // will result in a poisoning of the transaction manager for all subsequent transactional operations since @@ -3815,7 +3832,7 @@ public void testBackgroundInvalidStateTransitionIsFatal() { assertThrows(IllegalStateException.class, () -> transactionManager.beginAbort()); assertThrows(IllegalStateException.class, () -> transactionManager.beginCommit()); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions()); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); assertThrows(IllegalStateException.class, () -> transactionManager.sendOffsetsToTransaction(Collections.emptyMap(), new ConsumerGroupMetadata("fake-group-id"))); } @@ -3852,7 +3869,7 @@ public void testForegroundInvalidStateTransitionIsRecoverable() { @Test public void testTransactionAbortableExceptionInInitProducerId() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -4007,6 +4024,137 @@ private void verifyCommitOrAbortTransactionRetriable(TransactionResult firstTran assertFalse(transactionManager.hasOngoingTransaction()); } + @Test + public void testInitializeTransactionsWithKeepPreparedTxn() { + doInitTransactionsWith2PCEnabled(true); + runUntil(transactionManager::hasProducerId); + + // Expect a bumped epoch in the response. 
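For context on what `testInitializeTransactionsWithKeepPreparedTxn` asserts: when `keepPreparedTxn=true`, the InitProducerId response can carry the producer id and epoch of an ongoing prepared transaction, and the client resumes that transaction instead of starting fresh. A minimal sketch of such a response payload, using only the setters this patch adds and exercises in `prepareInitPidResponse` further down; the concrete values are illustrative:

```java
import org.apache.kafka.common.message.InitProducerIdResponseData;
import org.apache.kafka.common.protocol.Errors;

public class InitPidWithOngoingTxnSketch {
    public static void main(String[] args) {
        long ongoingTxnProducerId = 999L; // -1 would mean "no prepared transaction to keep"

        InitProducerIdResponseData response = new InitProducerIdResponseData()
            .setErrorCode(Errors.NONE.code())
            .setProducerId(999L)                 // producer id for this session
            .setProducerEpoch((short) 11)        // bumped epoch returned to the client
            .setThrottleTimeMs(0)
            .setOngoingTxnProducerId(ongoingTxnProducerId)
            .setOngoingTxnProducerEpoch((short) 10); // epoch under which the txn was prepared

        boolean resumePreparedTxn = ongoingTxnProducerId != -1L;
        System.out.println("resume prepared transaction: " + resumePreparedTxn);
    }
}
```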
+ assertTrue(transactionManager.hasProducerId()); + assertFalse(transactionManager.hasOngoingTransaction()); + assertEquals(ongoingProducerId, transactionManager.producerIdAndEpoch().producerId); + assertEquals(bumpedOngoingEpoch, transactionManager.producerIdAndEpoch().epoch); + } + + @Test + public void testPrepareTransaction() { + doInitTransactionsWith2PCEnabled(false); + runUntil(transactionManager::hasProducerId); + + // Begin a transaction + transactionManager.beginTransaction(); + assertTrue(transactionManager.hasOngoingTransaction()); + + // Add a partition to the transaction + transactionManager.maybeAddPartition(tp0); + + // Capture the current producer ID and epoch before preparing the response + long producerId = transactionManager.producerIdAndEpoch().producerId; + short epoch = transactionManager.producerIdAndEpoch().epoch; + + // Simulate a produce request + try { + // Prepare the response before sending to ensure it's ready + prepareProduceResponse(Errors.NONE, producerId, epoch); + + appendToAccumulator(tp0); + // Wait until the request is processed + runUntil(() -> !client.hasPendingResponses()); + } catch (InterruptedException e) { + fail("Unexpected interruption: " + e); + } + + transactionManager.prepareTransaction(); + assertTrue(transactionManager.isPrepared()); + + ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); + // Validate the state contains the correct producer ID and epoch + assertEquals(producerId, preparedState.producerId); + assertEquals(epoch, preparedState.epoch); + } + + @Test + public void testInitPidResponseWithKeepPreparedTrueAndOngoingTransaction() { + // Initialize transaction manager with 2PC enabled + initializeTransactionManager(Optional.of(transactionalId), true, true); + + // Start initializeTransactions with keepPreparedTxn=true + TransactionalRequestResult result = transactionManager.initializeTransactions(true); + + // Prepare coordinator response + prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); + runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); + + // Simulate InitProducerId response with ongoing transaction + long ongoingPid = 12345L; + short ongoingEpoch = 5; + prepareInitPidResponse( + Errors.NONE, + false, + producerId, + epoch, + true, + true, + ongoingPid, + ongoingEpoch + ); + + runUntil(transactionManager::hasProducerId); + transactionManager.maybeUpdateTransactionV2Enabled(true); + + result.await(); + assertTrue(result.isSuccessful()); + + // Verify transaction manager transitioned to PREPARED_TRANSACTION state + assertTrue(transactionManager.isPrepared()); + + // Verify preparedTxnState was set with ongoing producer ID and epoch + ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); + assertNotNull(preparedState); + assertEquals(ongoingPid, preparedState.producerId); + assertEquals(ongoingEpoch, preparedState.epoch); + } + + @Test + public void testInitPidResponseWithKeepPreparedTrueAndNoOngoingTransaction() { + // Initialize transaction manager without 2PC enabled + // keepPrepared can be true even when enable2Pc is false, and we expect the same behavior + initializeTransactionManager(Optional.of(transactionalId), true, false); + + // Start initializeTransactions with keepPreparedTxn=true + TransactionalRequestResult result = transactionManager.initializeTransactions(true); + + // Prepare coordinator response + prepareFindCoordinatorResponse(Errors.NONE, false, 
CoordinatorType.TRANSACTION, transactionalId); + runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); + + // Simulate InitProducerId response without ongoing transaction + prepareInitPidResponse( + Errors.NONE, + false, + producerId, + epoch, + true, + false, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH + ); + + runUntil(transactionManager::hasProducerId); + transactionManager.maybeUpdateTransactionV2Enabled(true); + + result.await(); + assertTrue(result.isSuccessful()); + + // Verify transaction manager transitioned to READY state (not PREPARED_TRANSACTION) + assertFalse(transactionManager.isPrepared()); + assertTrue(transactionManager.isReady()); + + // Verify preparedTxnState was not set or is empty + ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); + assertEquals(ProducerIdAndEpoch.NONE, preparedState); + } + private void prepareAddPartitionsToTxn(final Map errors) { AddPartitionsToTxnResult result = AddPartitionsToTxnResponse.resultForTransaction(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID, errors); AddPartitionsToTxnResponseData data = new AddPartitionsToTxnResponseData().setResultsByTopicV3AndBelow(result.topicResults()).setThrottleTimeMs(0); @@ -4035,16 +4183,39 @@ private void prepareFindCoordinatorResponse(Errors error, boolean shouldDisconne }, FindCoordinatorResponse.prepareResponse(error, coordinatorKey, brokerNode), shouldDisconnect); } - private void prepareInitPidResponse(Errors error, boolean shouldDisconnect, long producerId, short producerEpoch) { + private void prepareInitPidResponse( + Errors error, + boolean shouldDisconnect, + long producerId, + short producerEpoch + ) { + prepareInitPidResponse(error, shouldDisconnect, producerId, producerEpoch, false, false, -1, (short) -1); + } + + private void prepareInitPidResponse( + Errors error, + boolean shouldDisconnect, + long producerId, + short producerEpoch, + boolean keepPreparedTxn, + boolean enable2Pc, + long ongoingProducerId, + short ongoingProducerEpoch + ) { InitProducerIdResponseData responseData = new InitProducerIdResponseData() - .setErrorCode(error.code()) - .setProducerEpoch(producerEpoch) - .setProducerId(producerId) - .setThrottleTimeMs(0); + .setErrorCode(error.code()) + .setProducerEpoch(producerEpoch) + .setProducerId(producerId) + .setThrottleTimeMs(0) + .setOngoingTxnProducerId(ongoingProducerId) + .setOngoingTxnProducerEpoch(ongoingProducerEpoch); + client.prepareResponse(body -> { InitProducerIdRequest initProducerIdRequest = (InitProducerIdRequest) body; assertEquals(transactionalId, initProducerIdRequest.data().transactionalId()); assertEquals(transactionTimeoutMs, initProducerIdRequest.data().transactionTimeoutMs()); + assertEquals(keepPreparedTxn, initProducerIdRequest.data().keepPreparedTxn()); + assertEquals(enable2Pc, initProducerIdRequest.data().enable2Pc()); return true; }, new InitProducerIdResponse(responseData), shouldDisconnect); } @@ -4285,7 +4456,7 @@ private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors e @SuppressWarnings("deprecation") private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, int logStartOffset) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); - Map partResp = singletonMap(tp, resp); + Map partResp = singletonMap(new TopicIdPartition(TOPIC_ID, tp), resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -4309,7 
+4480,7 @@ private void doInitTransactions() { } private void doInitTransactions(long producerId, short epoch) { - TransactionalRequestResult result = transactionManager.initializeTransactions(); + TransactionalRequestResult result = transactionManager.initializeTransactions(false); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -4323,6 +4494,48 @@ private void doInitTransactions(long producerId, short epoch) { assertTrue(result.isAcked()); } + private void doInitTransactionsWith2PCEnabled(boolean keepPrepared) { + initializeTransactionManager(Optional.of(transactionalId), true, true); + TransactionalRequestResult result = transactionManager.initializeTransactions(keepPrepared); + + prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); + runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); + assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); + + if (keepPrepared) { + // Simulate an ongoing prepared transaction (ongoingProducerId != -1). + short ongoingEpoch = bumpedOngoingEpoch - 1; + prepareInitPidResponse( + Errors.NONE, + false, + ongoingProducerId, + bumpedOngoingEpoch, + true, + true, + ongoingProducerId, + ongoingEpoch + ); + } else { + prepareInitPidResponse( + Errors.NONE, + false, + producerId, + epoch, + false, + true, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH + ); + } + + runUntil(transactionManager::hasProducerId); + transactionManager.maybeUpdateTransactionV2Enabled(true); + + result.await(); + assertTrue(result.isSuccessful()); + assertTrue(result.isAcked()); + } + private void assertAbortableError(Class cause) { try { transactionManager.beginCommit(); @@ -4373,4 +4586,32 @@ private void runUntil(Supplier condition) { ProducerTestUtils.runUntil(sender, condition); } + /** + * This subclass exists only to optionally change the default behavior related to poisoning the state + * on invalid state transition attempts. + */ + private static class TestableTransactionManager extends TransactionManager { + + private Optional shouldPoisonStateOnInvalidTransitionOverride; + + public TestableTransactionManager(LogContext logContext, + String transactionalId, + int transactionTimeoutMs, + long retryBackoffMs, + ApiVersions apiVersions, + boolean enable2Pc) { + super(logContext, transactionalId, transactionTimeoutMs, retryBackoffMs, apiVersions, enable2Pc); + this.shouldPoisonStateOnInvalidTransitionOverride = Optional.empty(); + } + + private void setShouldPoisonStateOnInvalidTransitionOverride(boolean override) { + shouldPoisonStateOnInvalidTransitionOverride = Optional.of(override); + } + + @Override + protected boolean shouldPoisonStateOnInvalidTransition() { + // If there's an override, use it, otherwise invoke the default (i.e. super class) logic. 
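The `TestableTransactionManager` defined in this file follows a common testing pattern: production code exposes a protected hook, and a test-only subclass overrides it through an `Optional` so the default behaviour stays in force unless a test explicitly opts in. A generic, self-contained sketch of the pattern; the class and method names here are illustrative, not from the patch:

```java
import java.util.Optional;

public class OverrideHookSketch {

    // Production class with a protected decision point.
    static class StateMachine {
        protected boolean shouldPoisonOnInvalidTransition() {
            // Illustrative default: only background network threads poison the state.
            return Thread.currentThread().getName().startsWith("kafka-producer-network-thread");
        }
    }

    // Test-only subclass: an empty Optional means "defer to the real logic".
    static class TestableStateMachine extends StateMachine {
        private Optional<Boolean> poisonOverride = Optional.empty();

        void setPoisonOverride(boolean override) {
            poisonOverride = Optional.of(override);
        }

        @Override
        protected boolean shouldPoisonOnInvalidTransition() {
            return poisonOverride.orElseGet(super::shouldPoisonOnInvalidTransition);
        }
    }

    public static void main(String[] args) {
        TestableStateMachine machine = new TestableStateMachine();
        System.out.println(machine.shouldPoisonOnInvalidTransition()); // default logic
        machine.setPoisonOverride(true);
        System.out.println(machine.shouldPoisonOnInvalidTransition()); // forced to true
    }
}
```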
+ return shouldPoisonStateOnInvalidTransitionOverride.orElseGet(super::shouldPoisonStateOnInvalidTransition); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java index ceb819dee8f6a..5df435149deb7 100644 --- a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java @@ -318,7 +318,7 @@ public void testThenApplyOnSucceededFutureAndFunctionThrowsCompletionException() awaitAndAssertResult(future, 21, null); Throwable cause = awaitAndAssertFailure(dependantFuture, CompletionException.class, "java.lang.RuntimeException: We require more vespene gas"); assertInstanceOf(RuntimeException.class, cause.getCause()); - assertEquals(cause.getCause().getMessage(), "We require more vespene gas"); + assertEquals("We require more vespene gas", cause.getCause().getMessage()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/UuidTest.java b/clients/src/test/java/org/apache/kafka/common/UuidTest.java index 65316469c69e2..9acc8145be84a 100644 --- a/clients/src/test/java/org/apache/kafka/common/UuidTest.java +++ b/clients/src/test/java/org/apache/kafka/common/UuidTest.java @@ -35,8 +35,8 @@ public class UuidTest { public void testSignificantBits() { Uuid id = new Uuid(34L, 98L); - assertEquals(id.getMostSignificantBits(), 34L); - assertEquals(id.getLeastSignificantBits(), 98L); + assertEquals(34L, id.getMostSignificantBits()); + assertEquals(98L, id.getLeastSignificantBits()); } @Test @@ -74,15 +74,15 @@ public void testStringConversion() { String zeroIdString = Uuid.ZERO_UUID.toString(); - assertEquals(Uuid.fromString(zeroIdString), Uuid.ZERO_UUID); + assertEquals(Uuid.ZERO_UUID, Uuid.fromString(zeroIdString)); } @RepeatedTest(value = 100, name = RepeatedTest.LONG_DISPLAY_NAME) public void testRandomUuid() { Uuid randomID = Uuid.randomUuid(); - assertNotEquals(randomID, Uuid.ZERO_UUID); - assertNotEquals(randomID, Uuid.METADATA_TOPIC_ID); + assertNotEquals(Uuid.ZERO_UUID, randomID); + assertNotEquals(Uuid.METADATA_TOPIC_ID, randomID); assertFalse(randomID.toString().startsWith("-")); } diff --git a/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java b/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java index 8669156d7c390..b517f55534e67 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java @@ -379,8 +379,8 @@ public void testOriginalWithOverrides() { Properties props = new Properties(); props.put("config.providers", "file"); TestIndirectConfigResolution config = new TestIndirectConfigResolution(props); - assertEquals(config.originals().get("config.providers"), "file"); - assertEquals(config.originals(Collections.singletonMap("config.providers", "file2")).get("config.providers"), "file2"); + assertEquals("file", config.originals().get("config.providers")); + assertEquals("file2", config.originals(Collections.singletonMap("config.providers", "file2")).get("config.providers")); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java index 6e1f0e232429b..c6c2390b07c47 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java @@ 
-135,12 +135,9 @@ private void testBadInputs(Type type, Object... values) { Map m = new HashMap<>(); m.put("name", value); ConfigDef def = new ConfigDef().define("name", type, Importance.HIGH, "docs"); - try { - def.parse(m); - fail("Expected a config exception on bad input for value " + value); - } catch (ConfigException e) { - // this is good - } + assertThrows(ConfigException.class, + () -> def.parse(m), + "Expected a config exception on bad input for value " + value); } } @@ -416,7 +413,7 @@ public void testNames() { .define("a", Type.STRING, Importance.LOW, "docs") .define("b", Type.STRING, Importance.LOW, "docs"); Set names = configDef.names(); - assertEquals(new HashSet<>(Arrays.asList("a", "b")), names); + assertEquals(Set.of("a", "b"), names); // should be unmodifiable try { names.add("new"); @@ -439,13 +436,13 @@ public void testBaseConfigDefDependents() { // Creating a ConfigDef based on another should compute the correct number of configs with no parent, even // if the base ConfigDef has already computed its parentless configs final ConfigDef baseConfigDef = new ConfigDef().define("a", Type.STRING, Importance.LOW, "docs"); - assertEquals(new HashSet<>(singletonList("a")), baseConfigDef.getConfigsWithNoParent()); + assertEquals(Set.of("a"), baseConfigDef.getConfigsWithNoParent()); final ConfigDef configDef = new ConfigDef(baseConfigDef) .define("parent", Type.STRING, Importance.HIGH, "parent docs", "group", 1, Width.LONG, "Parent", singletonList("child")) .define("child", Type.STRING, Importance.HIGH, "docs"); - assertEquals(new HashSet<>(Arrays.asList("a", "parent")), configDef.getConfigsWithNoParent()); + assertEquals(Set.of("a", "parent"), configDef.getConfigsWithNoParent()); } @@ -486,12 +483,9 @@ private void testValidators(Type type, Validator validator, Object defaultVal, O for (Object value : badValues) { Map m = new HashMap<>(); m.put("name", value); - try { - def.parse(m); - fail("Expected a config exception due to invalid value " + value); - } catch (ConfigException e) { - // this is good - } + assertThrows(ConfigException.class, + () -> def.parse(m), + "Expected a config exception due to invalid value " + value); } } @@ -764,4 +758,59 @@ public void testListSizeValidatorToString() { assertEquals("List containing maximum of 5 elements", ListSize.atMostOfSize(5).toString()); } + @Test + public void testListValidatorAnyNonDuplicateValues() { + ConfigDef.ValidList allowAnyNonDuplicateValues = ConfigDef.ValidList.anyNonDuplicateValues(true, true); + assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of("a", "b", "c"))); + assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of())); + assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", null)); + ConfigException exception1 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of("a", "a"))); + assertEquals("Configuration 'test.config' values must not be duplicated.", exception1.getMessage()); + ConfigException exception2 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of(""))); + assertEquals("Configuration 'test.config' values must not be empty.", exception2.getMessage()); + + ConfigDef.ValidList allowAnyNonDuplicateValuesAndNull = ConfigDef.ValidList.anyNonDuplicateValues(false, true); + assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of("a", "b", "c"))); + assertDoesNotThrow(() -> 
allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", null)); + ConfigException exception3 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of())); + assertEquals("Configuration 'test.config' must not be empty. Valid values include: any non-empty value", exception3.getMessage()); + ConfigException exception4 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of("a", "a"))); + assertEquals("Configuration 'test.config' values must not be duplicated.", exception4.getMessage()); + ConfigException exception5 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of(""))); + assertEquals("Configuration 'test.config' values must not be empty.", exception5.getMessage()); + + ConfigDef.ValidList allowAnyNonDuplicateValuesAndEmptyList = ConfigDef.ValidList.anyNonDuplicateValues(true, false); + assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of("a", "b", "c"))); + assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of())); + ConfigException exception6 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", null)); + assertEquals("Configuration 'test.config' values must not be null.", exception6.getMessage()); + ConfigException exception7 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of("a", "a"))); + assertEquals("Configuration 'test.config' values must not be duplicated.", exception7.getMessage()); + ConfigException exception8 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of(""))); + assertEquals("Configuration 'test.config' values must not be empty.", exception8.getMessage()); + } + + @Test + public void testListValidatorIn() { + ConfigDef.ValidList allowEmptyValidator = ConfigDef.ValidList.in(true, "a", "b", "c"); + assertDoesNotThrow(() -> allowEmptyValidator.ensureValid("test.config", List.of("a", "b"))); + assertDoesNotThrow(() -> allowEmptyValidator.ensureValid("test.config", List.of())); + ConfigException exception1 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", null)); + assertEquals("Configuration 'test.config' values must not be null.", exception1.getMessage()); + ConfigException exception2 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", List.of("d"))); + assertEquals("Invalid value d for configuration test.config: String must be one of: a, b, c", exception2.getMessage()); + ConfigException exception3 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", List.of("a", "a"))); + assertEquals("Configuration 'test.config' values must not be duplicated.", exception3.getMessage()); + + ConfigDef.ValidList notAllowEmptyValidator = ConfigDef.ValidList.in(false, "a", "b", "c"); + assertDoesNotThrow(() -> notAllowEmptyValidator.ensureValid("test.config", List.of("a", "b"))); + ConfigException exception4 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of())); + assertEquals("Configuration 'test.config' must not be empty. 
Valid values include: [a, b, c]", exception4.getMessage()); + ConfigException exception5 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", null)); + assertEquals("Configuration 'test.config' values must not be null.", exception5.getMessage()); + ConfigException exception6 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of("d"))); + assertEquals("Invalid value d for configuration test.config: String must be one of: a, b, c", exception6.getMessage()); + ConfigException exception7 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of("a", "a"))); + assertEquals("Configuration 'test.config' values must not be duplicated.", exception7.getMessage()); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java index bbd2268e7cb8f..9a31a63915d3d 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java @@ -22,10 +22,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -88,7 +86,7 @@ public void testGetOneKeyWithEmptyPath() { @Test void testGetEnvVarsByKeyList() { - Set keyList = new HashSet<>(Arrays.asList("test_var1", "secret_var2")); + Set keyList = Set.of("test_var1", "secret_var2"); Set keys = envVarConfigProvider.get(null, keyList).data().keySet(); assertEquals(keyList, keys); } diff --git a/clients/src/test/java/org/apache/kafka/common/errors/TransactionExceptionHierarchyTest.java b/clients/src/test/java/org/apache/kafka/common/errors/TransactionExceptionHierarchyTest.java index 1d6d47abc3395..9738ccd6c8180 100644 --- a/clients/src/test/java/org/apache/kafka/common/errors/TransactionExceptionHierarchyTest.java +++ b/clients/src/test/java/org/apache/kafka/common/errors/TransactionExceptionHierarchyTest.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.common.errors; +import org.apache.kafka.common.InvalidRecordException; + import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -94,4 +96,29 @@ void testApplicationRecoverableExceptionHierarchy(Class exc assertTrue(ApplicationRecoverableException.class.isAssignableFrom(exceptionClass), exceptionClass.getSimpleName() + " should extend ApplicationRecoverableException"); } + + /** + * Verifies that the given exception class extends `InvalidConfigurationException` + * + * @param exceptionClass the exception class to check + */ + @ParameterizedTest + @ValueSource(classes = { + AuthenticationException.class, + AuthorizationException.class, + ClusterAuthorizationException.class, + TransactionalIdAuthorizationException.class, + UnsupportedVersionException.class, + UnsupportedForMessageFormatException.class, + InvalidRecordException.class, + InvalidRequiredAcksException.class, + RecordBatchTooLargeException.class, + InvalidTopicException.class, + TopicAuthorizationException.class, + GroupAuthorizationException.class + }) + void testInvalidConfigurationExceptionHierarchy(Class exceptionClass) { + 
assertTrue(InvalidConfigurationException.class.isAssignableFrom(exceptionClass), + exceptionClass.getSimpleName() + " should extend InvalidConfigurationException"); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java b/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java index 1d6679e62ce2b..9bc6f05106ea8 100644 --- a/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java +++ b/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java @@ -130,7 +130,7 @@ public void testToString() { public void testEquals() { SupportedVersionRange tested = new SupportedVersionRange((short) 1, (short) 1); assertEquals(tested, tested); - assertNotEquals(tested, new SupportedVersionRange((short) 1, (short) 2)); + assertNotEquals(new SupportedVersionRange((short) 1, (short) 2), tested); assertNotEquals(null, tested); } diff --git a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java index 0cdb9b170f43b..41104194991d9 100644 --- a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java +++ b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java @@ -30,7 +30,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; public class RecordHeadersTest { @@ -48,6 +47,21 @@ public void testAdd() { assertEquals(2, getCount(headers)); } + @Test + public void testAddHeadersPreserveOrder() { + Headers headers = new RecordHeaders(); + headers.add(new RecordHeader("key", "value".getBytes())); + headers.add(new RecordHeader("key2", "value2".getBytes())); + headers.add(new RecordHeader("key3", "value3".getBytes())); + + Header[] headersArr = headers.toArray(); + assertHeader("key", "value", headersArr[0]); + assertHeader("key2", "value2", headersArr[1]); + assertHeader("key3", "value3", headersArr[2]); + + assertEquals(3, getCount(headers)); + } + @Test public void testRemove() { Headers headers = new RecordHeaders(); @@ -60,6 +74,27 @@ public void testRemove() { assertFalse(headers.iterator().hasNext()); } + @Test + public void testPreserveOrderAfterRemove() { + Headers headers = new RecordHeaders(); + headers.add(new RecordHeader("key", "value".getBytes())); + headers.add(new RecordHeader("key2", "value2".getBytes())); + headers.add(new RecordHeader("key3", "value3".getBytes())); + + headers.remove("key"); + Header[] headersArr = headers.toArray(); + assertHeader("key2", "value2", headersArr[0]); + assertHeader("key3", "value3", headersArr[1]); + assertEquals(2, getCount(headers)); + + headers.add(new RecordHeader("key4", "value4".getBytes())); + headers.remove("key3"); + headersArr = headers.toArray(); + assertHeader("key2", "value2", headersArr[0]); + assertHeader("key4", "value4", headersArr[1]); + assertEquals(2, getCount(headers)); + } + @Test public void testAddRemoveInterleaved() { Headers headers = new RecordHeaders(); @@ -73,7 +108,7 @@ public void testAddRemoveInterleaved() { assertEquals(1, getCount(headers)); headers.add(new RecordHeader("key3", "value3".getBytes())); - + assertNull(headers.lastHeader("key")); assertHeader("key2", "value2", headers.lastHeader("key2")); @@ -128,42 +163,44 @@ public void testLastHeader() 
{ } + @Test + public void testHeadersIteratorRemove() { + Headers headers = new RecordHeaders(); + headers.add(new RecordHeader("key", "value".getBytes())); + + Iterator<Header> headersIterator = headers.headers("key").iterator(); + headersIterator.next(); + assertThrows(UnsupportedOperationException.class, + headersIterator::remove); + } + @Test public void testReadOnly() { RecordHeaders headers = new RecordHeaders(); headers.add(new RecordHeader("key", "value".getBytes())); Iterator<Header>
    headerIteratorBeforeClose = headers.iterator(); headers.setReadOnly(); - try { - headers.add(new RecordHeader("key", "value".getBytes())); - fail("IllegalStateException expected as headers are closed"); - } catch (IllegalStateException ise) { - //expected - } - - try { - headers.remove("key"); - fail("IllegalStateException expected as headers are closed"); - } catch (IllegalStateException ise) { - //expected - } - - try { - Iterator
    headerIterator = headers.iterator(); - headerIterator.next(); - headerIterator.remove(); - fail("IllegalStateException expected as headers are closed"); - } catch (IllegalStateException ise) { - //expected - } - - try { - headerIteratorBeforeClose.next(); - headerIteratorBeforeClose.remove(); - fail("IllegalStateException expected as headers are closed"); - } catch (IllegalStateException ise) { - //expected - } + + assertThrows(IllegalStateException.class, + () -> headers.add(new RecordHeader("key", "value".getBytes())), + "IllegalStateException expected as headers are closed."); + + assertThrows(IllegalStateException.class, + () -> headers.remove("key"), + "IllegalStateException expected as headers are closed."); + + Iterator
    headerIterator = headers.iterator(); + headerIterator.next(); + + assertThrows(IllegalStateException.class, + headerIterator::remove, + "IllegalStateException expected as headers are closed."); + + headerIteratorBeforeClose.next(); + + assertThrows(IllegalStateException.class, + headerIterator::remove, + "IllegalStateException expected as headers are closed."); } @Test @@ -222,7 +259,7 @@ public void shouldThrowNpeWhenAddingCollectionWithNullHeader() { private int getCount(Headers headers) { return headers.toArray().length; } - + static void assertHeader(String key, String value, Header actual) { assertEquals(key, actual.key()); assertArrayEquals(value.getBytes(), actual.value()); diff --git a/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java b/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java index d414d77bdba2e..0fd835d742e83 100644 --- a/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java +++ b/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java @@ -26,7 +26,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.Arrays; -import java.util.Collections; +import java.util.LinkedHashMap; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -129,7 +129,7 @@ public void testUsePluginMetricsAfterClose() throws Exception { Plugin plugin = Plugin.wrapInstance(new SomeMonitorablePlugin(), METRICS, CONFIG); PluginMetrics pluginMetrics = plugin.get().pluginMetrics; plugin.close(); - assertThrows(IllegalStateException.class, () -> pluginMetrics.metricName("", "", Collections.emptyMap())); + assertThrows(IllegalStateException.class, () -> pluginMetrics.metricName("", "", new LinkedHashMap<>())); assertThrows(IllegalStateException.class, () -> pluginMetrics.addMetric(null, null)); assertThrows(IllegalStateException.class, () -> pluginMetrics.removeMetric(null)); assertThrows(IllegalStateException.class, () -> pluginMetrics.addSensor("")); diff --git a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java index b28b8274f581f..0bcd9731c462d 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java @@ -56,11 +56,13 @@ import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.types.RawTaggedField; +import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; import com.fasterxml.jackson.databind.JsonNode; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; import java.lang.reflect.Method; import java.nio.ByteBuffer; @@ -82,7 +84,6 @@ public final class MessageTest { private final String memberId = "memberId"; private final String instanceId = "instanceId"; - private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(1, 2, 3, 4, 5, 6, 7); @Test public void testAddOffsetsToTxnVersions() throws Exception { @@ -409,90 +410,49 @@ public void testOffsetForLeaderEpochVersions() throws Exception { new OffsetForLeaderEpochRequestData().setReplicaId(-2)); } - @Test - public void testOffsetCommitRequestVersions() throws Exception { - String groupId = "groupId"; - String topicName = "topic"; - String metadata = "metadata"; - int partition = 2; - int offset = 100; - - testAllMessageRoundTrips(new 
OffsetCommitRequestData() - .setGroupId(groupId) - .setTopics(Collections.singletonList( - new OffsetCommitRequestTopic() - .setName(topicName) - .setPartitions(Collections.singletonList( - new OffsetCommitRequestPartition() - .setPartitionIndex(partition) - .setCommittedMetadata(metadata) - .setCommittedOffset(offset) - ))))); - - Supplier request = - () -> new OffsetCommitRequestData() - .setGroupId(groupId) - .setMemberId("memberId") - .setGroupInstanceId("instanceId") - .setTopics(Collections.singletonList( - new OffsetCommitRequestTopic() - .setName(topicName) - .setPartitions(Collections.singletonList( - new OffsetCommitRequestPartition() - .setPartitionIndex(partition) - .setCommittedLeaderEpoch(10) - .setCommittedMetadata(metadata) - .setCommittedOffset(offset) - )))) - .setRetentionTimeMs(20); - - for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { - OffsetCommitRequestData requestData = request.get(); - - if (version > 4) { - requestData.setRetentionTimeMs(-1); - } - - if (version < 6) { - requestData.topics().get(0).partitions().get(0).setCommittedLeaderEpoch(-1); - } - - if (version < 7) { - requestData.setGroupInstanceId(null); - } - - if (version >= 2 && version <= 4) { - testAllMessageRoundTripsBetweenVersions(version, (short) 5, requestData, requestData); - } else { - testAllMessageRoundTripsFromVersion(version, requestData); - } - } - } - - @Test - public void testOffsetCommitResponseVersions() throws Exception { - Supplier response = - () -> new OffsetCommitResponseData() - .setTopics( - singletonList( - new OffsetCommitResponseTopic() - .setName("topic") - .setPartitions(singletonList( - new OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) - )) - ) - ) - .setThrottleTimeMs(20); - - for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { - OffsetCommitResponseData responseData = response.get(); - if (version < 3) { - responseData.setThrottleTimeMs(0); - } - testAllMessageRoundTripsFromVersion(version, responseData); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + public void testOffsetCommitRequestVersions(short version) throws Exception { + OffsetCommitRequestData request = new OffsetCommitRequestData() + .setGroupId("groupId") + .setMemberId("memberId") + .setGenerationIdOrMemberEpoch(version >= 1 ? 10 : -1) + .setGroupInstanceId(version >= 7 ? "instanceId" : null) + .setRetentionTimeMs((version >= 2 && version <= 4) ? 20 : -1) + .setTopics(singletonList( + new OffsetCommitRequestTopic() + .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) + .setName(version < 10 ? "topic" : "") + .setPartitions(singletonList( + new OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedMetadata("metadata") + .setCommittedOffset(100) + .setCommittedLeaderEpoch(version >= 6 ? 10 : -1) + + )) + )); + + testMessageRoundTrip(version, request, request); + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + public void testOffsetCommitResponseVersions(short version) throws Exception { + OffsetCommitResponseData response = new OffsetCommitResponseData() + .setThrottleTimeMs(version >= 3 ? 20 : 0) + .setTopics(singletonList( + new OffsetCommitResponseTopic() + .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) + .setName(version < 10 ? 
"topic" : "") + .setPartitions(singletonList( + new OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) + )) + )); + + testMessageRoundTrip(version, response, response); } @Test @@ -583,296 +543,92 @@ public void testTxnOffsetCommitResponseVersions() throws Exception { .setThrottleTimeMs(20)); } - @Test - public void testOffsetFetchV1ToV7() throws Exception { - String groupId = "groupId"; - String topicName = "topic"; - - List topics = Collections.singletonList( - new OffsetFetchRequestTopic() - .setName(topicName) - .setPartitionIndexes(Collections.singletonList(5))); - testAllMessageRoundTripsOffsetFetchV0ToV7(new OffsetFetchRequestData() - .setTopics(new ArrayList<>()) - .setGroupId(groupId)); - - testAllMessageRoundTripsOffsetFetchV0ToV7(new OffsetFetchRequestData() - .setGroupId(groupId) - .setTopics(topics)); - - OffsetFetchRequestData allPartitionData = new OffsetFetchRequestData() - .setGroupId(groupId) - .setTopics(null); - - OffsetFetchRequestData requireStableData = new OffsetFetchRequestData() - .setGroupId(groupId) - .setTopics(topics) - .setRequireStable(true); - - for (int version : listOfVersionsNonBatchOffsetFetch) { - final short finalVersion = (short) version; - if (version < 2) { - assertThrows(NullPointerException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, allPartitionData)); - } else { - testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, allPartitionData); - } - - if (version < 7) { - assertThrows(UnsupportedVersionException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData)); - } else { - testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData); - } - } - - Supplier response = - () -> new OffsetFetchResponseData() - .setTopics(Collections.singletonList( - new OffsetFetchResponseTopic() - .setName(topicName) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartition() - .setPartitionIndex(5) - .setMetadata(null) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(3) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))))) - .setErrorCode(Errors.NOT_COORDINATOR.code()) - .setThrottleTimeMs(10); - for (int version : listOfVersionsNonBatchOffsetFetch) { - OffsetFetchResponseData responseData = response.get(); - if (version <= 1) { - responseData.setErrorCode(Errors.NONE.code()); - } - - if (version <= 2) { - responseData.setThrottleTimeMs(0); - } - - if (version <= 4) { - responseData.topics().get(0).partitions().get(0).setCommittedLeaderEpoch(-1); - } - - testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, responseData); - } - } - - private void testAllMessageRoundTripsOffsetFetchV0ToV7(Message message) throws Exception { - testDuplication(message); - testAllMessageRoundTripsOffsetFetchFromVersionToV7(message.lowestSupportedVersion(), message); - } - - private void testAllMessageRoundTripsOffsetFetchFromVersionToV7(short fromVersion, - Message message) throws Exception { - for (short version = fromVersion; version <= 7; version++) { - testEquivalentMessageRoundTrip(version, message); - } - } - - @Test - public void testOffsetFetchV8AndAboveSingleGroup() throws Exception { - String groupId = "groupId"; - String topicName = "topic"; + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testOffsetFetchRequestVersions(short version) throws Exception { + OffsetFetchRequestData request; - List topic = 
Collections.singletonList( - new OffsetFetchRequestTopics() - .setName(topicName) - .setPartitionIndexes(Collections.singletonList(5))); - - OffsetFetchRequestData allPartitionData = new OffsetFetchRequestData() - .setGroups(Collections.singletonList( - new OffsetFetchRequestGroup() - .setGroupId(groupId) - .setTopics(null))); - - OffsetFetchRequestData specifiedPartitionData = new OffsetFetchRequestData() - .setGroups(Collections.singletonList( - new OffsetFetchRequestGroup() - .setGroupId(groupId) - .setTopics(topic))) - .setRequireStable(true); - - testAllMessageRoundTripsOffsetFetchV8AndAbove(allPartitionData); - testAllMessageRoundTripsOffsetFetchV8AndAbove(specifiedPartitionData); - - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, specifiedPartitionData); - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, allPartitionData); - } + if (version < 8) { + request = new OffsetFetchRequestData() + .setGroupId("groupId") + .setRequireStable(version == 7) + .setTopics(List.of( + new OffsetFetchRequestTopic() + .setName("foo") + .setPartitionIndexes(List.of(0, 1, 2)) + )); + } else { + request = new OffsetFetchRequestData() + .setRequireStable(true) + .setGroups(List.of( + new OffsetFetchRequestGroup() + .setGroupId("groupId") + .setMemberId(version >= 9 ? "memberId" : null) + .setMemberEpoch(version >= 9 ? 10 : -1) + .setTopics(List.of( + new OffsetFetchRequestTopics() + .setName(version < 10 ? "foo" : "") + .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) + .setPartitionIndexes(List.of(0, 1, 2)) + )) + )); } - Supplier response = - () -> new OffsetFetchResponseData() - .setGroups(Collections.singletonList( + testMessageRoundTrip(version, request, request); + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testOffsetFetchResponseVersions(short version) throws Exception { + OffsetFetchResponseData response; + + if (version < 8) { + response = new OffsetFetchResponseData() + .setThrottleTimeMs(version >= 3 ? 1000 : 0) + .setErrorCode(version >= 2 ? Errors.INVALID_GROUP_ID.code() : 0) + .setTopics(List.of( + new OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setMetadata("meta") + .setCommittedLeaderEpoch(version >= 5 ? 20 : -1) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + )) + )); + } else { + response = new OffsetFetchResponseData() + .setThrottleTimeMs(1000) + .setGroups(List.of( new OffsetFetchResponseGroup() - .setGroupId(groupId) - .setTopics(Collections.singletonList( + .setGroupId("groupId") + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + .setTopics(List.of( new OffsetFetchResponseTopics() - .setPartitions(Collections.singletonList( + .setName(version < 10 ? "foo" : "") + .setTopicId(version >= 10 ? 
Uuid.randomUuid() : Uuid.ZERO_UUID) + .setPartitions(List.of( new OffsetFetchResponsePartitions() - .setPartitionIndex(5) - .setMetadata(null) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(3) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))))) - .setErrorCode(Errors.NOT_COORDINATOR.code()))) - .setThrottleTimeMs(10); - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - OffsetFetchResponseData responseData = response.get(); - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, responseData); - } - } - } - - @Test - public void testOffsetFetchV8AndAbove() throws Exception { - String groupOne = "group1"; - String groupTwo = "group2"; - String groupThree = "group3"; - String groupFour = "group4"; - String groupFive = "group5"; - String topic1 = "topic1"; - String topic2 = "topic2"; - String topic3 = "topic3"; - - OffsetFetchRequestTopics topicOne = new OffsetFetchRequestTopics() - .setName(topic1) - .setPartitionIndexes(Collections.singletonList(5)); - OffsetFetchRequestTopics topicTwo = new OffsetFetchRequestTopics() - .setName(topic2) - .setPartitionIndexes(Collections.singletonList(10)); - OffsetFetchRequestTopics topicThree = new OffsetFetchRequestTopics() - .setName(topic3) - .setPartitionIndexes(Collections.singletonList(15)); - - List groupOneTopics = singletonList(topicOne); - OffsetFetchRequestGroup group1 = - new OffsetFetchRequestGroup() - .setGroupId(groupOne) - .setTopics(groupOneTopics); - - List groupTwoTopics = Arrays.asList(topicOne, topicTwo); - OffsetFetchRequestGroup group2 = - new OffsetFetchRequestGroup() - .setGroupId(groupTwo) - .setTopics(groupTwoTopics); - - List groupThreeTopics = Arrays.asList(topicOne, topicTwo, topicThree); - OffsetFetchRequestGroup group3 = - new OffsetFetchRequestGroup() - .setGroupId(groupThree) - .setTopics(groupThreeTopics); - - OffsetFetchRequestGroup group4 = - new OffsetFetchRequestGroup() - .setGroupId(groupFour) - .setTopics(null); - - OffsetFetchRequestGroup group5 = - new OffsetFetchRequestGroup() - .setGroupId(groupFive) - .setTopics(null); - - OffsetFetchRequestData requestData = new OffsetFetchRequestData() - .setGroups(Arrays.asList(group1, group2, group3, group4, group5)) - .setRequireStable(true); - - testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData); - - testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData.setRequireStable(false)); - - - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, requestData); - } - } - - OffsetFetchResponseTopics responseTopic1 = - new OffsetFetchResponseTopics() - .setName(topic1) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartitions() - .setPartitionIndex(5) - .setMetadata(null) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(3) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))); - OffsetFetchResponseTopics responseTopic2 = - new OffsetFetchResponseTopics() - .setName(topic2) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartitions() - .setPartitionIndex(10) - .setMetadata("foo") - .setCommittedOffset(200) - .setCommittedLeaderEpoch(2) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()))); - OffsetFetchResponseTopics responseTopic3 = - new OffsetFetchResponseTopics() - .setName(topic3) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartitions() - .setPartitionIndex(15) - .setMetadata("bar") - .setCommittedOffset(300) - 
.setCommittedLeaderEpoch(1) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()))); - - OffsetFetchResponseGroup responseGroup1 = - new OffsetFetchResponseGroup() - .setGroupId(groupOne) - .setTopics(Collections.singletonList(responseTopic1)) - .setErrorCode(Errors.NOT_COORDINATOR.code()); - OffsetFetchResponseGroup responseGroup2 = - new OffsetFetchResponseGroup() - .setGroupId(groupTwo) - .setTopics(Arrays.asList(responseTopic1, responseTopic2)) - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()); - OffsetFetchResponseGroup responseGroup3 = - new OffsetFetchResponseGroup() - .setGroupId(groupThree) - .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) - .setErrorCode(Errors.NONE.code()); - OffsetFetchResponseGroup responseGroup4 = - new OffsetFetchResponseGroup() - .setGroupId(groupFour) - .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) - .setErrorCode(Errors.NONE.code()); - OffsetFetchResponseGroup responseGroup5 = - new OffsetFetchResponseGroup() - .setGroupId(groupFive) - .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) - .setErrorCode(Errors.NONE.code()); - - Supplier response = - () -> new OffsetFetchResponseData() - .setGroups(Arrays.asList(responseGroup1, responseGroup2, responseGroup3, - responseGroup4, responseGroup5)) - .setThrottleTimeMs(10); - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - OffsetFetchResponseData responseData = response.get(); - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, responseData); - } + .setPartitionIndex(0) + .setCommittedOffset(10) + .setMetadata("meta") + .setCommittedLeaderEpoch(20) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + )) + )) + )); } - } - - private void testAllMessageRoundTripsOffsetFetchV8AndAbove(Message message) throws Exception { - testDuplication(message); - testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove((short) 8, message); - } - private void testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(short fromVersion, Message message) throws Exception { - for (short version = fromVersion; version <= message.highestSupportedVersion(); version++) { - testEquivalentMessageRoundTrip(version, message); - } + testMessageRoundTrip(version, response, response); } @Test public void testProduceResponseVersions() throws Exception { String topicName = "topic"; + Uuid topicId = Uuid.fromString("klZ9sa2rSvig6QpgGXzALT"); + int partitionIndex = 0; short errorCode = Errors.INVALID_TOPIC_EXCEPTION.code(); long baseOffset = 12L; @@ -886,7 +642,6 @@ public void testProduceResponseVersions() throws Exception { testAllMessageRoundTrips(new ProduceResponseData() .setResponses(new ProduceResponseData.TopicProduceResponseCollection(singletonList( new ProduceResponseData.TopicProduceResponse() - .setName(topicName) .setPartitionResponses(singletonList( new ProduceResponseData.PartitionProduceResponse() .setIndex(partitionIndex) @@ -896,7 +651,6 @@ public void testProduceResponseVersions() throws Exception { Supplier response = () -> new ProduceResponseData() .setResponses(new ProduceResponseData.TopicProduceResponseCollection(singletonList( new ProduceResponseData.TopicProduceResponse() - .setName(topicName) .setPartitionResponses(singletonList( new ProduceResponseData.PartitionProduceResponse() .setIndex(partitionIndex) @@ -931,10 +685,18 @@ public void testProduceResponseVersions() throws Exception { responseData.setThrottleTimeMs(0); } + if (version >= 13) { + 
responseData.responses().iterator().next().setTopicId(topicId); + } else { + responseData.responses().iterator().next().setName(topicName); + } + if (version >= 3 && version <= 4) { testAllMessageRoundTripsBetweenVersions(version, (short) 5, responseData, responseData); } else if (version >= 6 && version <= 7) { testAllMessageRoundTripsBetweenVersions(version, (short) 8, responseData, responseData); + } else if (version <= 12) { + testAllMessageRoundTripsBetweenVersions(version, (short) 12, responseData, responseData); } else { testEquivalentMessageRoundTrip(version, responseData); } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/KafkaMetricTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/KafkaMetricTest.java index ab6f349ac53be..e3a9fb345d795 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/KafkaMetricTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/KafkaMetricTest.java @@ -48,4 +48,29 @@ public void testIsMeasurableWithGaugeProvider() { assertThrows(IllegalStateException.class, metric::measurable); } + @Test + public void testMeasurableValueReturnsZeroWhenNotMeasurable() { + MockTime time = new MockTime(); + MetricConfig config = new MetricConfig(); + Gauge gauge = (c, now) -> 7; + + KafkaMetric metric = new KafkaMetric(new Object(), METRIC_NAME, gauge, config, time); + assertEquals(0.0d, metric.measurableValue(time.milliseconds()), 0.0d); + } + + @Test + public void testKafkaMetricAcceptsNonMeasurableNonGaugeProvider() { + MetricValueProvider provider = (config, now) -> "metric value provider"; + KafkaMetric metric = new KafkaMetric(new Object(), METRIC_NAME, provider, new MetricConfig(), new MockTime()); + + Object value = metric.metricValue(); + assertEquals("metric value provider", value); + } + + @Test + public void testConstructorWithNullProvider() { + assertThrows(NullPointerException.class, () -> + new KafkaMetric(new Object(), METRIC_NAME, null, new MetricConfig(), new MockTime()) + ); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java index 8f50e35b6e3f8..eda6648068c6f 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java @@ -64,7 +64,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; public class MetricsTest { private static final Logger log = LoggerFactory.getLogger(MetricsTest.class); @@ -98,12 +97,9 @@ public void testMetricName() { MetricName n2 = metrics.metricName("name", "group", "description", tags); assertEquals(n1, n2, "metric names created in two different ways should be equal"); - try { - metrics.metricName("name", "group", "description", "key1"); - fail("Creating MetricName with an odd number of keyValue should fail"); - } catch (IllegalArgumentException e) { - // this is expected - } + assertThrows(IllegalArgumentException.class, + () -> metrics.metricName("name", "group", "description", "key1"), + "Creating MetricName with an odd number of keyValue should fail, IllegalArgumentException expected."); } @Test @@ -419,20 +415,14 @@ public void testQuotas() { sensor.add(metrics.metricName("test1.total", "grp1"), new CumulativeSum(), new MetricConfig().quota(Quota.upperBound(5.0))); 
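The test changes around this point all perform the same JUnit 5 migration: the old try / fail() / catch idiom is collapsed into a single assertThrows call that carries the failure message. A minimal standalone sketch of the before-and-after shape, using a hypothetical example unrelated to this patch:

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.fail;

    class AssertThrowsMigrationSketch {

        // Old idiom: invoke, fail() if no exception was thrown, and swallow the expected type in a catch block.
        @Test
        void oldStyle() {
            try {
                Integer.parseInt("not-a-number");
                fail("Expected a NumberFormatException");
            } catch (NumberFormatException e) {
                // expected
            }
        }

        // New idiom: JUnit asserts the exception type and reports the message on failure.
        @Test
        void newStyle() {
            assertThrows(NumberFormatException.class,
                () -> Integer.parseInt("not-a-number"),
                "Expected a NumberFormatException");
        }
    }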
sensor.add(metrics.metricName("test2.total", "grp1"), new CumulativeSum(), new MetricConfig().quota(Quota.lowerBound(0.0))); sensor.record(5.0); - try { - sensor.record(1.0); - fail("Should have gotten a quota violation."); - } catch (QuotaViolationException e) { - // this is good - } + assertThrows(QuotaViolationException.class, + () -> sensor.record(1.0), + "Should have gotten a quota violation."); assertEquals(6.0, (Double) metrics.metrics().get(metrics.metricName("test1.total", "grp1")).metricValue(), EPS); sensor.record(-6.0); - try { - sensor.record(-1.0); - fail("Should have gotten a quota violation."); - } catch (QuotaViolationException e) { - // this is good - } + assertThrows(QuotaViolationException.class, + () -> sensor.record(-1.0), + "Should have gotten a quota violation."); } @Test @@ -670,7 +660,7 @@ private void record(Rate rate, MetricConfig config, int value) { private Double measure(Measurable rate, MetricConfig config) { return rate.measure(config, time.milliseconds()); } - + @Test public void testMetricInstances() { MetricName n1 = metrics.metricInstance(SampleMetrics.METRIC1, "key1", "value1", "key2", "value2"); @@ -680,13 +670,10 @@ public void testMetricInstances() { MetricName n2 = metrics.metricInstance(SampleMetrics.METRIC2, tags); assertEquals(n1, n2, "metric names created in two different ways should be equal"); - try { - metrics.metricInstance(SampleMetrics.METRIC1, "key1"); - fail("Creating MetricName with an odd number of keyValue should fail"); - } catch (IllegalArgumentException e) { - // this is expected - } - + assertThrows(IllegalArgumentException.class, + () -> metrics.metricInstance(SampleMetrics.METRIC1, "key1"), + "Creating MetricName with an odd number of keyValue should fail, IllegalArgumentException expected."); + Map parentTagsWithValues = new HashMap<>(); parentTagsWithValues.put("parent-tag", "parent-tag-value"); @@ -697,27 +684,20 @@ public void testMetricInstances() { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues); Map filledOutTags = inheritedMetric.tags(); - assertEquals(filledOutTags.get("parent-tag"), "parent-tag-value", "parent-tag should be set properly"); - assertEquals(filledOutTags.get("child-tag"), "child-tag-value", "child-tag should be set properly"); + assertEquals("parent-tag-value", filledOutTags.get("parent-tag"), "parent-tag should be set properly"); + assertEquals("child-tag-value", filledOutTags.get("child-tag"), "child-tag should be set properly"); - try { - inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, parentTagsWithValues); - fail("Creating MetricName should fail if the child metrics are not defined at runtime"); - } catch (IllegalArgumentException e) { - // this is expected - } + assertThrows(IllegalArgumentException.class, + () -> inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, parentTagsWithValues), + "Creating MetricName should throw IllegalArgumentException if the child metrics are not defined at runtime."); - try { - - Map runtimeTags = new HashMap<>(); - runtimeTags.put("child-tag", "child-tag-value"); - runtimeTags.put("tag-not-in-template", "unexpected-value"); + Map runtimeTags = new HashMap<>(); + runtimeTags.put("child-tag", "child-tag-value"); + runtimeTags.put("tag-not-in-template", "unexpected-value"); - inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, runtimeTags); - fail("Creating MetricName should fail if there is a tag at runtime that is not in the template"); - } catch 
(IllegalArgumentException e) { - // this is expected - } + assertThrows(IllegalArgumentException.class, + () -> inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, runtimeTags), + "Creating MetricName should throw IllegalArgumentException if there is a tag at runtime that is not in the template."); } } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java index 5cdcebc858d6b..6b806c6bb7bd5 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java @@ -70,12 +70,12 @@ public void testRecordLevelEnum() { assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); assertTrue(Sensor.RecordingLevel.TRACE.shouldRecord(configLevel.id)); - assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString()), - Sensor.RecordingLevel.DEBUG); - assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString()), - Sensor.RecordingLevel.INFO); - assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.TRACE.toString()), - Sensor.RecordingLevel.TRACE); + assertEquals(Sensor.RecordingLevel.DEBUG, + Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString())); + assertEquals(Sensor.RecordingLevel.INFO, + Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString())); + assertEquals(Sensor.RecordingLevel.TRACE, + Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.TRACE.toString())); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java index 0ff349e361d6a..4d0bedb3d3061 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java @@ -27,7 +27,6 @@ import org.junit.jupiter.api.Test; import java.io.IOException; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; @@ -36,11 +35,15 @@ public class PluginMetricsImplTest { - private final Map extraTags = Collections.singletonMap("my-tag", "my-value"); + private static final LinkedHashMap EXTRA_TAGS = new LinkedHashMap<>(); private Map tags; private Metrics metrics; private int initialMetrics; + static { + EXTRA_TAGS.put("my-tag", "my-value"); + } + @BeforeEach void setup() { metrics = new Metrics(); @@ -53,26 +56,28 @@ void setup() { @Test void testMetricName() { PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); - MetricName metricName = pmi.metricName("name", "description", extraTags); + MetricName metricName = pmi.metricName("name", "description", EXTRA_TAGS); assertEquals("name", metricName.name()); assertEquals("plugins", metricName.group()); assertEquals("description", metricName.description()); Map expectedTags = new LinkedHashMap<>(tags); - expectedTags.putAll(extraTags); + expectedTags.putAll(EXTRA_TAGS); assertEquals(expectedTags, metricName.tags()); } @Test void testDuplicateTagName() { PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + LinkedHashMap tags = new LinkedHashMap<>(); + tags.put("k1", "value"); assertThrows(IllegalArgumentException.class, - () -> pmi.metricName("name", "description", Collections.singletonMap("k1", "value"))); + () -> pmi.metricName("name", "description", tags)); } @Test void testAddRemoveMetrics() { 
PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); - MetricName metricName = pmi.metricName("name", "description", extraTags); + MetricName metricName = pmi.metricName("name", "description", EXTRA_TAGS); pmi.addMetric(metricName, (Measurable) (config, now) -> 0.0); assertEquals(initialMetrics + 1, metrics.metrics().size()); @@ -88,7 +93,7 @@ void testAddRemoveMetrics() { void testAddRemoveSensor() { PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); String sensorName = "my-sensor"; - MetricName metricName = pmi.metricName("name", "description", extraTags); + MetricName metricName = pmi.metricName("name", "description", EXTRA_TAGS); Sensor sensor = pmi.addSensor(sensorName); assertEquals(initialMetrics, metrics.metrics().size()); sensor.add(metricName, new Rate()); @@ -107,10 +112,10 @@ void testAddRemoveSensor() { void testClose() throws IOException { PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); String sensorName = "my-sensor"; - MetricName metricName1 = pmi.metricName("name1", "description", extraTags); + MetricName metricName1 = pmi.metricName("name1", "description", EXTRA_TAGS); Sensor sensor = pmi.addSensor(sensorName); sensor.add(metricName1, new Rate()); - MetricName metricName2 = pmi.metricName("name2", "description", extraTags); + MetricName metricName2 = pmi.metricName("name2", "description", EXTRA_TAGS); pmi.addMetric(metricName2, (Measurable) (config, now) -> 1.0); assertEquals(initialMetrics + 2, metrics.metrics().size()); diff --git a/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java b/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java index e84c7c5e7c2e0..9aa90811b95ad 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java @@ -62,10 +62,10 @@ public void testChannelBuilderConfigs() { assertNull(configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.sasl.kerberos.service.name")); - assertEquals(configs.get("gssapi.sasl.kerberos.service.name"), "testkafka"); + assertEquals("testkafka", configs.get("gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("gssapi.sasl.kerberos.service.name")); - assertEquals(configs.get("sasl.kerberos.service.name"), "testkafkaglobal"); + assertEquals("testkafkaglobal", configs.get("sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("sasl.kerberos.service.name")); assertNull(configs.get("listener.name.listener1.sasl.kerberos.service.name")); @@ -74,35 +74,35 @@ public void testChannelBuilderConfigs() { assertNull(configs.get("plain.sasl.server.callback.handler.class")); assertFalse(securityConfig.unused().contains("plain.sasl.server.callback.handler.class")); - assertEquals(configs.get("listener.name.listener1.gssapi.config1.key"), "custom.config1"); + assertEquals("custom.config1", configs.get("listener.name.listener1.gssapi.config1.key")); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.config1.key")); - assertEquals(configs.get("custom.config2.key"), "custom.config2"); + assertEquals("custom.config2", configs.get("custom.config2.key")); assertFalse(securityConfig.unused().contains("custom.config2.key")); // test configs without listener prefix securityConfig = new TestSecurityConfig(props); configs = ChannelBuilders.channelBuilderConfigs(securityConfig, 
null); - assertEquals(configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name"), "testkafka"); + assertEquals("testkafka", configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.sasl.kerberos.service.name")); assertNull(configs.get("gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("gssapi.sasl.kerberos.service.name")); - assertEquals(configs.get("listener.name.listener1.sasl.kerberos.service.name"), "testkafkaglobal"); + assertEquals("testkafkaglobal", configs.get("listener.name.listener1.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("listener.name.listener1.sasl.kerberos.service.name")); assertNull(configs.get("sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("sasl.kerberos.service.name")); - assertEquals(configs.get("plain.sasl.server.callback.handler.class"), "callback"); + assertEquals("callback", configs.get("plain.sasl.server.callback.handler.class")); assertFalse(securityConfig.unused().contains("plain.sasl.server.callback.handler.class")); - assertEquals(configs.get("listener.name.listener1.gssapi.config1.key"), "custom.config1"); + assertEquals("custom.config1", configs.get("listener.name.listener1.gssapi.config1.key")); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.config1.key")); - assertEquals(configs.get("custom.config2.key"), "custom.config2"); + assertEquals("custom.config2", configs.get("custom.config2.key")); assertFalse(securityConfig.unused().contains("custom.config2.key")); } @@ -118,5 +118,15 @@ public void configure(Map configs) { public KafkaPrincipal build(AuthenticationContext context) { return null; } + + @Override + public byte[] serialize(KafkaPrincipal principal) { + return new byte[0]; + } + + @Override + public KafkaPrincipal deserialize(byte[] bytes) { + return null; + } } } diff --git a/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java index 90dd34bb07835..df5e1aea7f8df 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java @@ -183,11 +183,6 @@ public void verifyReauthenticationMetrics(int successfulReauthentications, final } } - public void verifyAuthenticationNoReauthMetric(int successfulAuthenticationNoReauths) throws InterruptedException { - waitForMetrics("successful-authentication-no-reauth", successfulAuthenticationNoReauths, - EnumSet.of(MetricType.TOTAL)); - } - public void waitForMetric(String name, final double expectedValue) throws InterruptedException { waitForMetrics(name, expectedValue, EnumSet.of(MetricType.TOTAL, MetricType.RATE)); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java index 8a9704c16216e..572ec443e08af 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslSelectorTest.java @@ -74,11 +74,10 @@ public void setUp() throws Exception { this.server.start(); this.time = new MockTime(); sslClientConfigs = createSslClientConfigs(trustStoreFile); - LogContext logContext = new LogContext(); - this.channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false, logContext); + 
this.channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false); this.channelBuilder.configure(sslClientConfigs); this.metrics = new Metrics(); - this.selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, logContext); + this.selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, new LogContext()); } protected abstract Map createSslClientConfigs(File trustStoreFile) throws GeneralSecurityException, IOException; @@ -255,7 +254,7 @@ public void testMuteOnOOM() throws Exception { .tlsProtocol(tlsProtocol) .createNewTrustStore(trustStoreFile) .build(); - channelBuilder = new SslChannelBuilder(ConnectionMode.SERVER, null, false, new LogContext()); + channelBuilder = new SslChannelBuilder(ConnectionMode.SERVER, null, false); channelBuilder.configure(sslServerConfigs); selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<>(), true, false, channelBuilder, pool, new LogContext()); @@ -342,7 +341,7 @@ private SslSender createSender(String tlsProtocol, InetSocketAddress serverAddre private static class TestSslChannelBuilder extends SslChannelBuilder { public TestSslChannelBuilder(ConnectionMode connectionMode) { - super(connectionMode, null, false, new LogContext()); + super(connectionMode, null, false); } @Override diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java index 9208171d1a926..9525ccfbc82be 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java @@ -110,7 +110,8 @@ public Args(String tlsProtocol, boolean useInlinePem) throws Exception { this.useInlinePem = useInlinePem; sslConfigOverrides = new HashMap<>(); sslConfigOverrides.put(SslConfigs.SSL_PROTOCOL_CONFIG, tlsProtocol); - sslConfigOverrides.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsProtocol)); + sslConfigOverrides.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(tlsProtocol)); + sslConfigOverrides.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); init(); } @@ -607,7 +608,7 @@ public void testInvalidKeyPassword(Args args) throws Exception { public void testTlsDefaults(Args args) throws Exception { args.sslServerConfigs = args.serverCertStores.getTrustingConfig(args.clientCertStores); args.sslClientConfigs = args.clientCertStores.getTrustingConfig(args.serverCertStores); - + args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, args.sslServerConfigs.get(SslConfigs.SSL_PROTOCOL_CONFIG)); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, args.sslClientConfigs.get(SslConfigs.SSL_PROTOCOL_CONFIG)); @@ -766,11 +767,10 @@ public void testApplicationBufferResize(Args args) throws Exception { @ParameterizedTest @ArgumentsSource(SslTransportLayerArgumentsProvider.class) public void testNetworkThreadTimeRecorded(Args args) throws Exception { - LogContext logContext = new LogContext(); - ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false, logContext); + ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false); channelBuilder.configure(args.sslClientConfigs); try (Selector selector = new Selector(NetworkReceive.UNLIMITED, Selector.NO_IDLE_TIMEOUT_MS, new Metrics(), Time.SYSTEM, - "MetricGroup", new HashMap<>(), false, true, 
channelBuilder, MemoryPool.NONE, logContext)) { + "MetricGroup", new HashMap<>(), false, true, channelBuilder, MemoryPool.NONE, new LogContext())) { String node = "0"; server = createEchoServer(args, SecurityProtocol.SSL); @@ -966,7 +966,7 @@ public void testClosePlaintext(Args args) throws Exception { } private SslChannelBuilder newClientChannelBuilder() { - return new SslChannelBuilder(ConnectionMode.CLIENT, null, false, new LogContext()); + return new SslChannelBuilder(ConnectionMode.CLIENT, null, false); } private void testClose(Args args, SecurityProtocol securityProtocol, ChannelBuilder clientChannelBuilder) throws Exception { @@ -1096,14 +1096,14 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), CertStores invalidCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("127.0.0.1").build(); Map invalidConfigs = args.getTrustingConfig(invalidCertStores, args.clientCertStores); - verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "keystore with different SubjectAltName"); + verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs); Map missingStoreConfigs = new HashMap<>(); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12"); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "some.keystore.path"); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, new Password("some.keystore.password")); missingStoreConfigs.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("some.key.password")); - verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "keystore not found"); + verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs); // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE); @@ -1167,7 +1167,7 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), for (String propName : CertStores.KEYSTORE_PROPS) { invalidKeystoreConfigs.put(propName, invalidConfig.get(propName)); } - verifyInvalidReconfigure(reconfigurableBuilder, invalidKeystoreConfigs, "keystore without existing SubjectAltName"); + verifyInvalidReconfigure(reconfigurableBuilder, invalidKeystoreConfigs); String node3 = "3"; selector.connect(node3, addr, BUFFER_SIZE, BUFFER_SIZE); NetworkTestUtils.checkClientConnection(selector, node3, 100, 10); @@ -1223,13 +1223,13 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), Map invalidConfigs = new HashMap<>(newTruststoreConfigs); invalidConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "INVALID_TYPE"); - verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "invalid truststore type"); + verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs); Map missingStoreConfigs = new HashMap<>(); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "some.truststore.path"); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, new Password("some.truststore.password")); - verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "truststore not found"); + verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs); // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE); @@ -1280,7 +1280,7 @@ public void 
testInvalidSslEngineFactory(Args args) { } private void verifyInvalidReconfigure(ListenerReconfigurable reconfigurable, - Map invalidConfigs, String errorMessage) { + Map invalidConfigs) { assertThrows(KafkaException.class, () -> reconfigurable.validateReconfiguration(invalidConfigs)); assertThrows(KafkaException.class, () -> reconfigurable.reconfigure(invalidConfigs)); } @@ -1310,10 +1310,9 @@ private NioEchoServer createEchoServer(Args args, SecurityProtocol securityProto } private Selector createSelector(Args args) { - LogContext logContext = new LogContext(); - ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false, logContext); + ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false); channelBuilder.configure(args.sslClientConfigs); - selector = new Selector(5000, new Metrics(), TIME, "MetricGroup", channelBuilder, logContext); + selector = new Selector(5000, new Metrics(), TIME, "MetricGroup", channelBuilder, new LogContext()); return selector; } @@ -1370,7 +1369,7 @@ static class TestSslChannelBuilder extends SslChannelBuilder { int flushDelayCount = 0; public TestSslChannelBuilder(ConnectionMode connectionMode) { - super(connectionMode, null, false, new LogContext()); + super(connectionMode, null, false); } public void configureBufferSizes(Integer netReadBufSize, Integer netWriteBufSize, Integer appBufSize) { diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java index 0d67c8aee6393..0ad81d17b5b93 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportTls12Tls13Test.java @@ -50,10 +50,9 @@ public void setup() throws Exception { sslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores); sslClientConfigs = clientCertStores.getTrustingConfig(serverCertStores); - LogContext logContext = new LogContext(); - ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false, logContext); + ChannelBuilder channelBuilder = new SslChannelBuilder(ConnectionMode.CLIENT, null, false); channelBuilder.configure(sslClientConfigs); - this.selector = new Selector(5000, new Metrics(), TIME, "MetricGroup", channelBuilder, logContext); + this.selector = new Selector(5000, new Metrics(), TIME, "MetricGroup", channelBuilder, new LogContext()); } @AfterEach diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java index 23c49412ab9e9..528fefed3f3db 100644 --- a/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java @@ -53,7 +53,7 @@ public void testUniqueExceptions() { public void testExceptionsAreNotGeneric() { for (Errors error : Errors.values()) { if (error != Errors.NONE) - assertNotEquals(error.exception().getClass(), ApiException.class, "Generic ApiException should not be used"); + assertNotEquals(ApiException.class, error.exception().getClass(), "Generic ApiException should not be used"); } } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java index adcabba1a4977..7a6d05778eda0 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java @@ -19,6 +19,7 @@ import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class ProtocolTest { @@ -27,6 +28,18 @@ public void testToHtml() { var html = Protocol.toHtml(); assertFalse(html.isBlank()); assertFalse(html.contains("LeaderAndIsr"), "Removed LeaderAndIsr should not show in HTML"); + + String requestVersion; + String responseVersion; + for (ApiKeys key : ApiKeys.clientApis()) { + for (short version = key.oldestVersion(); version <= key.latestVersion(); version++) { + requestVersion = key.name + " Request (Version: " + version; + responseVersion = key.name + " Response (Version: " + version; + + assertTrue(html.contains(requestVersion), "Missing request header for " + key.name + " version:" + version); + assertTrue(html.contains(responseVersion), "Missing response header for " + key.name + " version:" + version); + } + } } } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java index 027dca8bf84fc..32ba528fe3eed 100644 --- a/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java @@ -195,12 +195,9 @@ public void testReadArraySizeTooLarge() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - try { - type.read(invalidBuffer); - fail("Array size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> type.read(invalidBuffer), + "Array size not validated"); } @Test @@ -213,12 +210,9 @@ public void testReadCompactArraySizeTooLarge() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - try { - type.read(invalidBuffer); - fail("Array size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> type.read(invalidBuffer), + "Array size not validated"); } @Test @@ -252,12 +246,9 @@ public void testReadNegativeArraySize() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - try { - type.read(invalidBuffer); - fail("Array size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> type.read(invalidBuffer), + "Array size not validated"); } @Test @@ -270,12 +261,9 @@ public void testReadZeroCompactArraySize() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - try { - type.read(invalidBuffer); - fail("Array size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> type.read(invalidBuffer), + "Array size not validated"); } @Test @@ -285,19 +273,14 @@ public void testReadStringSizeTooLarge() { invalidBuffer.putShort((short) (stringBytes.length * 5)); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - try { - Type.STRING.read(invalidBuffer); - fail("String size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> Type.STRING.read(invalidBuffer), + "String size not validated"); + 
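The ProtocolSerializationTest size-validation cases above all share one shape: encode a length prefix that overstates the payload actually present, then assert that read() rejects the buffer with a SchemaException. A condensed, self-contained sketch of that shape against the same Type.STRING API the tests exercise (the payload value is illustrative):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import org.apache.kafka.common.protocol.types.SchemaException;
    import org.apache.kafka.common.protocol.types.Type;

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertThrows;

    class StringSizeValidationSketch {

        @Test
        void oversizedLengthPrefixIsRejected() {
            byte[] payload = "foo".getBytes(StandardCharsets.UTF_8);

            // STRING is encoded as a two-byte length followed by the bytes;
            // claim five times more bytes than the buffer actually holds.
            ByteBuffer buffer = ByteBuffer.allocate(2 + payload.length);
            buffer.putShort((short) (payload.length * 5));
            buffer.put(payload);
            buffer.rewind();

            assertThrows(SchemaException.class, () -> Type.STRING.read(buffer));
        }
    }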
invalidBuffer.rewind(); - try { - Type.NULLABLE_STRING.read(invalidBuffer); - fail("String size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> Type.NULLABLE_STRING.read(invalidBuffer), + "String size not validated"); } @Test @@ -307,12 +290,10 @@ public void testReadNegativeStringSize() { invalidBuffer.putShort((short) -1); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - try { - Type.STRING.read(invalidBuffer); - fail("String size not validated"); - } catch (SchemaException e) { - // Expected exception - } + + assertThrows(SchemaException.class, + () -> Type.STRING.read(invalidBuffer), + "String size not validated"); } @Test @@ -322,19 +303,14 @@ public void testReadBytesSizeTooLarge() { invalidBuffer.putInt(stringBytes.length * 5); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - try { - Type.BYTES.read(invalidBuffer); - fail("Bytes size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> Type.BYTES.read(invalidBuffer), + "Bytes size not validated"); + invalidBuffer.rewind(); - try { - Type.NULLABLE_BYTES.read(invalidBuffer); - fail("Bytes size not validated"); - } catch (SchemaException e) { - // Expected exception - } + assertThrows(SchemaException.class, + () -> Type.NULLABLE_BYTES.read(invalidBuffer), + "Bytes size not validated"); } @Test @@ -344,12 +320,10 @@ public void testReadNegativeBytesSize() { invalidBuffer.putInt(-20); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - try { - Type.BYTES.read(invalidBuffer); - fail("Bytes size not validated"); - } catch (SchemaException e) { - // Expected exception - } + + assertThrows(SchemaException.class, + () -> Type.BYTES.read(invalidBuffer), + "Bytes size not validated"); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java index a2e89d3f4c669..a9d12285f1298 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java @@ -89,7 +89,7 @@ public void testAppendProtectsFromOverflow() throws Exception { FileChannel fileChannelMock = mock(FileChannel.class); when(fileChannelMock.size()).thenReturn((long) Integer.MAX_VALUE); - FileRecords records = new FileRecords(fileMock, fileChannelMock, 0, Integer.MAX_VALUE, false); + FileRecords records = new FileRecords(fileMock, fileChannelMock, Integer.MAX_VALUE); assertThrows(IllegalArgumentException.class, () -> append(records, values)); } @@ -99,7 +99,7 @@ public void testOpenOversizeFile() throws Exception { FileChannel fileChannelMock = mock(FileChannel.class); when(fileChannelMock.size()).thenReturn(Integer.MAX_VALUE + 5L); - assertThrows(KafkaException.class, () -> new FileRecords(fileMock, fileChannelMock, 0, Integer.MAX_VALUE, false)); + assertThrows(KafkaException.class, () -> new FileRecords(fileMock, fileChannelMock, Integer.MAX_VALUE)); } @Test @@ -142,7 +142,7 @@ public void testSliceSizeLimitWithConcurrentWrite() throws Exception { Future readerCompletion = executor.submit(() -> { while (log.sizeInBytes() < maxSizeInBytes) { int currentSize = log.sizeInBytes(); - FileRecords slice = log.slice(0, currentSize); + Records slice = log.slice(0, currentSize); assertEquals(currentSize, slice.sizeInBytes()); } return null; @@ -197,7 +197,7 @@ public void testIterationDoesntChangePosition() throws 
IOException { * Test a simple append and read. */ @Test - public void testRead() throws IOException { + public void testRead() { FileRecords read = fileRecords.slice(0, fileRecords.sizeInBytes()); assertEquals(fileRecords.sizeInBytes(), read.sizeInBytes()); TestUtils.checkEquals(fileRecords.batches(), read.batches()); @@ -279,13 +279,13 @@ public void testSearch() throws IOException { * Test that the message set iterator obeys start and end slicing */ @Test - public void testIteratorWithLimits() throws IOException { + public void testIteratorWithLimits() { RecordBatch batch = batches(fileRecords).get(1); int start = fileRecords.searchForOffsetFromPosition(1, 0).position; int size = batch.sizeInBytes(); - FileRecords slice = fileRecords.slice(start, size); + Records slice = fileRecords.slice(start, size); assertEquals(Collections.singletonList(batch), batches(slice)); - FileRecords slice2 = fileRecords.slice(start, size - 1); + Records slice2 = fileRecords.slice(start, size - 1); assertEquals(Collections.emptyList(), batches(slice2)); } @@ -313,7 +313,7 @@ public void testTruncateNotCalledIfSizeIsSameAsTargetSize() throws IOException { when(channelMock.size()).thenReturn(42L); when(channelMock.position(42L)).thenReturn(null); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); fileRecords.truncateTo(42); verify(channelMock, atLeastOnce()).size(); @@ -330,7 +330,7 @@ public void testTruncateNotCalledIfSizeIsBiggerThanTargetSize() throws IOExcepti when(channelMock.size()).thenReturn(42L); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); try { fileRecords.truncateTo(43); @@ -352,7 +352,7 @@ public void testTruncateIfSizeIsDifferentToTargetSize() throws IOException { when(channelMock.size()).thenReturn(42L); when(channelMock.truncate(anyLong())).thenReturn(channelMock); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); fileRecords.truncateTo(23); verify(channelMock, atLeastOnce()).size(); @@ -429,24 +429,22 @@ public void testSliceForAlreadySlicedFileRecords() throws IOException { "abcd".getBytes(), "efgh".getBytes(), "ijkl".getBytes(), - "mnop".getBytes(), - "qrst".getBytes() + "mnopqr".getBytes(), + "stuv".getBytes() }; try (FileRecords fileRecords = createFileRecords(values)) { List items = batches(fileRecords.slice(0, fileRecords.sizeInBytes())); // Slice from fourth message until the end. int position = IntStream.range(0, 3).map(i -> items.get(i).sizeInBytes()).sum(); - FileRecords sliced = fileRecords.slice(position, fileRecords.sizeInBytes() - position); + Records sliced = fileRecords.slice(position, fileRecords.sizeInBytes() - position); assertEquals(fileRecords.sizeInBytes() - position, sliced.sizeInBytes()); assertEquals(items.subList(3, items.size()), batches(sliced), "Read starting from the fourth message"); // Further slice the already sliced file records, from fifth message until the end. Now the - // bytes available in the sliced file records are less than the start position. However, the - // position to slice is relative hence reset position to second message in the sliced file - // records i.e. reset with the size of the fourth message from the original file records. 
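The comment being rewritten here concerns how a slice taken from an already-sliced view addresses its bytes: the position passed to the inner slice() is relative to that view, not to the original records. As a plain java.nio analogy (not Kafka's FileRecords/Records classes), a slice of a slice behaves the same way:

    import java.nio.ByteBuffer;

    public class SliceOfSliceSketch {
        public static void main(String[] args) {
            ByteBuffer original = ByteBuffer.wrap(new byte[] {0, 1, 2, 3, 4, 5, 6, 7});

            // View over the last five bytes (indexes 3..7 of the original).
            ByteBuffer first = original.position(3).slice();

            // Positions are relative to 'first', so this view covers indexes 4..7 of the original.
            ByteBuffer second = first.position(1).slice();

            System.out.println(second.get(0)); // prints 4
        }
    }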
- position = items.get(4).sizeInBytes(); - FileRecords finalSliced = sliced.slice(position, sliced.sizeInBytes() - position); + // bytes available in the sliced records are less than the moved position from original records. + position = items.get(3).sizeInBytes(); + Records finalSliced = sliced.slice(position, sliced.sizeInBytes() - position); assertEquals(sliced.sizeInBytes() - position, finalSliced.sizeInBytes()); assertEquals(items.subList(4, items.size()), batches(finalSliced), "Read starting from the fifth message"); } @@ -528,7 +526,7 @@ public void testBytesLengthOfWriteTo() throws IOException { * 1. If the target offset equals the base offset of the first batch * 2. If the target offset is less than the base offset of the first batch *
    - * If the base offset of the first batch is equal to or greater than the target offset, it should return the + * If the base offset of the first batch is equal to or greater than the target offset, it should return the * position of the first batch and the lastOffset method should not be called. */ @ParameterizedTest @@ -539,7 +537,7 @@ public void testSearchForOffsetFromPosition1(long baseOffset) throws IOException FileLogInputStream.FileChannelRecordBatch batch = mock(FileLogInputStream.FileChannelRecordBatch.class); when(batch.baseOffset()).thenReturn(baseOffset); - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, batch); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(5L, 0); @@ -559,7 +557,7 @@ public void testSearchForOffsetFromPosition2() throws IOException { when(batch.baseOffset()).thenReturn(3L); when(batch.lastOffset()).thenReturn(5L); - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, batch); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(5L, 0); @@ -583,7 +581,7 @@ public void testSearchForOffsetFromPosition3() throws IOException { when(currentBatch.baseOffset()).thenReturn(15L); when(currentBatch.lastOffset()).thenReturn(20L); - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, prevBatch, currentBatch); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(20L, 0); @@ -607,13 +605,13 @@ public void testSearchForOffsetFromPosition4() throws IOException { FileLogInputStream.FileChannelRecordBatch currentBatch = mock(FileLogInputStream.FileChannelRecordBatch.class); when(currentBatch.baseOffset()).thenReturn(15L); // >= targetOffset - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, prevBatch, currentBatch); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(10L, 0); assertEquals(FileRecords.LogOffsetPosition.fromBatch(prevBatch), result); - // Because the target offset is in the current batch, we should call lastOffset + // Because the target offset is in the current batch, we should call lastOffset // on the previous batch verify(prevBatch, times(1)).lastOffset(); } @@ -631,13 +629,13 @@ public void testSearchForOffsetFromPosition5() throws IOException { when(batch2.baseOffset()).thenReturn(8L); // < targetOffset when(batch2.lastOffset()).thenReturn(9L); // < targetOffset - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, batch1, batch2); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(10L, 0); assertNull(result); - // Because the target offset is exceeded by the last offset of the batch2, + // Because the target offset is exceeded by the last offset of the batch2, // we should call lastOffset on the batch2 
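Read together, these searchForOffsetFromPosition cases pin down the expected result: the first batch that is not entirely below the target offset, or no result when every batch ends before it. A simplified in-memory model of that outcome (an illustration of the behaviour the assertions describe, not Kafka's implementation, and it ignores the lastOffset short-circuiting that the mock verifications check):

    import java.util.List;
    import java.util.Optional;

    public class OffsetSearchSketch {

        // Minimal stand-in for a file channel record batch: its offset range and file position.
        record Batch(long baseOffset, long lastOffset, int position) { }

        // Return the first batch whose offset range reaches the target offset, if any.
        static Optional<Batch> search(List<Batch> batches, long targetOffset) {
            for (Batch batch : batches) {
                if (batch.lastOffset() >= targetOffset) {
                    return Optional.of(batch);
                }
            }
            return Optional.empty();
        }

        public static void main(String[] args) {
            List<Batch> batches = List.of(new Batch(5, 10, 0), new Batch(15, 20, 100));
            System.out.println(search(batches, 13)); // the second batch is the first one reaching offset 13
            System.out.println(search(batches, 25)); // Optional.empty: every batch ends before 25
        }
    }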
verify(batch1, never()).lastOffset(); verify(batch2, times(1)).lastOffset(); @@ -659,7 +657,7 @@ public void testSearchForOffsetFromPosition6(long baseOffset) throws IOException when(batch2.baseOffset()).thenReturn(baseOffset); // < targetOffset or == targetOffset when(batch2.lastOffset()).thenReturn(12L); // >= targetOffset - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, batch1, batch2); long targetOffset = 10L; @@ -672,7 +670,7 @@ public void testSearchForOffsetFromPosition6(long baseOffset) throws IOException verify(batch1, never()).lastOffset(); verify(batch2, never()).lastOffset(); } else { - // Because the target offset is in the batch2, we should not call + // Because the target offset is in the batch2, we should not call // lastOffset on batch1 verify(batch1, never()).lastOffset(); verify(batch2, times(1)).lastOffset(); @@ -687,13 +685,13 @@ public void testSearchForOffsetFromPosition7() throws IOException { File mockFile = mock(File.class); FileChannel mockChannel = mock(FileChannel.class); FileLogInputStream.FileChannelRecordBatch batch1 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch1.baseOffset()).thenReturn(5L); - when(batch1.lastOffset()).thenReturn(10L); + when(batch1.baseOffset()).thenReturn(5L); + when(batch1.lastOffset()).thenReturn(10L); FileLogInputStream.FileChannelRecordBatch batch2 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch2.baseOffset()).thenReturn(15L); - when(batch2.lastOffset()).thenReturn(20L); + when(batch2.baseOffset()).thenReturn(15L); + when(batch2.lastOffset()).thenReturn(20L); - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 0, 100, false)); + FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); mockFileRecordBatches(fileRecords, batch1, batch2); FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(13L, 0); diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java index 84d917fa39d28..f102bd5fd9534 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.CloseableIterator; -import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestUtils; @@ -50,7 +49,6 @@ import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -91,8 +89,6 @@ public Stream provideArguments(ExtensionContext context) { } } - private final Time time = Time.SYSTEM; - @Test public void testUnsupportedCompress() { BiFunction builderBiFunction = (magic, compression) -> @@ -638,31 +634,6 @@ public Stream provideArguments(ExtensionContext context) { } } - private void verifyRecordsProcessingStats(Compression 
compression, RecordValidationStats processingStats, - int numRecords, int numRecordsConverted, long finalBytes, - long preConvertedBytes) { - assertNotNull(processingStats, "Records processing info is null"); - assertEquals(numRecordsConverted, processingStats.numRecordsConverted()); - // Since nanoTime accuracy on build machines may not be sufficient to measure small conversion times, - // only check if the value >= 0. Default is -1, so this checks if time has been recorded. - assertTrue(processingStats.conversionTimeNanos() >= 0, "Processing time not recorded: " + processingStats); - long tempBytes = processingStats.temporaryMemoryBytes(); - if (compression.type() == CompressionType.NONE) { - if (numRecordsConverted == 0) - assertEquals(finalBytes, tempBytes); - else if (numRecordsConverted == numRecords) - assertEquals(preConvertedBytes + finalBytes, tempBytes); - else { - assertTrue(tempBytes > finalBytes && tempBytes < finalBytes + preConvertedBytes, - String.format("Unexpected temp bytes %d final %d pre %d", tempBytes, finalBytes, preConvertedBytes)); - } - } else { - long compressedBytes = finalBytes - Records.LOG_OVERHEAD - LegacyRecord.RECORD_OVERHEAD_V0; - assertTrue(tempBytes > compressedBytes, - String.format("Uncompressed size expected temp=%d, compressed=%d", tempBytes, compressedBytes)); - } - } - private ByteBuffer allocateBuffer(int size, Args args) { ByteBuffer buffer = ByteBuffer.allocate(size); buffer.position(args.bufferOffset); diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java index 3818976e423fd..7092928010b30 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java @@ -39,10 +39,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.OptionalLong; import java.util.function.BiFunction; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import static java.util.Arrays.asList; @@ -131,10 +135,6 @@ public void testIterator(Args args) { ByteBuffer buffer = ByteBuffer.allocate(1024); int partitionLeaderEpoch = 998; - MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, magic, compression, - TimestampType.CREATE_TIME, firstOffset, logAppendTime, pid, epoch, firstSequence, false, false, - partitionLeaderEpoch, buffer.limit()); - SimpleRecord[] records = new SimpleRecord[] { new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), @@ -144,10 +144,30 @@ public void testIterator(Args args) { new SimpleRecord(6L, (byte[]) null, null) }; - for (SimpleRecord record : records) - builder.append(record); + final MemoryRecords memoryRecords; + try (var builder = new MemoryRecordsBuilder( + buffer, + magic, + compression, + TimestampType.CREATE_TIME, + firstOffset, + logAppendTime, + pid, + epoch, + firstSequence, + false, + false, + partitionLeaderEpoch, + buffer.limit() + ) + ) { + for (SimpleRecord record : records) { + builder.append(record); + } + + memoryRecords = builder.build(); + } - MemoryRecords memoryRecords = builder.build(); for (int iteration = 0; iteration < 2; iteration++) { int total = 0; for (RecordBatch batch : memoryRecords.batches()) { @@ -1068,6 +1088,146 @@ 
public void testUnsupportedCompress() { }); } + @ParameterizedTest + @ArgumentsSource(MemoryRecordsArgumentsProvider.class) + public void testSlice(Args args) { + // Create a MemoryRecords instance with multiple batches. Prior RecordBatch.MAGIC_VALUE_V2, + // every append in a batch is a new batch. After RecordBatch.MAGIC_VALUE_V2, we can have multiple + // batches in a single MemoryRecords instance. Though with compression, we can have multiple + // appends resulting in a single batch prior RecordBatch.MAGIC_VALUE_V2 as well. + LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); + recordsPerOffset.put(args.firstOffset, 3); + recordsPerOffset.put(args.firstOffset + 6L, 8); + recordsPerOffset.put(args.firstOffset + 15L, 4); + MemoryRecords records = createMemoryRecords(args, recordsPerOffset); + + // Test slicing from start + MemoryRecords sliced = records.slice(0, records.sizeInBytes()); + assertEquals(records.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(records.validBytes(), sliced.validBytes()); + TestUtils.checkEquals(records.batches(), sliced.batches()); + + List items = batches(records); + // Test slicing first message. + RecordBatch first = items.get(0); + sliced = records.slice(first.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes()); + assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); + assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); + + // Read from second message and size is past the end of the file. + sliced = records.slice(first.sizeInBytes(), records.sizeInBytes()); + assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); + assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); + + // Read from second message and position + size overflows. + sliced = records.slice(first.sizeInBytes(), Integer.MAX_VALUE); + assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); + assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); + + // Read a single message starting from second message. + RecordBatch second = items.get(1); + sliced = records.slice(first.sizeInBytes(), second.sizeInBytes()); + assertEquals(second.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(Collections.singletonList(second), batches(sliced), "Read a single message starting from the second message"); + + // Read from already sliced view. + List remainingItems = IntStream.range(0, items.size()).filter(i -> i != 0 && i != 1).mapToObj(items::get).collect(Collectors.toList()); + int remainingSize = remainingItems.stream().mapToInt(RecordBatch::sizeInBytes).sum(); + sliced = records.slice(first.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes()) + .slice(second.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes() - second.sizeInBytes()); + assertEquals(remainingSize, sliced.sizeInBytes()); + assertEquals(remainingItems, batches(sliced), "Read starting from the third message"); + + // Read from second message and size is past the end of the file on the already sliced view. 
+ sliced = records.slice(1, records.sizeInBytes() - 1) + .slice(first.sizeInBytes() - 1, records.sizeInBytes()); + assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); + assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); + + // Read from second message and position + size overflows on the already sliced view. + sliced = records.slice(1, records.sizeInBytes() - 1) + .slice(first.sizeInBytes() - 1, Integer.MAX_VALUE); + assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); + assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); + assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); + } + + @ParameterizedTest + @ArgumentsSource(MemoryRecordsArgumentsProvider.class) + public void testSliceInvalidPosition(Args args) { + MemoryRecords records = createMemoryRecords(args, Map.of(args.firstOffset, 1)); + assertThrows(IllegalArgumentException.class, () -> records.slice(-1, records.sizeInBytes())); + assertThrows(IllegalArgumentException.class, () -> records.slice(records.sizeInBytes() + 1, records.sizeInBytes())); + } + + @ParameterizedTest + @ArgumentsSource(MemoryRecordsArgumentsProvider.class) + public void testSliceInvalidSize(Args args) { + MemoryRecords records = createMemoryRecords(args, Map.of(args.firstOffset, 1)); + assertThrows(IllegalArgumentException.class, () -> records.slice(0, -1)); + } + + @Test + public void testSliceEmptyRecords() { + MemoryRecords empty = MemoryRecords.EMPTY; + Records sliced = empty.slice(0, 0); + assertEquals(0, sliced.sizeInBytes()); + assertEquals(0, batches(sliced).size()); + } + + /** + * Test slice when already sliced memory records have start position greater than available bytes + * in the memory records. + */ + @ParameterizedTest + @ArgumentsSource(MemoryRecordsArgumentsProvider.class) + public void testSliceForAlreadySlicedMemoryRecords(Args args) { + LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); + recordsPerOffset.put(args.firstOffset, 5); + recordsPerOffset.put(args.firstOffset + 5L, 10); + recordsPerOffset.put(args.firstOffset + 15L, 12); + recordsPerOffset.put(args.firstOffset + 27L, 4); + + MemoryRecords records = createMemoryRecords(args, recordsPerOffset); + List items = batches(records.slice(0, records.sizeInBytes())); + + // Slice from third message until the end. + int position = IntStream.range(0, 2).map(i -> items.get(i).sizeInBytes()).sum(); + Records sliced = records.slice(position, records.sizeInBytes() - position); + assertEquals(records.sizeInBytes() - position, sliced.sizeInBytes()); + assertEquals(items.subList(2, items.size()), batches(sliced), "Read starting from the third message"); + + // Further slice the already sliced memory records, from fourth message until the end. Now the + // bytes available in the sliced records are less than the moved position from original records. 
+ position = items.get(2).sizeInBytes(); + Records finalSliced = sliced.slice(position, sliced.sizeInBytes() - position); + assertEquals(sliced.sizeInBytes() - position, finalSliced.sizeInBytes()); + assertEquals(items.subList(3, items.size()), batches(finalSliced), "Read starting from the fourth message"); + } + + private MemoryRecords createMemoryRecords(Args args, Map recordsPerOffset) { + ByteBuffer buffer = ByteBuffer.allocate(1024); + recordsPerOffset.forEach((offset, numOfRecords) -> { + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, args.magic, args.compression, + TimestampType.CREATE_TIME, offset); + for (int i = 0; i < numOfRecords; i++) { + builder.appendWithOffset(offset + i, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + } + builder.close(); + }); + buffer.flip(); + + return MemoryRecords.readableRecords(buffer); + } + + private static List batches(Records buffer) { + return TestUtils.toList(buffer.batches()); + } + private static class RetainNonNullKeysFilter extends MemoryRecords.RecordFilter { public RetainNonNullKeysFilter() { super(0, 0); diff --git a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java index c66b0411fc510..d9d42d4d92295 100644 --- a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java @@ -46,8 +46,8 @@ public void testSameRackSelector() { ReplicaSelector selector = new RackAwareReplicaSelector(); Optional selected = selector.select(tp, metadata("rack-b"), partitionView); assertOptional(selected, replicaInfo -> { - assertEquals(replicaInfo.endpoint().rack(), "rack-b", "Expect replica to be in rack-b"); - assertEquals(replicaInfo.endpoint().id(), 3, "Expected replica 3 since it is more caught-up"); + assertEquals("rack-b", replicaInfo.endpoint().rack(), "Expect replica to be in rack-b"); + assertEquals(3, replicaInfo.endpoint().id(), "Expected replica 3 since it is more caught-up"); }); selected = selector.select(tp, metadata("not-a-rack"), partitionView); @@ -57,7 +57,7 @@ public void testSameRackSelector() { selected = selector.select(tp, metadata("rack-a"), partitionView); assertOptional(selected, replicaInfo -> { - assertEquals(replicaInfo.endpoint().rack(), "rack-a", "Expect replica to be in rack-a"); + assertEquals("rack-a", replicaInfo.endpoint().rack(), "Expect replica to be in rack-a"); assertEquals(replicaInfo, leader, "Expect the leader since it's in rack-a"); }); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java index 4e6aa4f87e3a1..85f35e683ec8a 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java @@ -44,9 +44,6 @@ public class CreateAclsRequestTest { private static final AclBinding LITERAL_ACL1 = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.DENY)); - private static final AclBinding LITERAL_ACL2 = new AclBinding(new ResourcePattern(ResourceType.GROUP, "group", PatternType.LITERAL), - new AccessControlEntry("User:*", "127.0.0.1", AclOperation.WRITE, AclPermissionType.ALLOW)); - private static final 
AclBinding PREFIXED_ACL1 = new AclBinding(new ResourcePattern(ResourceType.GROUP, "prefix", PatternType.PREFIXED), new AccessControlEntry("User:*", "127.0.0.1", AclOperation.CREATE, AclPermissionType.ALLOW)); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java index a4abf88c83703..bf199275db868 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java @@ -22,13 +22,12 @@ import org.apache.kafka.common.message.DeleteAclsResponseData; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; - import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -98,9 +97,9 @@ public void shouldRoundTripV1() { .setThrottleTimeMs(10) .setFilterResults(asList(LITERAL_RESPONSE, PREFIXED_RESPONSE)), V1); - final ByteBuffer buffer = original.serialize(V1); + final Readable readable = original.serialize(V1); - final DeleteAclsResponse result = DeleteAclsResponse.parse(buffer, V1); + final DeleteAclsResponse result = DeleteAclsResponse.parse(readable, V1); assertEquals(original.filterResults(), result.filterResults()); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java index e09c1eee72155..5062677e12300 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java @@ -33,9 +33,6 @@ public class DescribeAclsRequestTest { private static final short V1 = 1; - private static final AclBindingFilter LITERAL_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.TOPIC, "foo", PatternType.LITERAL), - new AccessControlEntryFilter("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.DENY)); - private static final AclBindingFilter PREFIXED_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.GROUP, "prefix", PatternType.PREFIXED), new AccessControlEntryFilter("User:*", "127.0.0.1", AclOperation.CREATE, AclPermissionType.ALLOW)); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java index 243b3a80e6f29..c4ec20385e102 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java @@ -25,13 +25,13 @@ import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription; import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import 
org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -74,12 +74,6 @@ public class DescribeAclsResponseTest { PatternType.LITERAL, Collections.singletonList(ALLOW_CREATE_ACL)); - private static final DescribeAclsResource LITERAL_ACL2 = buildResource( - "group", - ResourceType.GROUP, - PatternType.LITERAL, - Collections.singletonList(DENY_READ_ACL)); - @Test public void shouldThrowIfUnknown() { assertThrows(IllegalArgumentException.class, @@ -90,9 +84,9 @@ public void shouldThrowIfUnknown() { public void shouldRoundTripV1() { List resources = Arrays.asList(LITERAL_ACL1, PREFIXED_ACL1); final DescribeAclsResponse original = buildResponse(100, Errors.NONE, resources); - final ByteBuffer buffer = original.serialize(V1); + final Readable readable = original.serialize(V1); - final DescribeAclsResponse result = DescribeAclsResponse.parse(buffer, V1); + final DescribeAclsResponse result = DescribeAclsResponse.parse(readable, V1); assertResponseEquals(original, result); final DescribeAclsResponse result2 = buildResponse(100, Errors.NONE, DescribeAclsResponse.aclsResources( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java index 60d10a689394c..c70b7eda5da98 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java @@ -19,14 +19,15 @@ import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.JoinGroupRequestData; +import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; import java.util.Arrays; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; public class JoinGroupRequestTest { @@ -47,12 +48,9 @@ public void shouldThrowOnInvalidGroupInstanceIds() { String[] invalidGroupInstanceIds = {"", "foo bar", "..", "foo:bar", "foo=bar", ".", new String(longString)}; for (String instanceId : invalidGroupInstanceIds) { - try { - JoinGroupRequest.validateGroupInstanceId(instanceId); - fail("No exception was thrown for invalid instance id: " + instanceId); - } catch (InvalidConfigurationException e) { - // Good - } + assertThrows(InvalidConfigurationException.class, + () -> JoinGroupRequest.validateGroupInstanceId(instanceId), + "InvalidConfigurationException expected as instance id is invalid."); } } @Test @@ -65,4 +63,20 @@ public void testRequestVersionCompatibilityFailBuild() { .setProtocolType("consumer") ).build((short) 4)); } + + @Test + public void testRebalanceTimeoutDefaultsToSessionTimeoutV0() { + int sessionTimeoutMs = 30000; + short version = 0; + + var buffer = MessageUtil.toByteBufferAccessor(new JoinGroupRequestData() + .setGroupId("groupId") + .setMemberId("consumerId") + .setProtocolType("consumer") + .setSessionTimeoutMs(sessionTimeoutMs), version); + + JoinGroupRequest request = JoinGroupRequest.parse(buffer, version); + assertEquals(sessionTimeoutMs, request.data().sessionTimeoutMs()); + assertEquals(sessionTimeoutMs, request.data().rebalanceTimeoutMs()); + } } diff --git 
a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java index 0f098b462a21a..4d73c042c5bb7 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java @@ -22,13 +22,13 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; -import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.EnumMap; @@ -111,9 +111,9 @@ public void testEqualityWithSerialization() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { LeaveGroupResponse primaryResponse = LeaveGroupResponse.parse( - MessageUtil.toByteBufferAccessor(responseData, version).buffer(), version); + MessageUtil.toByteBufferAccessor(responseData, version), version); LeaveGroupResponse secondaryResponse = LeaveGroupResponse.parse( - MessageUtil.toByteBufferAccessor(responseData, version).buffer(), version); + MessageUtil.toByteBufferAccessor(responseData, version), version); assertEquals(primaryResponse, primaryResponse); assertEquals(primaryResponse, secondaryResponse); @@ -130,7 +130,7 @@ public void testParse() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { - ByteBuffer buffer = MessageUtil.toByteBufferAccessor(data, version).buffer(); + Readable buffer = MessageUtil.toByteBufferAccessor(data, version); LeaveGroupResponse leaveGroupResponse = LeaveGroupResponse.parse(buffer, version); assertEquals(expectedErrorCounts, leaveGroupResponse.errorCounts()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java index 2aa2f0c150b10..48542c1a2fd7d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java @@ -127,19 +127,23 @@ public void testListOffsetsRequestOldestVersion() { .forConsumer(false, IsolationLevel.READ_COMMITTED); ListOffsetsRequest.Builder maxTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, true, false, false); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, true, false, false, false); ListOffsetsRequest.Builder requireEarliestLocalTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, true, false); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, true, false, false); ListOffsetsRequest.Builder requireTieredStorageTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, true); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, true, false); - assertEquals((short) 0, consumerRequestBuilder.oldestAllowedVersion()); + ListOffsetsRequest.Builder requireEarliestPendingUploadTimestampRequestBuilder = ListOffsetsRequest.Builder + .forConsumer(false, 
IsolationLevel.READ_UNCOMMITTED, false, false, false, true); + + assertEquals((short) 1, consumerRequestBuilder.oldestAllowedVersion()); assertEquals((short) 1, requireTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 2, requestCommittedRequestBuilder.oldestAllowedVersion()); assertEquals((short) 7, maxTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 8, requireEarliestLocalTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 9, requireTieredStorageTimestampRequestBuilder.oldestAllowedVersion()); + assertEquals((short) 11, requireEarliestPendingUploadTimestampRequestBuilder.oldestAllowedVersion()); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java index 161a4dd5f1192..9cd95cfec769e 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestPartition; @@ -45,6 +46,8 @@ public class OffsetCommitRequestTest { protected static String groupId = "groupId"; protected static String memberId = "consumerId"; protected static String groupInstanceId = "groupInstanceId"; + protected static Uuid topicIdOne = Uuid.randomUuid(); + protected static Uuid topicIdTwo = Uuid.randomUuid(); protected static String topicOne = "topicOne"; protected static String topicTwo = "topicTwo"; protected static int partitionOne = 1; @@ -61,6 +64,7 @@ public class OffsetCommitRequestTest { public void setUp() { List topics = Arrays.asList( new OffsetCommitRequestTopic() + .setTopicId(topicIdOne) .setName(topicOne) .setPartitions(Collections.singletonList( new OffsetCommitRequestPartition() @@ -70,6 +74,7 @@ public void setUp() { .setCommittedMetadata(metadata) )), new OffsetCommitRequestTopic() + .setTopicId(topicIdTwo) .setName(topicTwo) .setPartitions(Collections.singletonList( new OffsetCommitRequestPartition() @@ -90,7 +95,7 @@ public void testConstructor() { expectedOffsets.put(new TopicPartition(topicOne, partitionOne), offset); expectedOffsets.put(new TopicPartition(topicTwo, partitionTwo), offset); - OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(data); + OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames(data); for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { OffsetCommitRequest request = builder.build(version); @@ -105,7 +110,7 @@ public void testConstructor() { @Test public void testVersionSupportForGroupInstanceId() { - OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder( + OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames( new OffsetCommitRequestData() .setGroupId(groupId) .setMemberId(memberId) @@ -127,12 +132,14 @@ public void testGetErrorResponse() { OffsetCommitResponseData expectedResponse = new OffsetCommitResponseData() .setTopics(Arrays.asList( new OffsetCommitResponseTopic() + .setTopicId(topicIdOne) .setName(topicOne) .setPartitions(Collections.singletonList( new OffsetCommitResponsePartition() 
.setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) .setPartitionIndex(partitionOne))), new OffsetCommitResponseTopic() + .setTopicId(topicIdTwo) .setName(topicTwo) .setPartitions(Collections.singletonList( new OffsetCommitResponsePartition() diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java index 7351db55b544e..dcfb988116797 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java @@ -23,11 +23,11 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.Readable; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.EnumMap; @@ -88,8 +88,8 @@ public void testParse() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { - ByteBuffer buffer = MessageUtil.toByteBufferAccessor(data, version).buffer(); - OffsetCommitResponse response = OffsetCommitResponse.parse(buffer, version); + Readable readable = MessageUtil.toByteBufferAccessor(data, version); + OffsetCommitResponse response = OffsetCommitResponse.parse(readable, version); assertEquals(expectedErrorCounts, response.errorCounts()); if (version >= 3) { diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java index 1098925e42a0c..9ea1bc540d78d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java @@ -16,220 +16,250 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; +import org.apache.kafka.common.message.OffsetFetchRequestData; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.OffsetFetchRequest.Builder; -import org.apache.kafka.common.requests.OffsetFetchRequest.NoBatchedOffsetFetchRequestException; -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Optional; -import static org.apache.kafka.common.requests.AbstractResponse.DEFAULT_THROTTLE_TIME; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; public class 
OffsetFetchRequestTest { - - private final String topicOne = "topic1"; - private final int partitionOne = 1; - private final String topicTwo = "topic2"; - private final int partitionTwo = 2; - private final String topicThree = "topic3"; - private final String group1 = "group1"; - private final String group2 = "group2"; - private final String group3 = "group3"; - private final String group4 = "group4"; - private final String group5 = "group5"; - private final List groups = Arrays.asList(group1, group2, group3, group4, group5); - - private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7); - - - private OffsetFetchRequest.Builder builder; - - @Test - public void testConstructor() { - List partitions = Arrays.asList( - new TopicPartition(topicOne, partitionOne), - new TopicPartition(topicTwo, partitionTwo)); - int throttleTimeMs = 10; - - Map expectedData = new HashMap<>(); - for (TopicPartition partition : partitions) { - expectedData.put(partition, new PartitionData( - OffsetFetchResponse.INVALID_OFFSET, - Optional.empty(), - OffsetFetchResponse.NO_METADATA, - Errors.NONE + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testWithMultipleGroups(short version) { + var data = new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1, 2)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp2") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("bar") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1, 2)) + )) )); + var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); + + if (version < 8) { + assertThrows(OffsetFetchRequest.NoBatchedOffsetFetchRequestException.class, () -> builder.build(version)); + } else { + assertEquals(data, builder.build(version).data()); } + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testThrowOnFetchStableOffsetsUnsupported(short version) { + var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(true) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1, 2)) + )) + )), + true, + true + ); + + if (version < 7) { + assertThrows(UnsupportedVersionException.class, () -> builder.build(version)); + } else { + builder.build(version); + } + } - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version < 8) { - builder = new OffsetFetchRequest.Builder( - group1, - false, - partitions, - false); - OffsetFetchRequest request = builder.build(version); - assertFalse(request.isAllPartitions()); - assertEquals(group1, request.groupId()); - assertEquals(partitions, request.partitions()); - - OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); - assertEquals(Errors.NONE, response.error()); - assertFalse(response.hasError()); - assertEquals(Collections.singletonMap(Errors.NONE, version <= (short) 1 ? 
3 : 1), response.errorCounts(), - "Incorrect error count for version " + version); - - if (version <= 1) { - assertEquals(expectedData, response.responseDataV0ToV7()); - } - - if (version >= 3) { - assertEquals(throttleTimeMs, response.throttleTimeMs()); - } else { - assertEquals(DEFAULT_THROTTLE_TIME, response.throttleTimeMs()); - } - } else { - builder = new Builder(Collections.singletonMap(group1, partitions), false, false); - OffsetFetchRequest request = builder.build(version); - Map> groupToPartitionMap = - request.groupIdsToPartitions(); - Map> groupToTopicMap = - request.groupIdsToTopics(); - assertFalse(request.isAllPartitionsForGroup(group1)); - assertTrue(groupToPartitionMap.containsKey(group1) && groupToTopicMap.containsKey( - group1)); - assertEquals(partitions, groupToPartitionMap.get(group1)); - OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); - assertEquals(Errors.NONE, response.groupLevelError(group1)); - assertFalse(response.groupHasError(group1)); - assertEquals(Collections.singletonMap(Errors.NONE, 1), response.errorCounts(), - "Incorrect error count for version " + version); - assertEquals(throttleTimeMs, response.throttleTimeMs()); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testSingleGroup(short version) { + var data = new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1, 2)) + )) + )); + var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); + + if (version < 8) { + var expectedRequest = new OffsetFetchRequestData() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopic() + .setName("foo") + .setPartitionIndexes(List.of(0, 1, 2)) + )); + assertEquals(expectedRequest, builder.build(version).data()); + } else { + assertEquals(data, builder.build(version).data()); } } - @Test - public void testConstructorWithMultipleGroups() { - List topic1Partitions = Arrays.asList( - new TopicPartition(topicOne, partitionOne), - new TopicPartition(topicOne, partitionTwo)); - List topic2Partitions = Arrays.asList( - new TopicPartition(topicTwo, partitionOne), - new TopicPartition(topicTwo, partitionTwo)); - List topic3Partitions = Arrays.asList( - new TopicPartition(topicThree, partitionOne), - new TopicPartition(topicThree, partitionTwo)); - Map> groupToTp = new HashMap<>(); - groupToTp.put(group1, topic1Partitions); - groupToTp.put(group2, topic2Partitions); - groupToTp.put(group3, topic3Partitions); - groupToTp.put(group4, null); - groupToTp.put(group5, null); - int throttleTimeMs = 10; - - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - builder = new Builder(groupToTp, false, false); - OffsetFetchRequest request = builder.build(version); - Map> groupToPartitionMap = - request.groupIdsToPartitions(); - Map> groupToTopicMap = - request.groupIdsToTopics(); - assertEquals(groupToTp.keySet(), groupToTopicMap.keySet()); - assertEquals(groupToTp.keySet(), groupToPartitionMap.keySet()); - assertFalse(request.isAllPartitionsForGroup(group1)); - assertFalse(request.isAllPartitionsForGroup(group2)); - assertFalse(request.isAllPartitionsForGroup(group3)); - assertTrue(request.isAllPartitionsForGroup(group4)); - assertTrue(request.isAllPartitionsForGroup(group5)); - 
OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); - for (String group : groups) { - assertEquals(Errors.NONE, response.groupLevelError(group)); - assertFalse(response.groupHasError(group)); - } - assertEquals(Collections.singletonMap(Errors.NONE, 5), response.errorCounts(), - "Incorrect error count for version " + version); - assertEquals(throttleTimeMs, response.throttleTimeMs()); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testSingleGroupWithAllTopics(short version) { + var data = new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(null) + )); + var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); + + if (version < 2) { + assertThrows(UnsupportedVersionException.class, () -> builder.build(version)); + } else if (version < 8) { + var expectedRequest = new OffsetFetchRequestData() + .setGroupId("grp1") + .setTopics(null); + assertEquals(expectedRequest, builder.build(version).data()); + } else { + assertEquals(data, builder.build(version).data()); } } - @Test - public void testBuildThrowForUnsupportedBatchRequest() { - for (int version : listOfVersionsNonBatchOffsetFetch) { - Map> groupPartitionMap = new HashMap<>(); - groupPartitionMap.put(group1, null); - groupPartitionMap.put(group2, null); - builder = new Builder(groupPartitionMap, true, false); - final short finalVersion = (short) version; - assertThrows(NoBatchedOffsetFetchRequestException.class, () -> builder.build(finalVersion)); + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testGetErrorResponse(short version) { + var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1)) + )) + )), + false, + true + ).build(version); + + if (version < 2) { + var expectedResponse = new OffsetFetchResponseData() + .setThrottleTimeMs(1000) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH), + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setMetadata(OffsetFetchResponse.NO_METADATA) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + )) + )); + assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); + } else if (version < 8) { + var expectedResponse = new OffsetFetchResponseData() + .setThrottleTimeMs(1000) + .setErrorCode(Errors.INVALID_GROUP_ID.code()); + assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); + } else { + var expectedResponse = new OffsetFetchResponseData() + .setThrottleTimeMs(1000) + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + 
.setGroupId("grp1") + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + )); + assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); } } - @Test - public void testConstructorFailForUnsupportedRequireStable() { - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version < 8) { - // The builder needs to be initialized every cycle as the internal data `requireStable` flag is flipped. - builder = new OffsetFetchRequest.Builder(group1, true, null, false); - final short finalVersion = version; - if (version < 2) { - assertThrows(UnsupportedVersionException.class, () -> builder.build(finalVersion)); - } else { - OffsetFetchRequest request = builder.build(finalVersion); - assertEquals(group1, request.groupId()); - assertNull(request.partitions()); - assertTrue(request.isAllPartitions()); - if (version < 7) { - assertFalse(request.requireStable()); - } else { - assertTrue(request.requireStable()); - } - } - } else { - builder = new Builder(Collections.singletonMap(group1, null), true, false); - OffsetFetchRequest request = builder.build(version); - Map> groupToPartitionMap = - request.groupIdsToPartitions(); - Map> groupToTopicMap = - request.groupIdsToTopics(); - assertTrue(groupToPartitionMap.containsKey(group1) && groupToTopicMap.containsKey( - group1)); - assertNull(groupToPartitionMap.get(group1)); - assertTrue(request.isAllPartitionsForGroup(group1)); - assertTrue(request.requireStable()); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testGroups(short version) { + var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(Uuid.randomUuid()) + .setPartitionIndexes(List.of(0, 1, 2)) + )) + )), + false, + true + ).build(version); + + if (version < 8) { + var expectedGroups = List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setPartitionIndexes(List.of(0, 1, 2)) + )) + ); + assertEquals(expectedGroups, request.groups()); + } else { + assertEquals(request.data().groups(), request.groups()); } } - @Test - public void testBuildThrowForUnsupportedRequireStable() { - for (int version : listOfVersionsNonBatchOffsetFetch) { - builder = new OffsetFetchRequest.Builder(group1, true, null, true); - if (version < 7) { - final short finalVersion = (short) version; - assertThrows(UnsupportedVersionException.class, () -> builder.build(finalVersion)); - } else { - OffsetFetchRequest request = builder.build((short) version); - assertTrue(request.requireStable()); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 2) + public void testGroupsWithAllTopics(short version) { + var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(null) + )), + false, + true + ).build(version); + + if (version < 8) { + var expectedGroups = List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp1") + .setTopics(null) + ); + assertEquals(expectedGroups, request.groups()); + } else { + assertEquals(request.data().groups(), request.groups()); 
} } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java index d0ef79b4479e8..f3750784c6ed0 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java @@ -16,427 +16,349 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchResponseData; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseGroup; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartition; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartitions; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopic; -import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopics; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; -import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; -import java.util.Collections; -import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.Optional; -import static org.apache.kafka.common.requests.AbstractResponse.DEFAULT_THROTTLE_TIME; +import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH; +import static org.apache.kafka.common.requests.OffsetFetchResponse.INVALID_OFFSET; +import static org.apache.kafka.common.requests.OffsetFetchResponse.NO_METADATA; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertThrows; public class OffsetFetchResponseTest { - private final int throttleTimeMs = 10; - private final int offset = 100; - private final String metadata = "metadata"; - - private final String groupOne = "group1"; - private final String groupTwo = "group2"; - private final String groupThree = "group3"; - private final String topicOne = "topic1"; - private final int partitionOne = 1; - private final Optional leaderEpochOne = Optional.of(1); - private final String topicTwo = "topic2"; - private final int partitionTwo = 2; - private final Optional leaderEpochTwo = Optional.of(2); - private final String topicThree = "topic3"; - private final int partitionThree = 3; - private final Optional leaderEpochThree = Optional.of(3); - - - private Map partitionDataMap; - - @BeforeEach - public void setUp() { - partitionDataMap = new HashMap<>(); - partitionDataMap.put(new TopicPartition(topicOne, partitionOne), new PartitionData( - offset, - leaderEpochOne, - metadata, - Errors.TOPIC_AUTHORIZATION_FAILED - )); - partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( - offset, - leaderEpochTwo, - 
metadata, - Errors.UNKNOWN_TOPIC_OR_PARTITION - )); - } - - @Test - public void testConstructor() { - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version < 8) { - OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NOT_COORDINATOR, partitionDataMap); - assertEquals(Errors.NOT_COORDINATOR, response.error()); - assertEquals(3, response.errorCounts().size()); - assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), - Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1)), - response.errorCounts()); - - assertEquals(throttleTimeMs, response.throttleTimeMs()); - - Map responseData = response.responseDataV0ToV7(); - assertEquals(partitionDataMap, responseData); - responseData.forEach((tp, data) -> assertTrue(data.hasError())); - } else { - OffsetFetchResponse response = new OffsetFetchResponse( - throttleTimeMs, - Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), - Collections.singletonMap(groupOne, partitionDataMap)); - assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne)); - assertEquals(3, response.errorCounts().size()); - assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), - Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1)), - response.errorCounts()); - - assertEquals(throttleTimeMs, response.throttleTimeMs()); - - Map responseData = response.partitionDataMap(groupOne); - assertEquals(partitionDataMap, responseData); - responseData.forEach((tp, data) -> assertTrue(data.hasError())); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testBuilderWithSingleGroup(short version) { + var group = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )); + + if (version < 8) { + assertEquals( + new OffsetFetchResponseData() + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )), + new OffsetFetchResponse.Builder(group).build(version).data() + ); + } else { + assertEquals( + new OffsetFetchResponseData() + .setGroups(List.of(group)), + new OffsetFetchResponse.Builder(group).build(version).data() + ); } } - @Test - public void testConstructorWithMultipleGroups() { - Map> responseData = new HashMap<>(); - Map errorMap = new HashMap<>(); - Map pd1 = new HashMap<>(); - Map pd2 = new HashMap<>(); - Map pd3 = new HashMap<>(); - pd1.put(new TopicPartition(topicOne, partitionOne), new PartitionData( - offset, - leaderEpochOne, - metadata, - Errors.TOPIC_AUTHORIZATION_FAILED)); - pd2.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( - offset, - leaderEpochTwo, - metadata, - Errors.UNKNOWN_TOPIC_OR_PARTITION)); - pd3.put(new TopicPartition(topicThree, partitionThree), new PartitionData( - offset, - leaderEpochThree, - metadata, - Errors.NONE)); - responseData.put(groupOne, pd1); - responseData.put(groupTwo, pd2); - responseData.put(groupThree, pd3); - errorMap.put(groupOne, 
Errors.NOT_COORDINATOR); - errorMap.put(groupTwo, Errors.COORDINATOR_LOAD_IN_PROGRESS); - errorMap.put(groupThree, Errors.NONE); - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version >= 8) { - OffsetFetchResponse response = new OffsetFetchResponse( - throttleTimeMs, errorMap, responseData); - - assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne)); - assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, response.groupLevelError(groupTwo)); - assertEquals(Errors.NONE, response.groupLevelError(groupThree)); - assertTrue(response.groupHasError(groupOne)); - assertTrue(response.groupHasError(groupTwo)); - assertFalse(response.groupHasError(groupThree)); - assertEquals(5, response.errorCounts().size()); - assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), - Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1), - Utils.mkEntry(Errors.COORDINATOR_LOAD_IN_PROGRESS, 1), - Utils.mkEntry(Errors.NONE, 2)), - response.errorCounts()); - - assertEquals(throttleTimeMs, response.throttleTimeMs()); + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testBuilderWithMultipleGroups(short version) { + var groups = List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )), + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group2") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("bar") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )) + ); - Map responseData1 = response.partitionDataMap(groupOne); - assertEquals(pd1, responseData1); - responseData1.forEach((tp, data) -> assertTrue(data.hasError())); - Map responseData2 = response.partitionDataMap(groupTwo); - assertEquals(pd2, responseData2); - responseData2.forEach((tp, data) -> assertTrue(data.hasError())); - Map responseData3 = response.partitionDataMap(groupThree); - assertEquals(pd3, responseData3); - responseData3.forEach((tp, data) -> assertFalse(data.hasError())); - } + if (version < 8) { + assertThrows(UnsupportedVersionException.class, + () -> new OffsetFetchResponse.Builder(groups).build(version)); + } else { + assertEquals( + new OffsetFetchResponseData() + .setGroups(groups), + new OffsetFetchResponse.Builder(groups).build(version).data() + ); } } - /** - * Test behavior changes over the versions. 
Refer to resources.common.messages.OffsetFetchResponse.json - */ - @Test - public void testStructBuild() { - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version < 8) { - partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( - offset, - leaderEpochTwo, - metadata, - Errors.GROUP_AUTHORIZATION_FAILED - )); - - OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap); - OffsetFetchResponseData data = new OffsetFetchResponseData( - new ByteBufferAccessor(latestResponse.serialize(version)), version); - - OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version); - - if (version <= 1) { - assertEquals(Errors.NONE.code(), data.errorCode()); - - // Partition level error populated in older versions. - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, oldResponse.error()); - assertEquals(Utils.mkMap(Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 2), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), - oldResponse.errorCounts()); - } else { - assertEquals(Errors.NONE.code(), data.errorCode()); - - assertEquals(Errors.NONE, oldResponse.error()); - assertEquals(Utils.mkMap( - Utils.mkEntry(Errors.NONE, 1), - Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), - oldResponse.errorCounts()); - } - - if (version <= 2) { - assertEquals(DEFAULT_THROTTLE_TIME, oldResponse.throttleTimeMs()); - } else { - assertEquals(throttleTimeMs, oldResponse.throttleTimeMs()); - } - - Map expectedDataMap = new HashMap<>(); - for (Map.Entry entry : partitionDataMap.entrySet()) { - PartitionData partitionData = entry.getValue(); - expectedDataMap.put(entry.getKey(), new PartitionData( - partitionData.offset, - version <= 4 ? 
Optional.empty() : partitionData.leaderEpoch, - partitionData.metadata, - partitionData.error - )); - } - - Map<TopicPartition, PartitionData> responseData = oldResponse.responseDataV0ToV7(); - assertEquals(expectedDataMap, responseData); - - responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError())); - } else { - partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( - offset, - leaderEpochTwo, - metadata, - Errors.GROUP_AUTHORIZATION_FAILED)); - OffsetFetchResponse latestResponse = new OffsetFetchResponse( - throttleTimeMs, - Collections.singletonMap(groupOne, Errors.NONE), - Collections.singletonMap(groupOne, partitionDataMap)); - OffsetFetchResponseData data = new OffsetFetchResponseData( - new ByteBufferAccessor(latestResponse.serialize(version)), version); - OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version); - assertEquals(Errors.NONE.code(), data.groups().get(0).errorCode()); - - assertEquals(Errors.NONE, oldResponse.groupLevelError(groupOne)); - assertEquals(Utils.mkMap( - Utils.mkEntry(Errors.NONE, 1), - Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), - Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), - oldResponse.errorCounts()); - assertEquals(throttleTimeMs, oldResponse.throttleTimeMs()); - - Map<TopicPartition, PartitionData> expectedDataMap = new HashMap<>(); - for (Map.Entry<TopicPartition, PartitionData> entry : partitionDataMap.entrySet()) { - PartitionData partitionData = entry.getValue(); - expectedDataMap.put(entry.getKey(), new PartitionData( - partitionData.offset, - partitionData.leaderEpoch, - partitionData.metadata, - partitionData.error - )); - } - - Map<TopicPartition, PartitionData> responseData = oldResponse.partitionDataMap(groupOne); - assertEquals(expectedDataMap, responseData); - - responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError())); - } + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testGroupWithSingleGroup(short version) { + var data = new OffsetFetchResponseData(); + + if (version < 8) { + data.setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )); + } else { + data.setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("foo") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )) + )); } - } - @Test - public void testShouldThrottle() { - for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { - if (version < 8) { - OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap); - if (version >= 4) { - assertTrue(response.shouldClientThrottle(version)); - } else { - assertFalse(response.shouldClientThrottle(version)); - } - } else { - OffsetFetchResponse response = new OffsetFetchResponse( - throttleTimeMs, - Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), - Collections.singletonMap(groupOne, partitionDataMap)); - assertTrue(response.shouldClientThrottle(version)); - } - } + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("foo") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") +
.setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )), + new OffsetFetchResponse(data, version).group("foo") + ); } - @Test - public void testNullableMetadataV0ToV7() { - PartitionData pd = new PartitionData( - offset, - leaderEpochOne, - null, - Errors.UNKNOWN_TOPIC_OR_PARTITION); - // test PartitionData.equals with null metadata - assertEquals(pd, pd); - partitionDataMap.clear(); - partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd); - - OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.GROUP_AUTHORIZATION_FAILED, partitionDataMap); - OffsetFetchResponseData expectedData = - new OffsetFetchResponseData() - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()) - .setThrottleTimeMs(throttleTimeMs) - .setTopics(Collections.singletonList( - new OffsetFetchResponseTopic() - .setName(topicOne) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartition() - .setPartitionIndex(partitionOne) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpochOne.orElse(-1)) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - .setMetadata(null)) + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) + public void testGroupWithMultipleGroups(short version) { + var groups = List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") )) - ); - assertEquals(expectedData, response.data()); - } + )), + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group2") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("bar") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(10) + .setCommittedLeaderEpoch(5) + .setMetadata("metadata") + )) + )) + ); - @Test - public void testNullableMetadataV8AndAbove() { - PartitionData pd = new PartitionData( - offset, - leaderEpochOne, - null, - Errors.UNKNOWN_TOPIC_OR_PARTITION); - // test PartitionData.equals with null metadata - assertEquals(pd, pd); - partitionDataMap.clear(); - partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd); + var response = new OffsetFetchResponse( + new OffsetFetchResponseData().setGroups(groups), + version + ); - OffsetFetchResponse response = new OffsetFetchResponse( - throttleTimeMs, - Collections.singletonMap(groupOne, Errors.GROUP_AUTHORIZATION_FAILED), - Collections.singletonMap(groupOne, partitionDataMap)); - OffsetFetchResponseData expectedData = - new OffsetFetchResponseData() - .setGroups(Collections.singletonList( - new OffsetFetchResponseGroup() - .setGroupId(groupOne) - .setTopics(Collections.singletonList( - new OffsetFetchResponseTopics() - .setName(topicOne) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartitions() - .setPartitionIndex(partitionOne) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpochOne.orElse(-1)) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - .setMetadata(null))))) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()))) - 
.setThrottleTimeMs(throttleTimeMs); - assertEquals(expectedData, response.data()); + groups.forEach(group -> + assertEquals(group, response.group(group.groupId())) + ); } - @Test - public void testUseDefaultLeaderEpochV0ToV7() { - final Optional emptyLeaderEpoch = Optional.empty(); - partitionDataMap.clear(); + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testGroupWithSingleGroupWithTopLevelError(short version) { + var data = new OffsetFetchResponseData(); + + if (version < 2) { + data.setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + )) + )); + } else if (version < 8) { + data.setErrorCode(Errors.INVALID_GROUP_ID.code()); + } else { + data.setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("foo") + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + )); + } - partitionDataMap.put(new TopicPartition(topicOne, partitionOne), - new PartitionData( - offset, - emptyLeaderEpoch, - metadata, - Errors.UNKNOWN_TOPIC_OR_PARTITION) + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("foo") + .setErrorCode(Errors.INVALID_GROUP_ID.code()), + new OffsetFetchResponse(data, version).group("foo") ); - - OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NOT_COORDINATOR, partitionDataMap); - OffsetFetchResponseData expectedData = - new OffsetFetchResponseData() - .setErrorCode(Errors.NOT_COORDINATOR.code()) - .setThrottleTimeMs(throttleTimeMs) - .setTopics(Collections.singletonList( - new OffsetFetchResponseTopic() - .setName(topicOne) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartition() - .setPartitionIndex(partitionOne) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - .setMetadata(metadata)) - )) - ); - assertEquals(expectedData, response.data()); } - @Test - public void testUseDefaultLeaderEpochV8() { - final Optional emptyLeaderEpoch = Optional.empty(); - partitionDataMap.clear(); + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testSingleGroupWithError(short version) { + var group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setPartitionIndexes(List.of(0)) + )); + + if (version < 2) { + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setErrorCode(Errors.INVALID_GROUP_ID.code()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) + )) + )), + OffsetFetchResponse.groupError(group, Errors.INVALID_GROUP_ID, version) + ); + } else { + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setErrorCode(Errors.INVALID_GROUP_ID.code()), + OffsetFetchResponse.groupError(group, Errors.INVALID_GROUP_ID, version) + ); + } + } - partitionDataMap.put(new TopicPartition(topicOne, partitionOne), - new 
PartitionData( - offset, - emptyLeaderEpoch, - metadata, - Errors.UNKNOWN_TOPIC_OR_PARTITION) - ); - OffsetFetchResponse response = new OffsetFetchResponse( - throttleTimeMs, - Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), - Collections.singletonMap(groupOne, partitionDataMap)); - OffsetFetchResponseData expectedData = - new OffsetFetchResponseData() - .setGroups(Collections.singletonList( - new OffsetFetchResponseGroup() - .setGroupId(groupOne) - .setTopics(Collections.singletonList( - new OffsetFetchResponseTopics() - .setName(topicOne) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartitions() - .setPartitionIndex(partitionOne) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - .setMetadata(metadata))))) - .setErrorCode(Errors.NOT_COORDINATOR.code()))) - .setThrottleTimeMs(throttleTimeMs); - assertEquals(expectedData, response.data()); + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + public void testErrorCounts(short version) { + if (version < 2) { + var data = new OffsetFetchResponseData() + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) + )) + )); + assertEquals( + Map.of(Errors.UNSTABLE_OFFSET_COMMIT, 1), + new OffsetFetchResponse(data, version).errorCounts() + ); + } else if (version < 8) { + // Version 2 returns a top level error code for group or coordinator level errors. + var data = new OffsetFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopic() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) + )) + )); + assertEquals( + Map.of( + Errors.NONE, 1, + Errors.UNSTABLE_OFFSET_COMMIT, 1 + ), + new OffsetFetchResponse(data, version).errorCounts() + ); + } else { + // Version 8 adds support for fetching offsets for multiple groups at a time. 
+ var data = new OffsetFetchResponseData() + .setGroups(List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setErrorCode(Errors.NONE.code()) + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) + )) + )) + )); + assertEquals( + Map.of( + Errors.NONE, 1, + Errors.UNSTABLE_OFFSET_COMMIT, 1 + ), + new OffsetFetchResponse(data, version).errorCounts() + ); + } } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java index 42a1e1f39681d..eb1627055e11d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.InvalidRecordException; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.message.ProduceRequestData; @@ -54,7 +55,7 @@ public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() { final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(1) @@ -83,7 +84,7 @@ public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() { final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(1) @@ -102,13 +103,36 @@ public void testBuildWithCurrentMessageFormat() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData().setName("test").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) + new ProduceRequestData.TopicProduceData() + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) .iterator())) .setAcks((short) 1) .setTimeoutMs(5000), false); - assertEquals(3, requestBuilder.oldestAllowedVersion()); + assertEquals(ApiKeys.PRODUCE.oldestVersion(), requestBuilder.oldestAllowedVersion()); + assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion()); + } + + @Test + public void testBuildWithCurrentMessageFormatWithoutTopicId() { + ByteBuffer buffer = ByteBuffer.allocate(256); + MemoryRecordsBuilder 
builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, + Compression.NONE, TimestampType.CREATE_TIME, 0L); + builder.append(10L, null, "a".getBytes()); + ProduceRequest.Builder requestBuilder = ProduceRequest.builder( + new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("topic") // TopicId will default to Uuid.ZERO and client will get UNKNOWN_TOPIC_ID error. + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) + .iterator())) + .setAcks((short) 1) + .setTimeoutMs(5000), + false); + assertEquals(ApiKeys.PRODUCE.oldestVersion(), requestBuilder.oldestAllowedVersion()); assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion()); } @@ -129,7 +153,7 @@ public void testV3AndAboveShouldContainOnlyOneRecordBatch() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("test") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -144,7 +168,7 @@ public void testV3AndAboveCannotHaveNoRecordBatches() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("test") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -164,7 +188,7 @@ public void testV3AndAboveCannotUseMagicV0() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("test") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -184,7 +208,7 @@ public void testV3AndAboveCannotUseMagicV1() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("test") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(builder.build())))) @@ -204,7 +228,7 @@ public void testV6AndBelowCannotUseZStdCompression() { ProduceRequestData produceData = new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("test") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(builder.build())))) @@ -235,10 +259,12 @@ public void testMixedTransactionalData() { ProduceRequest.Builder builder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new 
ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) + new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), + new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) .iterator())) .setAcks((short) -1) .setTimeoutMs(5000), @@ -262,10 +288,12 @@ public void testMixedIdempotentData() { ProduceRequest.Builder builder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) + new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), + new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) .iterator())) .setAcks((short) -1) .setTimeoutMs(5000), @@ -281,7 +309,7 @@ public void testBuilderOldestAndLatestAllowed() { ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(1) .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord)))) @@ -302,7 +330,7 @@ private ProduceRequest createNonIdempotentNonTransactionalRecords() { return ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic") + .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(1) .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord))))) diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java index 2c4f1c792244f..75d9c6d9232c9 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java @@ -17,7 +17,8 @@ package 
org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; @@ -40,8 +41,10 @@ public class ProduceResponseTest { @SuppressWarnings("deprecation") @Test public void produceResponseVersionTest() { - Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>(); - responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); + Map<TopicIdPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>(); + Uuid topicId = Uuid.fromString("5JkYABorYD4w0AQXe9TvBG"); + TopicIdPartition topicIdPartition = new TopicIdPartition(topicId, 0, "test"); + responseData.put(topicIdPartition, new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); ProduceResponse v0Response = new ProduceResponse(responseData); ProduceResponse v1Response = new ProduceResponse(responseData, 10); ProduceResponse v2Response = new ProduceResponse(responseData, 10); @@ -61,14 +64,16 @@ public void produceResponseVersionTest() { assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode())); assertNull(partitionProduceResponse.errorMessage()); assertTrue(partitionProduceResponse.recordErrors().isEmpty()); + assertEquals(topicIdPartition.topicId(), topicProduceResponse.topicId()); } } @SuppressWarnings("deprecation") @Test public void produceResponseRecordErrorsTest() { - Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>(); - TopicPartition tp = new TopicPartition("test", 0); + Map<TopicIdPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>(); + Uuid topicId = Uuid.fromString("4w0AQXe9TvBG5JkYABorYD"); + TopicIdPartition tp = new TopicIdPartition(topicId, 0, "test"); ProduceResponse.PartitionResponse partResponse = new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(3, "Record error")), diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java index aad3be459a682..ba53edcacf8ce 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java @@ -84,7 +84,7 @@ public void testSerdeUnsupportedApiVersionRequest() throws Exception { assertEquals(correlationId, responseHeader.correlationId()); ApiVersionsResponse response = (ApiVersionsResponse) AbstractResponse.parseResponse(ApiKeys.API_VERSIONS, - responseBuffer, (short) 0); + new ByteBufferAccessor(responseBuffer), (short) 0); assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data().errorCode()); assertTrue(response.data().apiKeys().isEmpty()); } @@ -156,7 +156,7 @@ private ByteBuffer produceRequest(short version) { .setTimeoutMs(1); data.topicData().add( new ProduceRequestData.TopicProduceData() - .setName("foo") + .setName("foo") // versions in this test < 13, topicId can't be used .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(42)))); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index b33dec17d9a0f..ffb95673e5517 100644 ---
a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -175,8 +175,8 @@ import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember; import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData; -import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; +import org.apache.kafka.common.message.ListConfigResourcesRequestData; +import org.apache.kafka.common.message.ListConfigResourcesResponseData; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition; @@ -199,6 +199,8 @@ import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection; +import org.apache.kafka.common.message.OffsetFetchRequestData; +import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection; @@ -276,8 +278,6 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; @@ -324,11 +324,12 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; // This class performs tests requests and responses for all API keys public class RequestResponseTest { + private static final Uuid TOPIC_ID = Uuid.randomUuid(); + // Exception includes a message that we verify is not included in error responses private final UnknownServerException unknownServerException = new UnknownServerException("secret"); @@ -453,8 +454,10 @@ public void cannotUseFindCoordinatorV0ToFindTransactionCoordinator() { @Test public void testProduceRequestPartitionSize() { - TopicPartition tp0 = new TopicPartition("test", 0); - TopicPartition tp1 = new TopicPartition("test", 1); + Uuid topicId = Uuid.fromString("e9TvBGX5JkYAB0AQorYD4w"); + String topicName = "foo"; + TopicIdPartition tpId0 = createTopicIdPartition(topicId, 0, topicName); + TopicIdPartition tpId1 = createTopicIdPartition(topicId, 1, topicName); MemoryRecords records0 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, new SimpleRecord("woot".getBytes())); MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, @@ -462,19 +465,24 @@ public void testProduceRequestPartitionSize() { ProduceRequest request = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(asList( - new ProduceRequestData.TopicProduceData().setName(tp0.topic()).setPartitionData( - 
singletonList(new ProduceRequestData.PartitionProduceData().setIndex(tp0.partition()).setRecords(records0))), - new ProduceRequestData.TopicProduceData().setName(tp1.topic()).setPartitionData( - singletonList(new ProduceRequestData.PartitionProduceData().setIndex(tp1.partition()).setRecords(records1)))) - .iterator())) + createTopicProduceData(PRODUCE.latestVersion(), records0, tpId0), + createTopicProduceData(PRODUCE.latestVersion(), records1, tpId1)).iterator())) .setAcks((short) 1) .setTimeoutMs(5000) .setTransactionalId("transactionalId"), true) .build((short) 7); assertEquals(2, request.partitionSizes().size()); - assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0)); - assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1)); + + assertEquals(records0.sizeInBytes(), partitionSize(request.partitionSizes(), tpId0)); + assertEquals(records1.sizeInBytes(), partitionSize(request.partitionSizes(), tpId1)); + } + + private int partitionSize(Map partitionSizes, TopicIdPartition topicIdPartition) { + return partitionSizes.entrySet().stream() + .filter(tpId -> tpId.getKey().topicId() == topicIdPartition.topicId() && + tpId.getKey().partition() == topicIdPartition.partition()).map(Map.Entry::getValue) + .findFirst().get(); } @Test @@ -487,12 +495,9 @@ public void produceRequestToStringTest() { assertFalse(request.toString(true).contains("numPartitions")); request.clearPartitionRecords(); - try { - request.data(); - fail("dataOrException should fail after clearPartitionRecords()"); - } catch (IllegalStateException e) { - // OK - } + assertThrows(IllegalStateException.class, + request::data, + "DataOrException should fail after clearPartitionRecords()"); // `toString` should behave the same after `clearPartitionRecords` assertFalse(request.toString(false).contains("partitionSizes")); @@ -536,7 +541,7 @@ public void fetchResponseVersionTest() { .setHighWatermark(1000000) .setLogStartOffset(-1) .setRecords(records)); - FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData); + FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData, List.of()); FetchResponse v12Deserialized = FetchResponse.parse(idTestResponse.serialize((short) 12), (short) 12); FetchResponse newestDeserialized = FetchResponse.parse(idTestResponse.serialize(FETCH.latestVersion()), FETCH.latestVersion()); assertTrue(v12Deserialized.topicIds().isEmpty()); @@ -577,7 +582,7 @@ public void testFetchResponseV4() { .setLastStableOffset(6) .setRecords(records)); - FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData); + FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData, List.of()); FetchResponse deserialized = FetchResponse.parse(response.serialize((short) 4), (short) 4); assertEquals(responseData.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().topicPartition(), Map.Entry::getValue)), deserialized.responseData(topicNames, (short) 4)); @@ -604,7 +609,7 @@ public void testFetchResponseShouldNotHaveNullRecords() { TopicIdPartition topicIdPartition = new TopicIdPartition(id, new TopicPartition("test", 0)); LinkedHashMap tpToData = new LinkedHashMap<>(Map.of(topicIdPartition, partitionData)); - fetchResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpToData); + fetchResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpToData, List.of()); validateNoNullRecords(fetchResponse); } @@ -663,7 +668,7 @@ 
private void verifyFetchResponseFullWrite(short version, FetchResponse fetchResp ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer(), responseHeaderVersion); assertEquals(correlationId, responseHeader.correlationId()); - assertEquals(fetchResponse.serialize(version), buf); + assertEquals(fetchResponse.serialize(version).buffer(), buf); FetchResponseData deserialized = new FetchResponseData(new ByteBufferAccessor(buf), version); ObjectSerializationCache serializationCache = new ObjectSerializationCache(); assertEquals(size, responseHeader.size() + deserialized.size(serializationCache, version)); @@ -723,6 +728,14 @@ public void testFetchRequestWithMetadata() { assertEquals(request.isolationLevel(), deserialized.isolationLevel()); } + @Test + public void testJoinGroupRequestV0RebalanceTimeout() { + final short version = 0; + JoinGroupRequest jgr = createJoinGroupRequest(version); + JoinGroupRequest jgr2 = JoinGroupRequest.parse(jgr.serialize(), version); + assertEquals(jgr2.data().rebalanceTimeoutMs(), jgr.data().rebalanceTimeoutMs()); + } + @Test public void testSerializeWithHeader() { CreatableTopicCollection topicsToCreate = new CreatableTopicCollection(1); @@ -770,55 +783,6 @@ public void testSerializeWithInconsistentHeaderVersion() { assertThrows(IllegalArgumentException.class, () -> createTopicsRequest.serializeWithHeader(requestHeader)); } - @Test - public void testOffsetFetchRequestBuilderToStringV0ToV7() { - List stableFlags = asList(true, false); - for (Boolean requireStable : stableFlags) { - String allTopicPartitionsString = new OffsetFetchRequest.Builder( - "someGroup", - requireStable, - null, - false - ).toString(); - - assertTrue(allTopicPartitionsString.contains("groupId='', topics=[]," - + " groups=[OffsetFetchRequestGroup(groupId='someGroup', memberId=null, memberEpoch=-1, topics=null)], requireStable=" + requireStable)); - String string = new OffsetFetchRequest.Builder( - "group1", - requireStable, - singletonList( - new TopicPartition("test11", 1)), - false - ).toString(); - assertTrue(string.contains("test11")); - assertTrue(string.contains("group1")); - assertTrue(string.contains("requireStable=" + requireStable)); - } - } - - @ParameterizedTest - @ValueSource(booleans = {false, true}) - public void testOffsetFetchRequestBuilderToStringV8AndAbove(boolean requireStable) { - String allTopicPartitionsString = new OffsetFetchRequest.Builder( - Collections.singletonMap("someGroup", null), - requireStable, - false - ).toString(); - assertTrue(allTopicPartitionsString.contains("groups=[OffsetFetchRequestGroup" - + "(groupId='someGroup', memberId=null, memberEpoch=-1, topics=null)], requireStable=" + requireStable)); - - String subsetTopicPartitionsString = new OffsetFetchRequest.Builder( - Collections.singletonMap( - "group1", - singletonList(new TopicPartition("test11", 1))), - requireStable, - false - ).toString(); - assertTrue(subsetTopicPartitionsString.contains("test11")); - assertTrue(subsetTopicPartitionsString.contains("group1")); - assertTrue(subsetTopicPartitionsString.contains("requireStable=" + requireStable)); - } - @Test public void testApiVersionsRequestBeforeV3Validation() { for (short version = 0; version < 3; version++) { @@ -897,9 +861,9 @@ public void testUnregisterBrokerResponseWithUnknownServerError() { new UnregisterBrokerRequestData() ).build((short) 0); String customerErrorMessage = "customer error message"; - + UnregisterBrokerResponse response = request.getErrorResponse( - 0, + 0, new RuntimeException(customerErrorMessage) ); @@ 
-915,8 +879,8 @@ private ApiVersionsResponse defaultApiVersionsResponse() { @Test public void testApiVersionResponseParsingFallback() { for (short version : API_VERSIONS.allVersions()) { - ByteBuffer buffer = defaultApiVersionsResponse().serialize((short) 0); - ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, version); + ByteBufferAccessor readable = defaultApiVersionsResponse().serialize((short) 0); + ApiVersionsResponse response = ApiVersionsResponse.parse(readable, version); assertEquals(Errors.NONE.code(), response.data().errorCode()); } } @@ -924,15 +888,16 @@ public void testApiVersionResponseParsingFallback() { @Test public void testApiVersionResponseParsingFallbackException() { for (final short version : API_VERSIONS.allVersions()) { - assertThrows(BufferUnderflowException.class, () -> ApiVersionsResponse.parse(ByteBuffer.allocate(0), version)); + assertThrows(BufferUnderflowException.class, + () -> ApiVersionsResponse.parse(new ByteBufferAccessor(ByteBuffer.allocate(0)), version)); } } @Test public void testApiVersionResponseParsing() { for (short version : API_VERSIONS.allVersions()) { - ByteBuffer buffer = defaultApiVersionsResponse().serialize(version); - ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, version); + ByteBufferAccessor readable = defaultApiVersionsResponse().serialize(version); + ApiVersionsResponse response = ApiVersionsResponse.parse(readable, version); assertEquals(Errors.NONE.code(), response.data().errorCode()); } } @@ -1091,7 +1056,7 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case GET_TELEMETRY_SUBSCRIPTIONS: return createGetTelemetrySubscriptionsRequest(version); case PUSH_TELEMETRY: return createPushTelemetryRequest(version); case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); - case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); + case LIST_CONFIG_RESOURCES: return createListConfigResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsRequest(version); case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); @@ -1164,7 +1129,7 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ALTER_CLIENT_QUOTAS: return createAlterClientQuotasResponse(); case DESCRIBE_USER_SCRAM_CREDENTIALS: return createDescribeUserScramCredentialsResponse(); case ALTER_USER_SCRAM_CREDENTIALS: return createAlterUserScramCredentialsResponse(); - case VOTE: return createVoteResponse(version); + case VOTE: return createVoteResponse(); case BEGIN_QUORUM_EPOCH: return createBeginQuorumEpochResponse(); case END_QUORUM_EPOCH: return createEndQuorumEpochResponse(); case DESCRIBE_QUORUM: return createDescribeQuorumResponse(); @@ -1186,7 +1151,7 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case GET_TELEMETRY_SUBSCRIPTIONS: return createGetTelemetrySubscriptionsResponse(); case PUSH_TELEMETRY: return createPushTelemetryResponse(); case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); - case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); + case LIST_CONFIG_RESOURCES: return createListConfigResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); case SHARE_GROUP_DESCRIBE: return 
createShareGroupDescribeResponse(); @@ -1227,7 +1192,7 @@ private ConsumerGroupDescribeResponse createConsumerGroupDescribeResponse() { .setGroupEpoch(0) .setAssignmentEpoch(0) .setAssignorName("range") - .setMembers(new ArrayList(0)) + .setMembers(new ArrayList<>(0)) )) .setThrottleTimeMs(1000); return new ConsumerGroupDescribeResponse(data); @@ -1486,10 +1451,10 @@ private ShareFetchRequest createShareFetchRequest(short version) { ShareFetchRequestData data = new ShareFetchRequestData() .setGroupId("group") .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() + .setTopics(new ShareFetchRequestData.FetchTopicCollection(List.of(new ShareFetchRequestData.FetchTopic() .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))); + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(List.of(new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(0)).iterator()))).iterator())); return new ShareFetchRequest.Builder(data).build(version); } @@ -1512,24 +1477,24 @@ private ShareFetchResponse createShareFetchResponse() { private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() .setMemberId(Uuid.randomUuid().toString()) - .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() .setTopicId(Uuid.randomUuid()) - .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(List.of(new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) .setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(0) - .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); + .setAcknowledgeTypes(Collections.singletonList((byte) 0))))).iterator()))).iterator())); return new ShareAcknowledgeRequest.Builder(data).build(version); } private ShareAcknowledgeResponse createShareAcknowledgeResponse() { ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + data.setResponses(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection(List.of(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() .setTopicId(Uuid.randomUuid()) .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))))); + .setErrorCode(Errors.NONE.code())))).iterator())); data.setThrottleTimeMs(345); data.setErrorCode(Errors.NONE.code()); return new ShareAcknowledgeResponse(data); @@ -1700,7 +1665,7 @@ private VoteRequest createVoteRequest(short version) { return new VoteRequest.Builder(data).build(version); } - private VoteResponse createVoteResponse(short version) { + private VoteResponse createVoteResponse() { VoteResponseData.PartitionData partitionData = new VoteResponseData.PartitionData() .setErrorCode(Errors.NONE.code()) .setLeaderEpoch(0) @@ -1998,9 +1963,10 @@ private void checkResponse(AbstractResponse response, short version) { // Check for equality and hashCode of the Struct only if indicated (it is likely to fail if any of the fields // in the 
response is a HashMap with multiple elements since ordering of the elements may vary) try { - ByteBuffer serializedBytes = response.serialize(version); - AbstractResponse deserialized = AbstractResponse.parseResponse(response.apiKey(), serializedBytes, version); - ByteBuffer serializedBytes2 = deserialized.serialize(version); + ByteBufferAccessor readable = response.serialize(version); + ByteBuffer serializedBytes = readable.buffer(); + AbstractResponse deserialized = AbstractResponse.parseResponse(response.apiKey(), readable, version); + ByteBuffer serializedBytes2 = deserialized.serialize(version).buffer(); serializedBytes.rewind(); assertEquals(serializedBytes, serializedBytes2, "Response " + response + "failed equality test"); } catch (Exception e) { @@ -2063,7 +2029,7 @@ private FetchRequest createFetchRequest(short version) { private FetchResponse createFetchResponse(Errors error, int sessionId) { return FetchResponse.parse( - FetchResponse.of(error, 25, sessionId, new LinkedHashMap<>()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + FetchResponse.of(error, 25, sessionId, new LinkedHashMap<>(), List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(int sessionId) { @@ -2085,7 +2051,7 @@ private FetchResponse createFetchResponse(int sessionId) { .setAbortedTransactions(abortedTransactions) .setRecords(MemoryRecords.EMPTY)); return FetchResponse.parse(FetchResponse.of(Errors.NONE, 25, sessionId, - responseData).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + responseData, List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(boolean includeAborted) { @@ -2110,7 +2076,7 @@ private FetchResponse createFetchResponse(boolean includeAborted) { .setAbortedTransactions(abortedTransactions) .setRecords(MemoryRecords.EMPTY)); return FetchResponse.parse(FetchResponse.of(Errors.NONE, 25, INVALID_SESSION_ID, - responseData).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + responseData, List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(short version) { @@ -2401,7 +2367,7 @@ private MetadataResponse createMetadataResponse() { } private OffsetCommitRequest createOffsetCommitRequest(short version) { - return new OffsetCommitRequest.Builder(new OffsetCommitRequestData() + return OffsetCommitRequest.Builder.forTopicNames(new OffsetCommitRequestData() .setGroupId("group1") .setMemberId("consumer1") .setGroupInstanceId(null) @@ -2409,6 +2375,7 @@ private OffsetCommitRequest createOffsetCommitRequest(short version) { .setTopics(singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("test") + .setTopicId(TOPIC_ID) .setPartitions(asList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) @@ -2430,6 +2397,7 @@ private OffsetCommitResponse createOffsetCommitResponse() { .setTopics(singletonList( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("test") + .setTopicId(TOPIC_ID) .setPartitions(singletonList( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) @@ -2440,93 +2408,118 @@ private OffsetCommitResponse createOffsetCommitResponse() { } private OffsetFetchRequest createOffsetFetchRequest(short version, boolean requireStable) { - if (version < 8) { - return new OffsetFetchRequest.Builder( - "group1", - requireStable, - singletonList(new TopicPartition("test11", 1)), - false) - .build(version); - 
} - return new OffsetFetchRequest.Builder( - Collections.singletonMap( - "group1", - singletonList(new TopicPartition("test11", 1))), - requireStable, - false) - .build(version); + return OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group1") + .setMemberId(version >= 9 ? "memberid" : null) + .setMemberEpoch(version >= 9 ? 10 : -1) + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(version < 10 ? "test11" : "") + .setTopicId(version >= 10 ? TOPIC_ID : Uuid.ZERO_UUID) + .setPartitionIndexes(List.of(1)) + )) + )), + false, + true + ).build(version); } private OffsetFetchRequest createOffsetFetchRequestWithMultipleGroups(short version, boolean requireStable) { - Map<String, List<TopicPartition>> groupToPartitionMap = new HashMap<>(); - List<TopicPartition> topic1 = singletonList( - new TopicPartition("topic1", 0)); - List<TopicPartition> topic2 = asList( - new TopicPartition("topic1", 0), - new TopicPartition("topic2", 0), - new TopicPartition("topic2", 1)); - List<TopicPartition> topic3 = asList( - new TopicPartition("topic1", 0), - new TopicPartition("topic2", 0), - new TopicPartition("topic2", 1), - new TopicPartition("topic3", 0), - new TopicPartition("topic3", 1), - new TopicPartition("topic3", 2)); - groupToPartitionMap.put("group1", topic1); - groupToPartitionMap.put("group2", topic2); - groupToPartitionMap.put("group3", topic3); - groupToPartitionMap.put("group4", null); - groupToPartitionMap.put("group5", null); - - return new OffsetFetchRequest.Builder( - groupToPartitionMap, - requireStable, - false + return OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic1") + .setPartitionIndexes(List.of(0)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group2") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic1") + .setPartitionIndexes(List.of(0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic2") + .setPartitionIndexes(List.of(0, 1)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group3") + .setTopics(List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic1") + .setPartitionIndexes(List.of(0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic2") + .setPartitionIndexes(List.of(0, 1)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("topic3") + .setPartitionIndexes(List.of(0, 1, 2)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group4") + .setTopics(null), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group5") + .setTopics(null) + )), + false, + true ).build(version); } private OffsetFetchRequest createOffsetFetchRequestForAllPartition(short version, boolean requireStable) { - if (version < 8) { - return new OffsetFetchRequest.Builder( - "group1", - requireStable, - null, - false) - .build(version); - } - return new OffsetFetchRequest.Builder( - Collections.singletonMap( - "group1", null), - requireStable, - false) - .build(version); + return OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) +
.setGroups(List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group1") + .setMemberId(version >= 9 ? "memberid" : null) + .setMemberEpoch(version >= 9 ? 10 : -1) + .setTopics(null) + )), + false, + true + ).build(version); } private OffsetFetchResponse createOffsetFetchResponse(short version) { - Map responseData = new HashMap<>(); - responseData.put(new TopicPartition("test", 0), new OffsetFetchResponse.PartitionData( - 100L, Optional.empty(), "", Errors.NONE)); - responseData.put(new TopicPartition("test", 1), new OffsetFetchResponse.PartitionData( - 100L, Optional.of(10), null, Errors.NONE)); - if (version < 8) { - return new OffsetFetchResponse(Errors.NONE, responseData); - } - int throttleMs = 10; - return new OffsetFetchResponse(throttleMs, Collections.singletonMap("group1", Errors.NONE), - Collections.singletonMap("group1", responseData)); + var group = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group1") + .setTopics(List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("test") + .setPartitions(List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(10) + .setMetadata(null) + )) + )); + return new OffsetFetchResponse.Builder(group).build(version); } private ProduceRequest createProduceRequest(short version) { + TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "test"); MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, new SimpleRecord("woot".getBytes())); return ProduceRequest.builder( new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(records)))).iterator())) + .setTopicData(new ProduceRequestData.TopicProduceDataCollection( + singletonList(createTopicProduceData(version, records, topicIdPartition)).iterator() + )) .setAcks((short) 1) .setTimeoutMs(5000) .setTransactionalId(version >= 3 ? 
"transactionalId" : null), @@ -2534,18 +2527,37 @@ private ProduceRequest createProduceRequest(short version) { .build(version); } + private static ProduceRequestData.TopicProduceData createTopicProduceData(short version, MemoryRecords records, TopicIdPartition tp) { + ProduceRequestData.TopicProduceData topicProduceData = new ProduceRequestData.TopicProduceData() + .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition()) + .setRecords(records))); + if (version >= 13) { + topicProduceData.setTopicId(tp.topicId()); + } else { + topicProduceData.setName(tp.topic()); + } + return topicProduceData; + } + + private static TopicIdPartition createTopicIdPartition(Uuid topicId, int partitionIndex, String topicName) { + return new TopicIdPartition(topicId, partitionIndex, topicName); + } + @SuppressWarnings("deprecation") private ProduceResponse createProduceResponse() { - Map responseData = new HashMap<>(); - responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, + Map responseData = new HashMap<>(); + Uuid topicId = Uuid.fromString("0AQorYD4we9TvBGX5JkYAB"); + responseData.put(new TopicIdPartition(topicId, 0, "test"), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); return new ProduceResponse(responseData, 0); } @SuppressWarnings("deprecation") private ProduceResponse createProduceResponseWithErrorMessage() { - Map responseData = new HashMap<>(); - responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, + Map responseData = new HashMap<>(); + Uuid topicId = Uuid.fromString("0AQorYD4we9TvBGX5JkYAB"); + responseData.put(new TopicIdPartition(topicId, 0, "test"), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, singletonList(new ProduceResponse.RecordError(0, "error message")), "global error message")); return new ProduceResponse(responseData, 0); @@ -3628,15 +3640,18 @@ private PushTelemetryResponse createPushTelemetryResponse() { return new PushTelemetryResponse(response); } - private ListClientMetricsResourcesRequest createListClientMetricsResourcesRequest(short version) { - return new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build(version); + private ListConfigResourcesRequest createListConfigResourcesRequest(short version) { + return version == 0 ? 
+ new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(List.of(ConfigResource.Type.CLIENT_METRICS.id()))).build(version) : + new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build(version); } - private ListClientMetricsResourcesResponse createListClientMetricsResourcesResponse() { - ListClientMetricsResourcesResponseData response = new ListClientMetricsResourcesResponseData(); + private ListConfigResourcesResponse createListConfigResourcesResponse() { + ListConfigResourcesResponseData response = new ListConfigResourcesResponseData(); response.setErrorCode(Errors.NONE.code()); response.setThrottleTimeMs(10); - return new ListClientMetricsResourcesResponse(response); + return new ListConfigResourcesResponse(response); } private InitializeShareGroupStateRequest createInitializeShareGroupStateRequest(short version) { @@ -3785,8 +3800,7 @@ private DeleteShareGroupOffsetsRequest createDeleteShareGroupOffsetsRequest(shor DeleteShareGroupOffsetsRequestData data = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") .setTopics(List.of(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic() - .setTopicName("topic-1") - .setPartitions(List.of(0)))); + .setTopicName("topic-1"))); return new DeleteShareGroupOffsetsRequest.Builder(data).build(version); } @@ -3807,12 +3821,13 @@ private DescribeShareGroupOffsetsResponse createDescribeShareGroupOffsetsRespons private AlterShareGroupOffsetsResponse createAlterShareGroupOffsetsResponse() { AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData() - .setResponses(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() - .setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))) - .setTopicName("topic") - .setTopicId(Uuid.randomUuid()))); + .setResponses(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() + .setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code()))) + .setTopicName("topic") + .setTopicId(Uuid.randomUuid())).iterator())); return new AlterShareGroupOffsetsResponse(data); } @@ -3821,9 +3836,7 @@ private DeleteShareGroupOffsetsResponse createDeleteShareGroupOffsetsResponse() .setResponses(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() .setTopicName("topic-1") .setTopicId(Uuid.randomUuid()) - .setPartitions(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))))); + .setErrorCode(Errors.NONE.code()))); return new DeleteShareGroupOffsetsResponse(data); } @@ -3946,4 +3959,25 @@ public void testInvalidTaggedFieldsWithSaslAuthenticateRequest() { parseRequest(SASL_AUTHENTICATE, SASL_AUTHENTICATE.latestVersion(), accessor)).getMessage(); assertEquals("Error reading byte array of 32767 byte(s): only 3 byte(s) available", msg); } + + @Test + public void testListConfigResourcesRequestV0FailsWithConfigResourceTypeOtherThanClientMetrics() { + // One type which is not CLIENT_METRICS + Arrays.stream(ConfigResource.Type.values()) + .filter(t -> t != ConfigResource.Type.CLIENT_METRICS) + .forEach(t -> { + ListConfigResourcesRequestData data = new 
ListConfigResourcesRequestData() + .setResourceTypes(List.of(t.id())); + assertThrows(UnsupportedVersionException.class, () -> new ListConfigResourcesRequest.Builder(data).build((short) 0)); + }); + + // Multiple types with CLIENT_METRICS + Arrays.stream(ConfigResource.Type.values()) + .filter(t -> t != ConfigResource.Type.CLIENT_METRICS) + .forEach(t -> { + ListConfigResourcesRequestData data = new ListConfigResourcesRequestData() + .setResourceTypes(List.of(t.id(), ConfigResource.Type.CLIENT_METRICS.id())); + assertThrows(UnsupportedVersionException.class, () -> new ListConfigResourcesRequest.Builder(data).build((short) 0)); + }); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java index a0bea9095424f..bd6f98ed33937 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java @@ -57,7 +57,7 @@ public void testParse() { for (short version : ApiKeys.TXN_OFFSET_COMMIT.allVersions()) { TxnOffsetCommitResponse response = TxnOffsetCommitResponse.parse( - MessageUtil.toByteBufferAccessor(data, version).buffer(), version); + MessageUtil.toByteBufferAccessor(data, version), version); assertEquals(expectedErrorCounts, response.errorCounts()); assertEquals(throttleTimeMs, response.throttleTimeMs()); assertEquals(version >= 1, response.shouldClientThrottle(version)); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java index 7450b15599607..e63d1949c8aec 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java @@ -84,9 +84,9 @@ public void testUpdateFeaturesV0() { request = UpdateFeaturesRequest.parse(readable, UpdateFeaturesRequestData.LOWEST_SUPPORTED_VERSION); List updates = new ArrayList<>(request.featureUpdates()); - assertEquals(updates.size(), 2); - assertEquals(updates.get(0).upgradeType(), FeatureUpdate.UpgradeType.SAFE_DOWNGRADE); - assertEquals(updates.get(1).upgradeType(), FeatureUpdate.UpgradeType.UPGRADE); + assertEquals(2, updates.size()); + assertEquals(FeatureUpdate.UpgradeType.SAFE_DOWNGRADE, updates.get(0).upgradeType()); + assertEquals(FeatureUpdate.UpgradeType.UPGRADE, updates.get(1).upgradeType()); } @Test @@ -114,9 +114,9 @@ public void testUpdateFeaturesV1() { request = UpdateFeaturesRequest.parse(readable, UpdateFeaturesRequestData.HIGHEST_SUPPORTED_VERSION); List updates = new ArrayList<>(request.featureUpdates()); - assertEquals(updates.size(), 2); - assertEquals(updates.get(0).upgradeType(), FeatureUpdate.UpgradeType.SAFE_DOWNGRADE); - assertEquals(updates.get(1).upgradeType(), FeatureUpdate.UpgradeType.UPGRADE); + assertEquals(2, updates.size()); + assertEquals(FeatureUpdate.UpgradeType.SAFE_DOWNGRADE, updates.get(0).upgradeType()); + assertEquals(FeatureUpdate.UpgradeType.UPGRADE, updates.get(1).upgradeType()); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java index 59b08fc147691..760b1afc41fa6 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java @@ -39,10 +39,12 @@ import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; import javax.security.auth.login.Configuration; +import static org.apache.kafka.common.security.JaasContext.throwIfLoginModuleIsNotAllowed; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** @@ -224,7 +226,7 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { "SOME-MECHANISM", Collections.emptyMap())); - //Remove default value for org.apache.kafka.disallowed.login.modules + // clear disallowed login modules System.setProperty(DISALLOWED_LOGIN_MODULES_CONFIG, ""); checkConfiguration("com.sun.security.auth.module.JndiLoginModule", LoginModuleControlFlag.REQUIRED, new HashMap<>()); @@ -252,6 +254,39 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { checkEntry(context.configurationEntries().get(0), "com.sun.security.auth.module.LdapLoginModule", LoginModuleControlFlag.REQUISITE, Collections.emptyMap()); } + + @Test + void testAllowedLoginModulesSystemProperty() { + AppConfigurationEntry ldap = new AppConfigurationEntry( + "com.ibm.security.auth.module.LdapLoginModule", + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, + Map.of() + ); + AppConfigurationEntry jndi = new AppConfigurationEntry( + "com.sun.security.auth.module.JndiLoginModule", + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, + Map.of() + ); + // default + throwIfLoginModuleIsNotAllowed(ldap); + + // set allowed list, but not set disallowed list + System.setProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG, "com.ibm.security.auth.module.LdapLoginModule"); + throwIfLoginModuleIsNotAllowed(ldap); + assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(jndi)); + + // set both allowed list and disallowed list + System.setProperty(JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG, "com.ibm.security.auth.module.LdapLoginModule"); + throwIfLoginModuleIsNotAllowed(ldap); + assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(jndi)); + + // set disallowed list, but not set allowed list + System.clearProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG); + IllegalArgumentException error = assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(ldap)); + // Ensure the exception message includes the deprecation warning for the disallowed login modules config + assertTrue(error.getMessage().contains("The system property '" + DISALLOWED_LOGIN_MODULES_CONFIG + "' is deprecated.")); + throwIfLoginModuleIsNotAllowed(jndi); + } @Test public void testNumericOptionWithQuotes() throws Exception { diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java index 8261c90014cf3..13ffba2715d56 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java @@ -44,6 +44,7 @@ import org.apache.kafka.common.network.ChannelState; 
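A minimal sketch of the precedence exercised by the new JaasContextTest case above, assuming throwIfLoginModuleIsNotAllowed and the JaasUtils constants are accessed from the same package as in that test; the sketch class and the module name are illustrative only and not part of the patch:

package org.apache.kafka.common.security;

import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;

public class AllowedLoginModulesSketch {
    public static void main(String[] args) {
        // Illustrative module name; once the allow-list is set, anything not on it is rejected.
        AppConfigurationEntry entry = new AppConfigurationEntry(
                "com.example.TrustedLoginModule",
                LoginModuleControlFlag.REQUIRED,
                Map.of());

        // With the allow-list set, only the listed modules pass validation.
        System.setProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG, "com.example.TrustedLoginModule");
        JaasContext.throwIfLoginModuleIsNotAllowed(entry); // no exception

        // The deprecated disallow-list is ignored for modules that are explicitly allowed,
        // matching the "set both allowed list and disallowed list" branch of the test above.
        System.setProperty(JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG, "com.example.TrustedLoginModule");
        JaasContext.throwIfLoginModuleIsNotAllowed(entry); // still no exception
    }
}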
import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.network.NetworkReceive; import org.apache.kafka.common.network.NetworkSend; import org.apache.kafka.common.network.NetworkTestUtils; import org.apache.kafka.common.network.NioEchoServer; @@ -51,6 +52,7 @@ import org.apache.kafka.common.network.Selector; import org.apache.kafka.common.network.TransportLayer; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.types.SchemaException; import org.apache.kafka.common.requests.AbstractRequest; @@ -119,6 +121,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; @@ -155,6 +158,7 @@ public class SaslAuthenticatorTest { private static final long CONNECTIONS_MAX_REAUTH_MS_VALUE = 100L; private static final int BUFFER_SIZE = 4 * 1024; private static Time time = Time.SYSTEM; + private static boolean needLargeExpiration = false; private NioEchoServer server; private Selector selector; @@ -178,6 +182,7 @@ public void setup() throws Exception { @AfterEach public void teardown() throws Exception { + needLargeExpiration = false; if (server != null) this.server.close(); if (selector != null) @@ -763,7 +768,7 @@ public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exceptio selector.send(new NetworkSend(node, request.toSend(header))); ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion((short) 0)); - ApiVersionsResponse response = ApiVersionsResponse.parse(responseBuffer, (short) 0); + ApiVersionsResponse response = ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), (short) 0); assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data().errorCode()); ApiVersion apiVersion = response.data().apiKeys().find(ApiKeys.API_VERSIONS.id); @@ -822,7 +827,7 @@ public void testInvalidApiVersionsRequest() throws Exception { ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion(version)); ApiVersionsResponse response = - ApiVersionsResponse.parse(responseBuffer, version); + ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), version); assertEquals(Errors.INVALID_REQUEST.code(), response.data().errorCode()); // Send ApiVersionsRequest with a supported version. This should succeed. 
@@ -861,7 +866,7 @@ public void testValidApiVersionsRequest() throws Exception { selector.send(new NetworkSend(node, request.toSend(header))); ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion(version)); - ApiVersionsResponse response = ApiVersionsResponse.parse(responseBuffer, version); + ApiVersionsResponse response = ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), version); assertEquals(Errors.NONE.code(), response.data().errorCode()); // Test that client can authenticate successfully @@ -1607,6 +1612,42 @@ public void testCannotReauthenticateWithDifferentPrincipal() throws Exception { server.verifyReauthenticationMetrics(0, 1); } + @Test + public void testReauthenticateWithLargeReauthValue() throws Exception { + // enable this so we get a token with a very large expiration timestamp + needLargeExpiration = true; + String node = "0"; + SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; + + configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, + List.of(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); + // set a large re-auth timeout on the server side + saslServerConfigs.put(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG, Long.MAX_VALUE); + server = createEchoServer(securityProtocol); + + // set default values for the SASL login configs used to initialize ExpiringCredentialRefreshConfig + saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR); + saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_JITTER); + saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS, SaslConfigs.DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS); + saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, SaslConfigs.DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS); + saslClientConfigs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, AlternateLoginCallbackHandler.class); + + createCustomClientConnection(securityProtocol, OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, node, true); + + // channel should not be null before the SASL handshake + assertNotNull(selector.channel(node)); + + TestUtils.waitForCondition(() -> { + selector.poll(1000); + // this channel should be closed due to session timeout calculation overflow + return selector.channel(node) == null; + }, "channel didn't close with the large re-authentication value"); + + // ensure metrics are as expected + server.verifyAuthenticationMetrics(0, 0); + server.verifyReauthenticationMetrics(0, 0); + } + @Test public void testCorrelationId() { SaslClientAuthenticator authenticator = new SaslClientAuthenticator( @@ -1855,6 +1896,69 @@ public void testSslClientAuthRequiredOverriddenForSaslSslListener() throws Excep verifySslClientAuthForSaslSslListener(false, SslClientAuth.REQUIRED); } + @Test + public void testServerSidePendingSendDuringReauthentication() throws Exception { + SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; + TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); + jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_SERVER, PlainLoginModule.class.getName(), new HashMap<>()); + jaasConfig.setClientOptions("PLAIN", TestServerCallbackHandler.USERNAME, TestServerCallbackHandler.PASSWORD); + String callbackPrefix = ListenerName.forSecurityProtocol(securityProtocol).saslMechanismConfigPrefix("PLAIN"); + saslServerConfigs.put(callbackPrefix + 
BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG, + TestServerCallbackHandler.class.getName()); + server = createEchoServer(securityProtocol); + + String node = "node1"; + try { + createClientConnection(securityProtocol, node); + NetworkTestUtils.waitForChannelReady(selector, node); + server.verifyAuthenticationMetrics(1, 0); + + /* + * Now start the reauthentication on the connection. First, we have to sleep long enough so + * that the next write will cause re-authentication + */ + delay((long) (CONNECTIONS_MAX_REAUTH_MS_VALUE * 1.1)); + server.verifyReauthenticationMetrics(0, 0); + + // block reauthentication from completing + TestServerCallbackHandler.sem.acquire(); + + String prefix = TestUtils.randomString(100); + // send a client request to start a reauthentication. + selector.send(new NetworkSend(node, ByteBufferSend.sizePrefixed(ByteBuffer.wrap((prefix + "-0").getBytes(StandardCharsets.UTF_8))))); + // wait till reauthentication is blocked + TestUtils.waitForCondition(() -> { + selector.poll(10L); + return TestServerCallbackHandler.sem.hasQueuedThreads(); + }, 5000, "Reauthentication is not blocked"); + + // Set the client's channel `send` to null to allow setting a new send on the server's selector. + // Without this, NioEchoServer will throw an error while processing the client request, + // since we're manually setting a server side send to simulate the issue. + TestUtils.setFieldValue(selector.channel(node), "send", null); + + // extract the channel id from the server's selector and directly set a send on it. + String channelId = server.selector().channels().get(0).id(); + String payload = prefix + "-1"; + server.selector().send(new NetworkSend(channelId, ByteBufferSend.sizePrefixed(ByteBuffer.wrap(payload.getBytes(StandardCharsets.UTF_8))))); + // allow reauthentication to complete + TestServerCallbackHandler.sem.release(); + + TestUtils.waitForCondition(() -> { + selector.poll(10L); + for (NetworkReceive receive : selector.completedReceives()) { + assertEquals(payload, new String(Utils.toArray(receive.payload()), StandardCharsets.UTF_8)); + return true; + } + return false; + }, 5000, "Failed to receive the server send after reauthentication"); + + server.verifyReauthenticationMetrics(1, 0); + } finally { + closeClientConnectionIfNecessary(); + } + } + private void verifySslClientAuthForSaslSslListener(boolean useListenerPrefix, SslClientAuth configuredClientAuth) throws Exception { @@ -1936,7 +2040,7 @@ private void createClientConnection(SecurityProtocol securityProtocol, String sa if (enableSaslAuthenticateHeader) createClientConnection(securityProtocol, node); else - createClientConnectionWithoutSaslAuthenticateHeader(securityProtocol, saslMechanism, node); + createCustomClientConnection(securityProtocol, saslMechanism, node, false); } private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception { @@ -2024,15 +2128,13 @@ protected void enableKafkaSaslAuthenticateHeaders(boolean flag) { return server; } - private void createClientConnectionWithoutSaslAuthenticateHeader(final SecurityProtocol securityProtocol, - final String saslMechanism, String node) throws Exception { - - final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); - final Map configs = Collections.emptyMap(); - final JaasContext jaasContext = JaasContext.loadClientContext(configs); - final Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); - - SaslChannelBuilder 
clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, + private SaslChannelBuilder saslChannelBuilderWithoutHeader( + final SecurityProtocol securityProtocol, + final String saslMechanism, + final Map jaasContexts, + final ListenerName listenerName + ) { + return new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, listenerName, false, saslMechanism, null, null, null, time, new LogContext(), null) { @@ -2059,6 +2161,42 @@ protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVe }; } }; + } + + private void createCustomClientConnection( + final SecurityProtocol securityProtocol, + final String saslMechanism, + String node, + boolean withSaslAuthenticateHeader + ) throws Exception { + + final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); + final Map configs = Collections.emptyMap(); + final JaasContext jaasContext = JaasContext.loadClientContext(configs); + final Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); + + SaslChannelBuilder clientChannelBuilder; + if (!withSaslAuthenticateHeader) { + clientChannelBuilder = saslChannelBuilderWithoutHeader(securityProtocol, saslMechanism, jaasContexts, listenerName); + } else { + clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, + securityProtocol, listenerName, false, saslMechanism, + null, null, null, time, new LogContext(), null) { + + @Override + protected SaslClientAuthenticator buildClientAuthenticator(Map configs, + AuthenticateCallbackHandler callbackHandler, + String id, + String serverHost, + String servicePrincipal, + TransportLayer transportLayer, + Subject subject) { + + return new SaslClientAuthenticator(configs, callbackHandler, id, subject, + servicePrincipal, serverHost, saslMechanism, transportLayer, time, new LogContext()); + } + }; + } clientChannelBuilder.configure(saslClientConfigs); this.selector = NetworkTestUtils.createSelector(clientChannelBuilder, time); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -2310,6 +2448,7 @@ public static class TestServerCallbackHandler extends PlainServerCallbackHandler static final String USERNAME = "TestServerCallbackHandler-user"; static final String PASSWORD = "TestServerCallbackHandler-password"; private volatile boolean configured; + public static Semaphore sem = new Semaphore(1); @Override public void configure(Map configs, String mechanism, List jaasConfigEntries) { @@ -2323,7 +2462,14 @@ public void configure(Map configs, String mechanism, List authenticator.authenticate()); + assertThrows(InvalidRequestException.class, authenticator::authenticate); verify(transportLayer, times(2)).read(any(ByteBuffer.class)); } @@ -155,7 +156,7 @@ public void testInvalidRequestHeader() throws IOException { return headerBuffer.remaining(); }); - assertThrows(InvalidRequestException.class, () -> authenticator.authenticate()); + assertThrows(InvalidRequestException.class, authenticator::authenticate); verify(transportLayer, times(2)).read(any(ByteBuffer.class)); } @@ -198,7 +199,7 @@ public void testSessionExpiresAtTokenExpiryDespiteNoReauthIsSet() throws IOExcep ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); 
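The repeated mechanical change in these test hunks is that response parse() methods now take a ByteBufferAccessor (a Readable) rather than the raw ByteBuffer, as in the SaslAuthenticateResponse.parse call just above. A small, purely illustrative helper showing the new call shape; the class and method names below are not from the patch:

import java.nio.ByteBuffer;

import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.requests.ApiVersionsResponse;

public final class ResponseParseSketch {
    private ResponseParseSketch() { }

    // Wrap the raw buffer in a ByteBufferAccessor before handing it to parse().
    public static ApiVersionsResponse parseApiVersions(ByteBuffer buffer, short version) {
        return ApiVersionsResponse.parse(new ByteBufferAccessor(buffer), version);
    }
}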
assertEquals(tokenExpirationDuration.toMillis(), response.sessionLifetimeMs()); } } @@ -231,7 +232,7 @@ public void testSessionExpiresAtMaxReauthTime() throws IOException { ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); assertEquals(maxReauthMs, response.sessionLifetimeMs()); } } @@ -264,11 +265,40 @@ public void testSessionExpiresAtTokenExpiry() throws IOException { ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); assertEquals(tokenExpiryShorterThanMaxReauth.toMillis(), response.sessionLifetimeMs()); } } + @Test + public void testSessionWontExpireWithLargeExpirationTime() throws IOException { + String mechanism = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; + SaslServer saslServer = mock(SaslServer.class); + MockTime time = new MockTime(0, 1, 1000); + // set a Long.MAX_VALUE as the expiration time + Duration largeExpirationTime = Duration.ofMillis(Long.MAX_VALUE); + + try ( + MockedStatic ignored = mockSaslServer(saslServer, mechanism, time, largeExpirationTime); + MockedStatic ignored2 = mockKafkaPrincipal("[principal-type]", "[principal-name"); + TransportLayer transportLayer = mockTransportLayer() + ) { + + SaslServerAuthenticator authenticator = getSaslServerAuthenticatorForOAuth(mechanism, transportLayer, time, largeExpirationTime.toMillis()); + + mockRequest(saslHandshakeRequest(mechanism), transportLayer); + authenticator.authenticate(); + + when(saslServer.isComplete()).thenReturn(false).thenReturn(true); + mockRequest(saslAuthenticateRequest(), transportLayer); + + Throwable t = assertThrows(IllegalArgumentException.class, authenticator::authenticate); + assertEquals(ArithmeticException.class, t.getCause().getClass()); + assertEquals("Cannot convert " + Long.MAX_VALUE + " millisecond to nanosecond due to arithmetic overflow", + t.getMessage()); + } + } + private SaslServerAuthenticator getSaslServerAuthenticatorForOAuth(String mechanism, TransportLayer transportLayer, Time time, Long maxReauth) { Map configs = Collections.singletonMap(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList(mechanism)); diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java index 5980a0d3b3c5f..31c01849bc7df 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java @@ -26,12 +26,12 @@ public class KerberosRuleTest { @Test public void testReplaceParameters() throws BadFormatString { // positive test cases - assertEquals(KerberosRule.replaceParameters("", new String[0]), ""); - assertEquals(KerberosRule.replaceParameters("hello", new String[0]), "hello"); - assertEquals(KerberosRule.replaceParameters("", new String[]{"too", "many", "parameters", "are", "ok"}), ""); - assertEquals(KerberosRule.replaceParameters("hello", new String[]{"too", "many", 
"parameters", "are", "ok"}), "hello"); - assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"too", "many", "parameters", "are", "ok"}), "hello too"); - assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"}), "hello no recursion $1"); + assertEquals("", KerberosRule.replaceParameters("", new String[0])); + assertEquals("hello", KerberosRule.replaceParameters("hello", new String[0])); + assertEquals("", KerberosRule.replaceParameters("", new String[]{"too", "many", "parameters", "are", "ok"})); + assertEquals("hello", KerberosRule.replaceParameters("hello", new String[]{"too", "many", "parameters", "are", "ok"})); + assertEquals("hello too", KerberosRule.replaceParameters("hello $0", new String[]{"too", "many", "parameters", "are", "ok"})); + assertEquals("hello no recursion $1", KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"})); // negative test cases assertThrows( diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidatorTest.java similarity index 73% rename from clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java rename to clients/src/test/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidatorTest.java index 4db20e9ee10d6..5f76f508513b7 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/BrokerJwtValidatorTest.java @@ -15,29 +15,28 @@ * limitations under the License. */ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; import org.jose4j.jwk.PublicJsonWebKey; import org.jose4j.jws.AlgorithmIdentifiers; import org.jose4j.lang.InvalidAlgorithmException; import org.junit.jupiter.api.Test; -import java.util.Collections; +import java.util.Map; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; import static org.junit.jupiter.api.Assertions.assertEquals; -public class ValidatorAccessTokenValidatorTest extends AccessTokenValidatorTest { +public class BrokerJwtValidatorTest extends JwtValidatorTest { @Override - protected AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder builder) { - return new ValidatorAccessTokenValidator(30, - Collections.emptySet(), - null, - (jws, nestingContext) -> builder.jwk().getKey(), - builder.scopeClaimName(), - builder.subjectClaimName()); + protected JwtValidator createJwtValidator(AccessTokenBuilder builder) { + CloseableVerificationKeyResolver resolver = (jws, nestingContext) -> builder.jwk().getKey(); + return new BrokerJwtValidator(resolver); } @Test @@ -72,7 +71,9 @@ public void testMissingSubShouldBeValid() throws Exception { .addCustomClaim(subClaimName, subject) .subjectClaimName(subClaimName) .subject(null); - AccessTokenValidator validator = createAccessTokenValidator(tokenBuilder); + JwtValidator validator = 
createJwtValidator(tokenBuilder); + Map saslConfigs = getSaslConfigs(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, subClaimName); + validator.configure(saslConfigs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()); // Validation should succeed (e.g. signature verification) even if sub claim is missing OAuthBearerToken token = validator.validate(tokenBuilder.build()); @@ -82,7 +83,8 @@ public void testMissingSubShouldBeValid() throws Exception { private void testEncryptionAlgorithm(PublicJsonWebKey jwk, String alg) throws Exception { AccessTokenBuilder builder = new AccessTokenBuilder().jwk(jwk).alg(alg); - AccessTokenValidator validator = createAccessTokenValidator(builder); + JwtValidator validator = createJwtValidator(builder); + validator.configure(getSaslConfigs(), OAUTHBEARER_MECHANISM, getJaasConfigEntries()); String accessToken = builder.build(); OAuthBearerToken token = validator.validate(accessToken); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClaimValidationUtilsTest.java similarity index 72% rename from clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java rename to clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClaimValidationUtilsTest.java index 89387797cdc30..e468b93ba6164 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClaimValidationUtilsTest.java @@ -15,7 +15,10 @@ * limitations under the License. */ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.ClaimValidationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; import org.junit.jupiter.api.Test; @@ -42,15 +45,15 @@ public void testValidateScopes() { @Test public void testValidateScopesDisallowsDuplicates() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", "a"))); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", " a "))); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", "a"))); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", " a "))); } @Test public void testValidateScopesDisallowsEmptyNullAndWhitespace() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", ""))); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", null))); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", " "))); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", ""))); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", null))); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", " "))); } @Test @@ -100,12 +103,12 @@ public void 
testValidateExpirationAllowsZero() { @Test public void testValidateExpirationDisallowsNull() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateExpiration("exp", null)); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateExpiration("exp", null)); } @Test public void testValidateExpirationDisallowsNegatives() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateExpiration("exp", -1L)); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateExpiration("exp", -1L)); } @Test @@ -117,9 +120,9 @@ public void testValidateSubject() { @Test public void testValidateSubjectDisallowsEmptyNullAndWhitespace() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); } @Test @@ -131,9 +134,9 @@ public void testValidateClaimNameOverride() { @Test public void testValidateClaimNameOverrideDisallowsEmptyNullAndWhitespace() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); } @Test @@ -159,7 +162,7 @@ public void testValidateIssuedAtAllowsNull() { @Test public void testValidateIssuedAtDisallowsNegatives() { - assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateIssuedAt("iat", -1L)); + assertThrows(JwtValidatorException.class, () -> ClaimValidationUtils.validateIssuedAt("iat", -1L)); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidatorTest.java new file mode 100644 index 0000000000000..e30fd2a710362 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/ClientJwtValidatorTest.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.utils.Utils; + +import org.junit.jupiter.api.Test; + +import java.util.Base64; + +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class ClientJwtValidatorTest extends JwtValidatorTest { + + @Override + protected JwtValidator createJwtValidator(AccessTokenBuilder builder) { + return new ClientJwtValidator(); + } + + @Test + void testJwtRequiresBase64UrlDecoding() throws Exception { + String header = "{\"alg\":\"HS256\",\"typ\":\"JWT\"}"; + String payload = "{\"sub\": \"jdoe\", \"exp\": 0, \"iat\": 0, \"data\":\">>>___<<<---\"}"; + String signature = "dummysignature"; + String jwt = createJwt(header, payload, signature); + + // Verify that decoding the payload fails for "plain" base 64, but works with URL-safe base 64. + String urlEncodedPayload = Base64.getUrlEncoder().encodeToString(Utils.utf8(payload)); + assertThrows(IllegalArgumentException.class, () -> Base64.getDecoder().decode(urlEncodedPayload)); + assertDoesNotThrow(() -> Base64.getUrlDecoder().decode(urlEncodedPayload)); + + try (JwtValidator validator = createJwtValidator()) { + validator.configure(getSaslConfigs(), OAUTHBEARER_MECHANISM, getJaasConfigEntries()); + assertDoesNotThrow( + () -> validator.validate(jwt), + "Valid, URL-safe base 64-encoded JWT should be decodable" + ); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetrieverTest.java new file mode 100644 index 0000000000000..72d52b4fb5c82 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtRetrieverTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; + +public class DefaultJwtRetrieverTest extends OAuthBearerTest { + + @AfterEach + public void tearDown() throws Exception { + System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); + } + + @Test + public void testConfigureRefreshingFileJwtRetriever() throws Exception { + String expected = createJwt("jdoe"); + String file = tempFile(expected).toURI().toString(); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); + Map configs = Collections.singletonMap(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); + + try (JwtRetriever jwtRetriever = new DefaultJwtRetriever()) { + jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()); + assertEquals(expected, jwtRetriever.retrieve()); + } + } + + @Test + public void testConfigureRefreshingFileJwtRetrieverWithInvalidDirectory() throws IOException { + // Should fail because the parent path doesn't exist. 
+ String file = new File("/tmp/this-directory-does-not-exist/foo.json").toURI().toString(); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); + + try (JwtRetriever jwtRetriever = new DefaultJwtRetriever()) { + assertThrowsWithMessage( + ConfigException.class, + () -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), + "that doesn't exist" + ); + } + } + + @Test + public void testSaslOauthbearerTokenEndpointUrlIsNotAllowed() throws Exception { + // Should fail because the URL was not allowed + String file = tempFile("test data").toURI().toString(); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); + + try (JwtRetriever jwtRetriever = new DefaultJwtRetriever()) { + assertThrowsWithMessage( + ConfigException.class, + () -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), + ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG + ); + } + } + + @Test + public void testConfigureWithAccessTokenFile() throws Exception { + String expected = createJwt("jdoe"); + String file = tempFile(expected).toURI().toString(); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); + + try (DefaultJwtRetriever jwtRetriever = new DefaultJwtRetriever()) { + assertDoesNotThrow(() -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries())); + assertInstanceOf(FileJwtRetriever.class, jwtRetriever.delegate()); + } + } + + @Test + public void testConfigureWithAccessClientCredentials() throws Exception { + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); + Map jaasConfigs = new HashMap<>(); + jaasConfigs.put(CLIENT_ID_CONFIG, "an ID"); + jaasConfigs.put(CLIENT_SECRET_CONFIG, "a secret"); + + try (DefaultJwtRetriever jwtRetriever = new DefaultJwtRetriever()) { + assertDoesNotThrow(() -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(jaasConfigs))); + assertInstanceOf(ClientCredentialsJwtRetriever.class, jwtRetriever.delegate()); + } + } + + @ParameterizedTest + @MethodSource("urlencodeHeaderSupplier") + public void testUrlencodeHeader(Map configs, boolean expectedValue) { + ConfigurationUtils cu = new ConfigurationUtils(configs); + boolean actualValue = ClientCredentialsJwtRetriever.validateUrlencodeHeader(cu); + assertEquals(expectedValue, actualValue); + } + + private static Stream urlencodeHeaderSupplier() { + return Stream.of( + Arguments.of(Collections.emptyMap(), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, null), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, true), true), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, false), false) + ); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidatorTest.java new file mode 100644 index 0000000000000..14c33a012c830 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/DefaultJwtValidatorTest.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; + +import org.jose4j.jws.AlgorithmIdentifiers; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; + +public class DefaultJwtValidatorTest extends OAuthBearerTest { + + @Test + public void testConfigureWithVerificationKeyResolver() { + AccessTokenBuilder builder = new AccessTokenBuilder() + .alg(AlgorithmIdentifiers.RSA_USING_SHA256); + CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); + Map configs = getSaslConfigs(); + DefaultJwtValidator jwtValidator = new DefaultJwtValidator(verificationKeyResolver); + assertDoesNotThrow(() -> jwtValidator.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries())); + assertInstanceOf(BrokerJwtValidator.class, jwtValidator.delegate()); + } + + @Test + public void testConfigureWithoutVerificationKeyResolver() { + Map configs = getSaslConfigs(); + DefaultJwtValidator jwtValidator = new DefaultJwtValidator(); + assertDoesNotThrow(() -> jwtValidator.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries())); + assertInstanceOf(ClientJwtValidator.class, jwtValidator.delegate()); + } + + private CloseableVerificationKeyResolver createVerificationKeyResolver(AccessTokenBuilder builder) { + return (jws, nestingContext) -> builder.jwk().getPublicKey(); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetrieverTest.java new file mode 100644 index 0000000000000..4a4e567dedfdf --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtBearerJwtRetrieverTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.List; +import java.util.Map; + +import javax.security.auth.login.AppConfigurationEntry; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JwtBearerJwtRetrieverTest extends OAuthBearerTest { + + @AfterEach + public void tearDown() throws Exception { + System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); + System.clearProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); + } + + @Test + public void testConfigure() throws Exception { + String tokenEndpointUrl = "https://www.example.com"; + String privateKeyFile = generatePrivateKey().getPath(); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, tokenEndpointUrl); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, privateKeyFile); + + Map configs = getSaslConfigs( + Map.of( + SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, tokenEndpointUrl, + SASL_OAUTHBEARER_ASSERTION_ALGORITHM, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, + SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, privateKeyFile + ) + ); + + List jaasConfigEntries = getJaasConfigEntries(); + + try (JwtBearerJwtRetriever jwtRetriever = new JwtBearerJwtRetriever()) { + assertDoesNotThrow(() -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, jaasConfigEntries)); + } + } + + @Test + public void testConfigureWithMalformedPrivateKey() throws Exception { + String tokenEndpointUrl = "https://www.example.com"; + String malformedPrivateKeyFile = tempFile().getPath(); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, tokenEndpointUrl); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, malformedPrivateKeyFile); + + Map configs = getSaslConfigs( + Map.of( + 
SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, tokenEndpointUrl, + SASL_OAUTHBEARER_ASSERTION_ALGORITHM, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, + SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, malformedPrivateKeyFile + ) + ); + + List jaasConfigEntries = getJaasConfigEntries(); + + try (JwtBearerJwtRetriever jwtRetriever = new JwtBearerJwtRetriever()) { + JwtRetrieverException e = assertThrows(JwtRetrieverException.class, () -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, jaasConfigEntries)); + assertNotNull(e.getCause()); + assertInstanceOf(GeneralSecurityException.class, e.getCause()); + } + } + + @Test + public void testConfigureWithStaticAssertion() throws Exception { + String tokenEndpointUrl = "https://www.example.com"; + String assertionFile = tempFile(createJwt("jdoe")).getPath(); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, tokenEndpointUrl); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, assertionFile); + + Map configs = getSaslConfigs( + Map.of( + SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, tokenEndpointUrl, + SASL_OAUTHBEARER_ASSERTION_ALGORITHM, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, + SASL_OAUTHBEARER_ASSERTION_FILE, assertionFile + ) + ); + + List jaasConfigEntries = getJaasConfigEntries(); + + try (JwtBearerJwtRetriever jwtRetriever = new JwtBearerJwtRetriever()) { + assertDoesNotThrow(() -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, jaasConfigEntries)); + } + } + + @Test + public void testConfigureWithInvalidPassphrase() throws Exception { + String tokenEndpointUrl = "https://www.example.com"; + String privateKeyFile = generatePrivateKey().getPath(); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, tokenEndpointUrl); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, privateKeyFile); + + Map configs = getSaslConfigs( + Map.of( + SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, tokenEndpointUrl, + SASL_OAUTHBEARER_ASSERTION_ALGORITHM, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, + SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, privateKeyFile, + SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE, "this-passphrase-is-invalid" + ) + ); + + List jaasConfigEntries = getJaasConfigEntries(); + + try (JwtBearerJwtRetriever jwtRetriever = new JwtBearerJwtRetriever()) { + JwtRetrieverException e = assertThrows(JwtRetrieverException.class, () -> jwtRetriever.configure(configs, OAUTHBEARER_MECHANISM, jaasConfigEntries)); + assertNotNull(e.getCause()); + assertInstanceOf(IOException.class, e.getCause()); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorTest.java similarity index 57% rename from clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java rename to clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorTest.java index 0adaf34bbbeea..09e01c42f3c80 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/JwtValidatorTest.java @@ -15,7 +15,10 @@ * limitations under the License. 
*/ -package org.apache.kafka.common.security.oauthbearer.internals.secured; +package org.apache.kafka.common.security.oauthbearer; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; import org.jose4j.jws.AlgorithmIdentifiers; import org.jose4j.jwx.HeaderParameterNames; @@ -26,67 +29,67 @@ import static org.junit.jupiter.api.Assertions.assertThrows; @TestInstance(Lifecycle.PER_CLASS) -public abstract class AccessTokenValidatorTest extends OAuthBearerTest { +public abstract class JwtValidatorTest extends OAuthBearerTest { - protected abstract AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder accessTokenBuilder) throws Exception; + protected abstract JwtValidator createJwtValidator(AccessTokenBuilder accessTokenBuilder) throws Exception; - protected AccessTokenValidator createAccessTokenValidator() throws Exception { + protected JwtValidator createJwtValidator() throws Exception { AccessTokenBuilder builder = new AccessTokenBuilder(); - return createAccessTokenValidator(builder); + return createJwtValidator(builder); } @Test public void testNull() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(null), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + JwtValidator validator = createJwtValidator(); + assertThrowsWithMessage(JwtValidatorException.class, () -> validator.validate(null), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testEmptyString() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(""), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + JwtValidator validator = createJwtValidator(); + assertThrowsWithMessage(JwtValidatorException.class, () -> validator.validate(""), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testWhitespace() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(" "), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + JwtValidator validator = createJwtValidator(); + assertThrowsWithMessage(JwtValidatorException.class, () -> validator.validate(" "), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testEmptySections() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(".."), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + JwtValidator validator = createJwtValidator(); + assertThrowsWithMessage(JwtValidatorException.class, () -> validator.validate(".."), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testMissingHeader() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); + JwtValidator validator = createJwtValidator(); String header = ""; String payload = createBase64JsonJwtSection(node -> { }); String signature = ""; String accessToken = String.format("%s.%s.%s", header, payload, 
signature); - assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + assertThrows(JwtValidatorException.class, () -> validator.validate(accessToken)); } @Test public void testMissingPayload() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); + JwtValidator validator = createJwtValidator(); String header = createBase64JsonJwtSection(node -> node.put(HeaderParameterNames.ALGORITHM, AlgorithmIdentifiers.NONE)); String payload = ""; String signature = ""; String accessToken = String.format("%s.%s.%s", header, payload, signature); - assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + assertThrows(JwtValidatorException.class, () -> validator.validate(accessToken)); } @Test public void testMissingSignature() throws Exception { - AccessTokenValidator validator = createAccessTokenValidator(); + JwtValidator validator = createJwtValidator(); String header = createBase64JsonJwtSection(node -> node.put(HeaderParameterNames.ALGORITHM, AlgorithmIdentifiers.NONE)); String payload = createBase64JsonJwtSection(node -> { }); String signature = ""; String accessToken = String.format("%s.%s.%s", header, payload, signature); - assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + assertThrows(JwtValidatorException.class, () -> validator.validate(accessToken)); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java index 5b1b2976662b6..54857cd8cc07b 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java @@ -21,26 +21,15 @@ import org.apache.kafka.common.security.auth.SaslExtensionsCallback; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse; import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetriever; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; -import org.apache.kafka.common.security.oauthbearer.internals.secured.FileTokenRetriever; -import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpAccessTokenRetriever; import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; -import org.apache.kafka.common.utils.Utils; import org.jose4j.jws.AlgorithmIdentifiers; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import java.io.File; import java.io.IOException; -import java.util.Base64; -import java.util.Calendar; -import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.TimeZone; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; @@ -49,8 +38,9 @@ import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; import static 
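// The "Malformed JWT provided; expected three sections (header, payload, and signature)" assertions above
// come down to the compact JWS serialization: three base64url-encoded sections joined by '.'. Below is a
// minimal standalone sketch of that coarse shape check using only the JDK; a real validator also decodes
// the sections and verifies claims and signatures, so this is an illustration, not the validator itself.
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class JwtShapeSketch {
    static boolean looksLikeCompactJws(String token) {
        if (token == null)
            return false;
        // Exactly three dot-separated sections, with a non-empty header and payload
        // (so "", " ", and ".." are all rejected, as in the tests above).
        String[] sections = token.split("\\.", -1);
        return sections.length == 3 && !sections[0].isBlank() && !sections[1].isBlank();
    }

    public static void main(String[] args) {
        Base64.Encoder enc = Base64.getUrlEncoder().withoutPadding();
        String header = enc.encodeToString("{\"alg\":\"none\"}".getBytes(StandardCharsets.UTF_8));
        String payload = enc.encodeToString("{\"sub\":\"jdoe\"}".getBytes(StandardCharsets.UTF_8));
        String jwt = String.format("%s.%s.%s", header, payload, "");   // unsigned, for illustration only

        System.out.println(looksLikeCompactJws(jwt));            // true
        System.out.println(looksLikeCompactJws("not a JWT"));    // false
        System.out.println(looksLikeCompactJws(".."));           // false
    }
}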
org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.apache.kafka.test.TestUtils.tempFile; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -58,6 +48,7 @@ import static org.junit.jupiter.api.Assertions.fail; public class OAuthBearerLoginCallbackHandlerTest extends OAuthBearerTest { + @AfterEach public void tearDown() throws Exception { System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); @@ -70,9 +61,10 @@ public void testHandleTokenCallback() throws Exception { .jwk(createRsaJwk()) .alg(AlgorithmIdentifiers.RSA_USING_SHA256); String accessToken = builder.build(); - AccessTokenRetriever accessTokenRetriever = () -> accessToken; - - OAuthBearerLoginCallbackHandler handler = createHandler(accessTokenRetriever, configs); + JwtRetriever jwtRetriever = () -> accessToken; + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); @@ -91,7 +83,6 @@ public void testHandleTokenCallback() throws Exception { @Test public void testHandleSaslExtensionsCallback() throws Exception { - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); Map jaasConfig = new HashMap<>(); @@ -100,7 +91,11 @@ public void testHandleSaslExtensionsCallback() throws Exception { jaasConfig.put("extension_foo", "1"); jaasConfig.put("extension_bar", 2); jaasConfig.put("EXTENSION_baz", "3"); - configureHandler(handler, configs, jaasConfig); + + JwtRetriever jwtRetriever = createJwtRetriever(); + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(jaasConfig), jwtRetriever, jwtValidator); try { SaslExtensionsCallback callback = new SaslExtensionsCallback(); @@ -121,14 +116,17 @@ public void testHandleSaslExtensionsCallback() throws Exception { public void testHandleSaslExtensionsCallbackWithInvalidExtension() { String illegalKey = "extension_" + OAuthBearerClientInitialResponse.AUTH_KEY; - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); Map jaasConfig = new HashMap<>(); jaasConfig.put(CLIENT_ID_CONFIG, "an ID"); jaasConfig.put(CLIENT_SECRET_CONFIG, "a secret"); jaasConfig.put(illegalKey, "this key isn't allowed per OAuthBearerClientInitialResponse.validateExtensions"); - configureHandler(handler, configs, jaasConfig); + + JwtRetriever jwtRetriever = createJwtRetriever(); + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new 
OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(jaasConfig), jwtRetriever, jwtValidator); try { SaslExtensionsCallback callback = new SaslExtensionsCallback(); @@ -143,10 +141,10 @@ public void testHandleSaslExtensionsCallbackWithInvalidExtension() { @Test public void testInvalidCallbackGeneratesUnsupportedCallbackException() { Map configs = getSaslConfigs(); + JwtRetriever jwtRetriever = () -> "test"; + JwtValidator jwtValidator = createJwtValidator(); OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - AccessTokenRetriever accessTokenRetriever = () -> "foo"; - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); - handler.init(accessTokenRetriever, accessTokenValidator); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); try { Callback unsupportedCallback = new Callback() { }; @@ -160,21 +158,23 @@ public void testInvalidCallbackGeneratesUnsupportedCallbackException() { public void testInvalidAccessToken() throws Exception { testInvalidAccessToken("this isn't valid", "Malformed JWT provided"); testInvalidAccessToken("this.isn't.valid", "malformed Base64 URL encoded value"); - testInvalidAccessToken(createAccessKey("this", "isn't", "valid"), "malformed JSON"); - testInvalidAccessToken(createAccessKey("{}", "{}", "{}"), "exp value must be non-null"); + testInvalidAccessToken(createJwt("this", "isn't", "valid"), "malformed JSON"); + testInvalidAccessToken(createJwt("{}", "{}", "{}"), "exp value must be non-null"); } @Test public void testMissingAccessToken() { - AccessTokenRetriever accessTokenRetriever = () -> { - throw new IOException("The token endpoint response access_token value must be non-null"); - }; Map configs = getSaslConfigs(); - OAuthBearerLoginCallbackHandler handler = createHandler(accessTokenRetriever, configs); + JwtRetriever jwtRetriever = () -> { + throw new JwtRetrieverException("The token endpoint response access_token value must be non-null"); + }; + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); - assertThrowsWithMessage(IOException.class, + assertThrowsWithMessage(JwtRetrieverException.class, () -> handler.handle(new Callback[]{callback}), "token endpoint response access_token value must be non-null"); } finally { @@ -184,19 +184,18 @@ public void testMissingAccessToken() { @Test public void testFileTokenRetrieverHandlesNewline() throws IOException { - Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); - long cur = cal.getTimeInMillis() / 1000; - String exp = "" + (cur + 60 * 60); // 1 hour in future - String iat = "" + cur; - - String expected = createAccessKey("{}", String.format("{\"exp\":%s, \"iat\":%s, \"sub\":\"subj\"}", exp, iat), "sign"); + String expected = createJwt("jdoe"); String withNewline = expected + "\n"; - File tmpDir = createTempDir("access-token"); - File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", withNewline); + String accessTokenFile = tempFile(withNewline).toURI().toString(); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile); + JwtRetriever jwtRetriever = 
new FileJwtRetriever(); + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); - Map configs = getSaslConfigs(); - OAuthBearerLoginCallbackHandler handler = createHandler(new FileTokenRetriever(accessTokenFile.toPath()), configs); OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); try { handler.handle(new Callback[]{callback}); @@ -211,39 +210,15 @@ public void testFileTokenRetrieverHandlesNewline() throws IOException { @Test public void testNotConfigured() { OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - assertThrowsWithMessage(IllegalStateException.class, () -> handler.handle(new Callback[] {}), "first call the configure or init method"); - } - - @Test - public void testConfigureWithAccessTokenFile() throws Exception { - String expected = "{}"; - - File tmpDir = createTempDir("access-token"); - File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", expected); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); - - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); - Map jaasConfigs = Collections.emptyMap(); - configureHandler(handler, configs, jaasConfigs); - assertInstanceOf(FileTokenRetriever.class, handler.getAccessTokenRetriever()); - } - - @Test - public void testConfigureWithAccessClientCredentials() { - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); - Map jaasConfigs = new HashMap<>(); - jaasConfigs.put(CLIENT_ID_CONFIG, "an ID"); - jaasConfigs.put(CLIENT_SECRET_CONFIG, "a secret"); - configureHandler(handler, configs, jaasConfigs); - assertInstanceOf(HttpAccessTokenRetriever.class, handler.getAccessTokenRetriever()); + assertThrowsWithMessage(IllegalStateException.class, () -> handler.handle(new Callback[] {}), "first call the configure method"); } private void testInvalidAccessToken(String accessToken, String expectedMessageSubstring) throws Exception { Map configs = getSaslConfigs(); - OAuthBearerLoginCallbackHandler handler = createHandler(() -> accessToken, configs); + JwtRetriever jwtRetriever = () -> accessToken; + JwtValidator jwtValidator = createJwtValidator(); + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); @@ -260,19 +235,11 @@ private void testInvalidAccessToken(String accessToken, String expectedMessageSu } } - private String createAccessKey(String header, String payload, String signature) { - Base64.Encoder enc = Base64.getEncoder(); - header = enc.encodeToString(Utils.utf8(header)); - payload = enc.encodeToString(Utils.utf8(payload)); - signature = enc.encodeToString(Utils.utf8(signature)); - return String.format("%s.%s.%s", header, payload, signature); + private static DefaultJwtRetriever createJwtRetriever() { + return new DefaultJwtRetriever(); } - private OAuthBearerLoginCallbackHandler 
createHandler(AccessTokenRetriever accessTokenRetriever, Map configs) { - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); - handler.init(accessTokenRetriever, accessTokenValidator); - return handler; + private static DefaultJwtValidator createJwtValidator() { + return new DefaultJwtValidator(); } - } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java index d682a05ec11cc..adabec6bc958d 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java @@ -17,27 +17,29 @@ package org.apache.kafka.common.security.oauthbearer; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; -import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; -import org.apache.kafka.common.utils.Utils; import org.jose4j.jws.AlgorithmIdentifiers; import org.junit.jupiter.api.Test; +import java.io.IOException; import java.util.Arrays; -import java.util.Base64; import java.util.List; import java.util.Map; import javax.security.auth.callback.Callback; +import javax.security.auth.login.AppConfigurationEntry; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class OAuthBearerValidatorCallbackHandlerTest extends OAuthBearerTest { @@ -53,7 +55,16 @@ public void testBasic() throws Exception { String accessToken = builder.build(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_EXPECTED_AUDIENCE, allAudiences); - OAuthBearerValidatorCallbackHandler handler = createHandler(configs, builder); + CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); + JwtValidator jwtValidator = createJwtValidator(verificationKeyResolver); + OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); + handler.configure( + configs, + OAUTHBEARER_MECHANISM, + getJaasConfigEntries(), + verificationKeyResolver, + jwtValidator + ); try { OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(accessToken); @@ -77,13 +88,90 @@ public void testInvalidAccessToken() throws Exception { String substring = "invalid_token"; assertInvalidAccessTokenFails("this isn't valid", substring); assertInvalidAccessTokenFails("this.isn't.valid", substring); - 
assertInvalidAccessTokenFails(createAccessKey("this", "isn't", "valid"), substring); - assertInvalidAccessTokenFails(createAccessKey("{}", "{}", "{}"), substring); + assertInvalidAccessTokenFails(createJwt("this", "isn't", "valid"), substring); + assertInvalidAccessTokenFails(createJwt("{}", "{}", "{}"), substring); + } + + @Test + public void testHandlerConfigureThrowsException() throws IOException { + KafkaException configureError = new KafkaException("configure() error"); + + AccessTokenBuilder builder = new AccessTokenBuilder() + .alg(AlgorithmIdentifiers.RSA_USING_SHA256); + CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); + JwtValidator jwtValidator = new JwtValidator() { + @Override + public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + throw configureError; + } + + @Override + public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { + return null; + } + }; + + OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); + + // An error initializing the JwtValidator should cause OAuthBearerValidatorCallbackHandler.init() to fail. + KafkaException error = assertThrows( + KafkaException.class, + () -> handler.configure( + getSaslConfigs(), + OAUTHBEARER_MECHANISM, + getJaasConfigEntries(), + verificationKeyResolver, + jwtValidator + ) + ); + assertEquals(configureError, error); + } + + @Test + public void testHandlerCloseDoesNotThrowException() throws IOException { + AccessTokenBuilder builder = new AccessTokenBuilder() + .alg(AlgorithmIdentifiers.RSA_USING_SHA256); + CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); + JwtValidator jwtValidator = new JwtValidator() { + @Override + public void close() throws IOException { + throw new IOException("close() error"); + } + + @Override + public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { + return null; + } + }; + + OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); + handler.configure( + getSaslConfigs(), + OAUTHBEARER_MECHANISM, + getJaasConfigEntries(), + verificationKeyResolver, + jwtValidator + ); + + // An error closings the JwtValidator should *not* cause OAuthBearerValidatorCallbackHandler.close() to fail. 
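// The assertion that follows exercises a "best-effort close": a failure while releasing the underlying
// JwtValidator is swallowed rather than propagated out of the callback handler's close(). A minimal
// standalone sketch of that pattern is shown here; the helper name is illustrative, not the Kafka code.
class QuietCloseSketch {
    static void closeQuietly(AutoCloseable closeable, String name) {
        if (closeable == null)
            return;
        try {
            closeable.close();
        } catch (Exception e) {
            // Log and continue; a cleanup problem should not mask the caller's own shutdown path.
            System.err.printf("Ignoring error while closing %s: %s%n", name, e.getMessage());
        }
    }

    public static void main(String[] args) {
        AutoCloseable failing = () -> {
            throw new java.io.IOException("close() error");
        };
        closeQuietly(failing, "jwtValidator");   // prints a warning, does not throw
    }
}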
+ assertDoesNotThrow(handler::close); } private void assertInvalidAccessTokenFails(String accessToken, String expectedMessageSubstring) throws Exception { + AccessTokenBuilder builder = new AccessTokenBuilder() + .alg(AlgorithmIdentifiers.RSA_USING_SHA256); Map configs = getSaslConfigs(); - OAuthBearerValidatorCallbackHandler handler = createHandler(configs, new AccessTokenBuilder()); + CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); + JwtValidator jwtValidator = createJwtValidator(verificationKeyResolver); + + OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); + handler.configure( + configs, + OAUTHBEARER_MECHANISM, + getJaasConfigEntries(), + verificationKeyResolver, + jwtValidator + ); try { OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(accessToken); @@ -98,22 +186,11 @@ private void assertInvalidAccessTokenFails(String accessToken, String expectedMe } } - private OAuthBearerValidatorCallbackHandler createHandler(Map options, - AccessTokenBuilder builder) { - OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); - CloseableVerificationKeyResolver verificationKeyResolver = (jws, nestingContext) -> - builder.jwk().getPublicKey(); - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(options, verificationKeyResolver); - handler.init(verificationKeyResolver, accessTokenValidator); - return handler; + private JwtValidator createJwtValidator(CloseableVerificationKeyResolver verificationKeyResolver) { + return new DefaultJwtValidator(verificationKeyResolver); } - private String createAccessKey(String header, String payload, String signature) { - Base64.Encoder enc = Base64.getEncoder(); - header = enc.encodeToString(Utils.utf8(header)); - payload = enc.encodeToString(Utils.utf8(payload)); - signature = enc.encodeToString(Utils.utf8(signature)); - return String.format("%s.%s.%s", header, payload, signature); + private CloseableVerificationKeyResolver createVerificationKeyResolver(AccessTokenBuilder builder) { + return (jws, nestingContext) -> builder.jwk().getPublicKey(); } - } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java index cc910e0d16c4f..b0828d5d2815e 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java @@ -36,6 +36,10 @@ public class AccessTokenBuilder { + private final String scopeClaimName = "scope"; + + private final Long issuedAtSeconds; + private final ObjectMapper objectMapper = new ObjectMapper(); private String alg; @@ -48,10 +52,6 @@ public class AccessTokenBuilder { private Object scope = "engineering"; - private final String scopeClaimName = "scope"; - - private final Long issuedAtSeconds; - private Long expirationSeconds; private PublicJsonWebKey jwk; diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java deleted file mode 100644 index 3e85f7b0ce4fa..0000000000000 --- 
a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import org.apache.kafka.common.config.ConfigException; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.File; -import java.util.Collections; -import java.util.Map; -import java.util.stream.Stream; - -import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; -import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; -import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; -import static org.junit.jupiter.api.Assertions.assertEquals; - -public class AccessTokenRetrieverFactoryTest extends OAuthBearerTest { - - @AfterEach - public void tearDown() throws Exception { - System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); - } - - @Test - public void testConfigureRefreshingFileAccessTokenRetriever() throws Exception { - String expected = "{}"; - - File tmpDir = createTempDir("access-token"); - File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", expected); - - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); - Map configs = Collections.singletonMap(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); - Map jaasConfig = Collections.emptyMap(); - - try (AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, jaasConfig)) { - accessTokenRetriever.init(); - assertEquals(expected, accessTokenRetriever.retrieve()); - } - } - - @Test - public void testConfigureRefreshingFileAccessTokenRetrieverWithInvalidDirectory() { - // Should fail because the parent path doesn't exist. 
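// The removed test above relied on the retriever factory rejecting a token file whose parent directory
// does not exist. The same precondition can be expressed directly with java.nio.file; this is only a
// standalone illustration of the check, not the (now removed) factory code.
import java.nio.file.Files;
import java.nio.file.Path;

class ParentDirectoryCheckSketch {
    static void requireUsableFile(Path file) {
        Path parent = file.toAbsolutePath().getParent();
        if (parent == null || !Files.isDirectory(parent))
            throw new IllegalArgumentException("Parent directory " + parent + " doesn't exist");
        if (!Files.isReadable(file))
            throw new IllegalArgumentException("File " + file + " doesn't exist or has no read permission");
    }

    public static void main(String[] args) {
        requireUsableFile(Path.of("/tmp/this-directory-does-not-exist/foo.json"));   // throws
    }
}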
- String file = new File("/tmp/this-directory-does-not-exist/foo.json").toURI().toString(); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); - Map jaasConfig = Collections.emptyMap(); - assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, jaasConfig), "that doesn't exist"); - } - - @Test - public void testConfigureRefreshingFileAccessTokenRetrieverWithInvalidFile() throws Exception { - // Should fail because while the parent path exists, the file itself doesn't. - File tmpDir = createTempDir("this-directory-does-exist"); - File accessTokenFile = new File(tmpDir, "this-file-does-not-exist.json"); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); - Map jaasConfig = Collections.emptyMap(); - assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, jaasConfig), "that doesn't exist"); - } - - @Test - public void testSaslOauthbearerTokenEndpointUrlIsNotAllowed() throws Exception { - // Should fail if the URL is not allowed - File tmpDir = createTempDir("not_allowed"); - File accessTokenFile = new File(tmpDir, "not_allowed.json"); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); - assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, Collections.emptyMap()), - ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); - } - - @ParameterizedTest - @MethodSource("urlencodeHeaderSupplier") - public void testUrlencodeHeader(Map configs, boolean expectedValue) { - ConfigurationUtils cu = new ConfigurationUtils(configs); - boolean actualValue = AccessTokenRetrieverFactory.validateUrlencodeHeader(cu); - assertEquals(expectedValue, actualValue); - } - - private static Stream urlencodeHeaderSupplier() { - return Stream.of( - Arguments.of(Collections.emptyMap(), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), - Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, null), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), - Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, true), true), - Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, false), false) - ); - } - -} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java deleted file mode 100644 index 2fd02e3f9a826..0000000000000 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler; - -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.util.Map; - -public class AccessTokenValidatorFactoryTest extends OAuthBearerTest { - - @Test - public void testConfigureThrowsExceptionOnAccessTokenValidatorInit() { - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - AccessTokenRetriever accessTokenRetriever = new AccessTokenRetriever() { - @Override - public void init() throws IOException { - throw new IOException("My init had an error!"); - } - @Override - public String retrieve() { - return "dummy"; - } - }; - - Map configs = getSaslConfigs(); - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); - - assertThrowsWithMessage( - KafkaException.class, () -> handler.init(accessTokenRetriever, accessTokenValidator), "encountered an error when initializing"); - } - - @Test - public void testConfigureThrowsExceptionOnAccessTokenValidatorClose() { - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - AccessTokenRetriever accessTokenRetriever = new AccessTokenRetriever() { - @Override - public void close() throws IOException { - throw new IOException("My close had an error!"); - } - @Override - public String retrieve() { - return "dummy"; - } - }; - - Map configs = getSaslConfigs(); - AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); - handler.init(accessTokenRetriever, accessTokenValidator); - - // Basically asserting this doesn't throw an exception :( - handler.close(); - } - -} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFileTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFileTest.java new file mode 100644 index 0000000000000..e22056c663db1 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CachedFileTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.utils.Utils; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.util.List; + +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class CachedFileTest extends OAuthBearerTest { + + @Test + public void testStaticPolicy() throws Exception { + File tmpFile = tempFile(" foo "); + + CachedFile.Transformer transformer = (file, contents) -> contents.trim(); + CachedFile.RefreshPolicy refreshPolicy = CachedFile.RefreshPolicy.staticPolicy(); + CachedFile cachedFile = new CachedFile<>(tmpFile, transformer, refreshPolicy); + + assertEquals(cachedFile.lastModified(), tmpFile.lastModified()); + assertEquals(7, cachedFile.size()); + assertEquals(" foo ", cachedFile.contents()); + assertEquals("foo", cachedFile.transformed()); + + // Sleep for a bit to make sure our timestamp changes, then update the file. + Utils.sleep(10); + Files.writeString(tmpFile.toPath(), " bar baz ", StandardOpenOption.WRITE, StandardOpenOption.APPEND); + + assertNotEquals(cachedFile.lastModified(), tmpFile.lastModified()); + assertNotEquals(cachedFile.size(), tmpFile.length()); + assertEquals(7, cachedFile.size()); + assertEquals(" foo ", cachedFile.contents()); + assertEquals("foo", cachedFile.transformed()); + } + + @Test + public void testLastModifiedPolicy() throws Exception { + File tmpFile = tempFile(" foo "); + + CachedFile.Transformer transformer = (file, contents) -> contents.trim(); + CachedFile.RefreshPolicy refreshPolicy = CachedFile.RefreshPolicy.lastModifiedPolicy(); + CachedFile cachedFile = new CachedFile<>(tmpFile, transformer, refreshPolicy); + + assertEquals(cachedFile.lastModified(), tmpFile.lastModified()); + assertEquals(7, cachedFile.size()); + assertEquals(" foo ", cachedFile.contents()); + assertEquals("foo", cachedFile.transformed()); + + // Sleep for a bit to make sure our timestamp changes, then update the file. + Utils.sleep(10); + Files.writeString(tmpFile.toPath(), " bar baz ", StandardOpenOption.WRITE, StandardOpenOption.APPEND); + + assertEquals(18, cachedFile.size()); + assertEquals(" foo bar baz ", cachedFile.contents()); + assertEquals("foo bar baz", cachedFile.transformed()); + } + + @Test + public void testFileDoesNotExist() throws IOException { + File tmpFile = tempFile(" foo "); + + CachedFile.RefreshPolicy refreshPolicy = CachedFile.RefreshPolicy.lastModifiedPolicy(); + CachedFile cachedFile = new CachedFile<>(tmpFile, CachedFile.STRING_NOOP_TRANSFORMER, refreshPolicy); + + // All is well... 
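// testStaticPolicy and testLastModifiedPolicy above capture the two refresh behaviours of the new
// CachedFile helper: a static policy never re-reads the file, while the last-modified policy re-reads
// whenever File.lastModified() changes. Below is a minimal standalone sketch of the last-modified idea
// using only the JDK; it illustrates the concept and is not the CachedFile implementation.
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;

class LastModifiedCacheSketch {
    private final File file;
    private long cachedLastModified = -1;
    private String cachedContents;

    LastModifiedCacheSketch(File file) {
        this.file = file;
    }

    synchronized String contents() {
        long lastModified = file.lastModified();
        if (lastModified != cachedLastModified) {
            try {
                // Only hit the file system again when the timestamp has moved.
                cachedContents = Files.readString(file.toPath());
                cachedLastModified = lastModified;
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
        return cachedContents;
    }
}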
+ assertTrue(tmpFile.exists()); + assertDoesNotThrow(cachedFile::size); + assertDoesNotThrow(cachedFile::lastModified); + assertDoesNotThrow(cachedFile::contents); + assertDoesNotThrow(cachedFile::transformed); + + // Delete the file and ensure that exceptions are thrown + assertTrue(tmpFile.delete()); + Utils.sleep(50); + + assertFalse(tmpFile.exists()); + assertThrows(KafkaException.class, cachedFile::size); + assertThrows(KafkaException.class, cachedFile::lastModified); + assertThrows(KafkaException.class, cachedFile::contents); + assertThrows(KafkaException.class, cachedFile::transformed); + + System.out.println("yo"); + + // "Restore" the file and make sure it's refreshed. + Utils.sleep(10); + Files.writeString(tmpFile.toPath(), "valid data!", StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + + assertTrue(tmpFile.exists()); + assertDoesNotThrow(cachedFile::size); + assertDoesNotThrow(cachedFile::lastModified); + assertDoesNotThrow(cachedFile::contents); + assertDoesNotThrow(cachedFile::transformed); + } + + @Test + public void testTransformerError() throws Exception { + File tmpFile = tempFile("[\"foo\"]"); + + @SuppressWarnings("unchecked") + CachedFile.Transformer> jsonTransformer = (file, json) -> { + try { + ObjectMapper mapper = new ObjectMapper(); + return (List) mapper.readValue(json, List.class); + } catch (Exception e) { + throw new KafkaException(e); + } + }; + + CachedFile.RefreshPolicy> refreshPolicy = CachedFile.RefreshPolicy.lastModifiedPolicy(); + CachedFile> cachedFile = new CachedFile<>(tmpFile, jsonTransformer, refreshPolicy); + + assertEquals(List.of("foo"), cachedFile.transformed()); + + // Sleep then update the file with proper JSON. + Utils.sleep(10); + Files.writeString(tmpFile.toPath(), "[\"foo\", \"bar\", \"baz\"]", StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + + assertEquals(List.of("foo", "bar", "baz"), cachedFile.transformed()); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatterTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatterTest.java new file mode 100644 index 0000000000000..885abc569286c --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClientCredentialsRequestFormatterTest.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
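// The formatter tests that follow pin down two encodings from the OAuth RFCs: the HTTP Basic
// Authorization header is built with the regular (non-URL-safe) base64 alphabet per RFC 7617, and the
// client_credentials request body values are form-urlencoded per RFC 6749 (space becomes '+', '!' becomes
// %21, and so on; the tests also cover a variant that urlencodes the credentials themselves per
// RFC 6749 section 2.3.1). A standalone sketch of both encodings using only the JDK follows; the helper
// names are illustrative, not the ClientCredentialsRequestFormatter API.
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class ClientCredentialsEncodingSketch {
    static String basicAuthorizationHeader(String clientId, String clientSecret) {
        String credentials = clientId + ":" + clientSecret;
        // Base64.getEncoder() uses the standard alphabet ('+' and '/'), which is what RFC 7617 expects.
        return "Basic " + Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
    }

    static String requestBody(String scope) {
        String body = "grant_type=client_credentials";
        if (scope != null && !scope.isBlank())
            body += "&scope=" + URLEncoder.encode(scope.trim(), StandardCharsets.UTF_8);
        return body;
    }

    public static void main(String[] args) {
        System.out.println(basicAuthorizationHeader("jdoe", "secret"));
        System.out.println(requestBody("earth is great!"));   // grant_type=client_credentials&scope=earth+is+great%21
        System.out.println(requestBody("   "));               // grant_type=client_credentials
    }
}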
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.config.ConfigException; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static org.apache.kafka.common.security.oauthbearer.internals.secured.ClientCredentialsRequestFormatter.GRANT_TYPE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class ClientCredentialsRequestFormatterTest extends OAuthBearerTest { + + public static final String CLIENT_ID = "jdoe"; + public static final String CLIENT_SECRET = "secret"; + public static final String SCOPE = "everythingeverything"; + + @Test + public void testFormatAuthorizationHeaderEncoding() { + // according to RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. + assertAuthorizationHeaderEquals("SOME_RANDOM_LONG_USER_01234", "9Q|0`8i~ute-n9ksjLWb\\50\"AX@UUED5E", false, "Basic U09NRV9SQU5ET01fTE9OR19VU0VSXzAxMjM0OjlRfDBgOGl+dXRlLW45a3NqTFdiXDUwIkFYQFVVRUQ1RQ=="); + // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 + assertAuthorizationHeaderEquals("user!@~'", "secret-(*)!", true, "Basic dXNlciUyMSU0MCU3RSUyNzpzZWNyZXQtJTI4KiUyOSUyMQ=="); + } + + @ParameterizedTest + @MethodSource("testFormatterMissingValuesSource") + public void testFormatterMissingValues(String clientId, String clientSecret, boolean urlencode) { + assertThrows( + ConfigException.class, + () -> new ClientCredentialsRequestFormatter( + clientId, + clientSecret, + SCOPE, + urlencode + ) + ); + } + + @ParameterizedTest + @MethodSource("testScopeEscapingSource") + public void testScopeEscaping(String scope, boolean urlencode, String expectedScope) { + String expected = "grant_type=" + GRANT_TYPE + "&scope=" + expectedScope; + assertRequestBodyEquals(scope, urlencode, expected); + } + + @ParameterizedTest + @MethodSource("testMissingScopesSource") + public void testMissingScopes(String scope, boolean urlencode) { + String expected = "grant_type=" + GRANT_TYPE; + assertRequestBodyEquals(scope, urlencode, expected); + } + + private static Stream testFormatterMissingValuesSource() { + String[] clientIds = new String[] {null, "", " ", CLIENT_ID}; + String[] clientSecrets = new String[] {null, "", " ", CLIENT_SECRET}; + boolean[] urlencodes = new boolean[] {true, false}; + + List list = new ArrayList<>(); + + for (String clientId : clientIds) { + for (String clientSecret : clientSecrets) { + for (boolean urlencode : urlencodes) { + if (CLIENT_ID.equals(clientId) && CLIENT_SECRET.equals(clientSecret)) + continue; + + list.add(Arguments.of(clientId, clientSecret, urlencode)); + } + } + } + + return list.stream(); + } + + private static Stream testMissingScopesSource() { + String[] scopes = new String[] {null, "", " "}; + boolean[] urlencodes = new boolean[] {true, false}; + + List list = new ArrayList<>(); + + for (String scope : scopes) { + for (boolean urlencode : urlencodes) { + list.add(Arguments.of(scope, urlencode)); + } + } + + return list.stream(); + } + + private static Stream testScopeEscapingSource() { + return Stream.of( + Arguments.of("test-scope", true, "test-scope"), + Arguments.of("test-scope", false, "test-scope"), + 
Arguments.of("earth is great!", true, "earth+is+great%21"), + Arguments.of("earth is great!", false, "earth is great!"), + Arguments.of("what on earth?!?!?", true, "what+on+earth%3F%21%3F%21%3F"), + Arguments.of("what on earth?!?!?", false, "what on earth?!?!?") + ); + } + + private void assertRequestBodyEquals(String scope, boolean urlencode, String expected) { + ClientCredentialsRequestFormatter formatter = new ClientCredentialsRequestFormatter( + CLIENT_ID, + CLIENT_SECRET, + scope, + urlencode + ); + String actual = formatter.formatBody(); + assertEquals(expected, actual); + } + + private void assertAuthorizationHeaderEquals(String clientId, String clientSecret, boolean urlencode, String expected) { + ClientCredentialsRequestFormatter formatter = new ClientCredentialsRequestFormatter(clientId, clientSecret, SCOPE, urlencode); + Map headers = formatter.formatHeaders(); + String actual = headers.get("Authorization"); + assertEquals(expected, actual, String.format("Expected the HTTP Authorization header generated for client ID \"%s\" and client secret \"%s\" to match", clientId, clientSecret)); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java index 9a62f480215f7..efc41d64b3290 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java @@ -26,16 +26,16 @@ import java.io.File; import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Map; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; public class ConfigurationUtilsTest extends OAuthBearerTest { - private static final String URL_CONFIG_NAME = "url"; - private static final String FILE_CONFIG_NAME = "file"; + private static final String URL_CONFIG_NAME = "fictitious.url.config"; + private static final String FILE_CONFIG_NAME = "fictitious.file.config"; @AfterEach public void tearDown() throws Exception { @@ -59,7 +59,7 @@ public void testUrlCaseInsensitivity() { @Test public void testUrlFile() { - testUrl("file:///tmp/foo.txt"); + assertThrowsWithMessage(ConfigException.class, () -> testFileUrl("file:///tmp/foo.txt"), "that doesn't exist"); } @Test @@ -74,41 +74,34 @@ public void testUrlMissingProtocol() { @Test public void testUrlInvalidProtocol() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl("ftp://ftp.example.com"), "invalid protocol"); + assertThrowsWithMessage(ConfigException.class, () -> testFileUrl("ftp://ftp.example.com"), "invalid protocol"); } @Test public void testUrlNull() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl(null), "must be non-null"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(null), "is required"); } @Test public void testUrlEmptyString() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl(""), "must not contain only whitespace"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(""), "is required"); } @Test public void testUrlWhitespace() { - 
assertThrowsWithMessage(ConfigException.class, () -> testUrl(" "), "must not contain only whitespace"); - } - - private void testUrl(String value) { - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); - Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); - ConfigurationUtils cu = new ConfigurationUtils(configs); - cu.validateUrl(URL_CONFIG_NAME); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(" "), "is required"); } @Test public void testFile() throws IOException { File file = TestUtils.tempFile("some contents!"); - testFile(file.toURI().toURL().toString()); + testFile(file.getAbsolutePath()); } @Test public void testFileWithSuperfluousWhitespace() throws IOException { File file = TestUtils.tempFile(); - testFile(String.format(" %s ", file.toURI().toURL())); + testFile(String.format(" %s ", file.getAbsolutePath())); } @Test @@ -123,56 +116,90 @@ public void testFileUnreadable() throws IOException { if (!file.setReadable(false)) throw new IllegalStateException(String.format("Can't test file permissions as test couldn't programmatically make temp file %s un-readable", file.getAbsolutePath())); - assertThrowsWithMessage(ConfigException.class, () -> testFile(file.toURI().toURL().toString()), "that doesn't have read permission"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(file.getAbsolutePath()), "that doesn't have read permission"); } @Test public void testFileNull() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(null), "must be non-null"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(null), "is required"); } @Test public void testFileEmptyString() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(""), "must not contain only whitespace"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(""), "is required"); } @Test public void testFileWhitespace() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(" "), "must not contain only whitespace"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(" "), "is required"); } @Test public void testThrowIfURLIsNotAllowed() { String url = "http://www.example.com"; String fileUrl = "file:///etc/passwd"; - Map configs = new HashMap<>(); - configs.put(URL_CONFIG_NAME, url); - configs.put(FILE_CONFIG_NAME, fileUrl); - ConfigurationUtils cu = new ConfigurationUtils(configs); + ConfigurationUtils cu = new ConfigurationUtils(Map.of()); // By default, no URL is allowed - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(url), + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(fileUrl), + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); // add one url into allowed list System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, url); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(url)); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(fileUrl), + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url)); + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); // add all urls into allowed list System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, url + 
"," + fileUrl); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(url)); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(fileUrl)); + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url)); + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl)); } - protected void testFile(String value) { + @Test + public void testThrowIfFileIsNotAllowed() { + String file1 = "file1"; + String file2 = "file2"; + ConfigurationUtils cu = new ConfigurationUtils(Map.of()); + + // By default, no file is allowed + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1), + ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1), + ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); + + // add one file into allowed list + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, file1); + assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1)); + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file2), + ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); + + // add all files into allowed list + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, file1 + "," + file2); + assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1)); + assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file2)); + } + + private void testUrl(String value) { System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); ConfigurationUtils cu = new ConfigurationUtils(configs); - cu.validateFile(URL_CONFIG_NAME); + cu.validateUrl(URL_CONFIG_NAME); } + private void testFile(String value) { + System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, value == null ? "" : value); + Map configs = Collections.singletonMap(FILE_CONFIG_NAME, value); + ConfigurationUtils cu = new ConfigurationUtils(configs); + cu.validateFile(FILE_CONFIG_NAME); + } + + private void testFileUrl(String value) { + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); + Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); + ConfigurationUtils cu = new ConfigurationUtils(configs); + cu.validateFileUrl(URL_CONFIG_NAME); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java deleted file mode 100644 index 8b1c5a370652e..0000000000000 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.security.oauthbearer.internals.secured; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; - -import org.junit.jupiter.api.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.HttpURLConnection; -import java.nio.charset.StandardCharsets; -import java.util.Random; - -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class HttpAccessTokenRetrieverTest extends OAuthBearerTest { - - @Test - public void test() throws IOException { - String expectedResponse = "Hiya, buddy"; - HttpURLConnection mockedCon = createHttpURLConnection(expectedResponse); - String response = HttpAccessTokenRetriever.post(mockedCon, null, null, null, null); - assertEquals(expectedResponse, response); - } - - @Test - public void testEmptyResponse() throws IOException { - HttpURLConnection mockedCon = createHttpURLConnection(""); - assertThrows(IOException.class, () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - } - - @Test - public void testErrorReadingResponse() throws IOException { - HttpURLConnection mockedCon = createHttpURLConnection("dummy"); - when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); - - assertThrows(IOException.class, () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - } - - @Test - public void testErrorResponseUnretryableCode() throws IOException { - HttpURLConnection mockedCon = createHttpURLConnection("dummy"); - when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); - when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( - "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" - .getBytes(StandardCharsets.UTF_8))); - when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); - UnretryableException ioe = assertThrows(UnretryableException.class, - () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); - } - - @Test - public void testErrorResponseRetryableCode() throws IOException { - HttpURLConnection mockedCon = createHttpURLConnection("dummy"); - when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); - when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( - "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" - .getBytes(StandardCharsets.UTF_8))); - when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); - IOException ioe = assertThrows(IOException.class, - () -> 
HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); - - // error response body has different keys - when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( - "{\"errorCode\":\"some_arg\", \"errorSummary\":\"some problem with arg\"}" - .getBytes(StandardCharsets.UTF_8))); - ioe = assertThrows(IOException.class, - () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); - - // error response is valid json but unknown keys - when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( - "{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}" - .getBytes(StandardCharsets.UTF_8))); - ioe = assertThrows(IOException.class, - () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - assertTrue(ioe.getMessage().contains("{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}")); - } - - @Test - public void testErrorResponseIsInvalidJson() throws IOException { - HttpURLConnection mockedCon = createHttpURLConnection("dummy"); - when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); - when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( - "non json error output".getBytes(StandardCharsets.UTF_8))); - when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); - IOException ioe = assertThrows(IOException.class, - () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); - assertTrue(ioe.getMessage().contains("{non json error output}")); - } - - @Test - public void testCopy() throws IOException { - byte[] expected = new byte[4096 + 1]; - Random r = new Random(); - r.nextBytes(expected); - InputStream in = new ByteArrayInputStream(expected); - ByteArrayOutputStream out = new ByteArrayOutputStream(); - HttpAccessTokenRetriever.copy(in, out); - assertArrayEquals(expected, out.toByteArray()); - } - - @Test - public void testCopyError() throws IOException { - InputStream mockedIn = mock(InputStream.class); - OutputStream out = new ByteArrayOutputStream(); - when(mockedIn.read(any(byte[].class))).thenThrow(new IOException()); - assertThrows(IOException.class, () -> HttpAccessTokenRetriever.copy(mockedIn, out)); - } - - @Test - public void testParseAccessToken() throws IOException { - String expected = "abc"; - ObjectMapper mapper = new ObjectMapper(); - ObjectNode node = mapper.createObjectNode(); - node.put("access_token", expected); - - String actual = HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node)); - assertEquals(expected, actual); - } - - @Test - public void testParseAccessTokenEmptyAccessToken() { - ObjectMapper mapper = new ObjectMapper(); - ObjectNode node = mapper.createObjectNode(); - node.put("access_token", ""); - - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node))); - } - - @Test - public void testParseAccessTokenMissingAccessToken() { - ObjectMapper mapper = new ObjectMapper(); - ObjectNode node = mapper.createObjectNode(); - node.put("sub", "jdoe"); - - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node))); - } - - @Test - public void testParseAccessTokenInvalidJson() { - assertThrows(IOException.class, () -> HttpAccessTokenRetriever.parseAccessToken("not valid JSON")); - } - - @Test - public 
void testFormatAuthorizationHeader() { - assertAuthorizationHeader("id", "secret", false, "Basic aWQ6c2VjcmV0"); - } - - @Test - public void testFormatAuthorizationHeaderEncoding() { - // according to RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. - assertAuthorizationHeader("SOME_RANDOM_LONG_USER_01234", "9Q|0`8i~ute-n9ksjLWb\\50\"AX@UUED5E", false, "Basic U09NRV9SQU5ET01fTE9OR19VU0VSXzAxMjM0OjlRfDBgOGl+dXRlLW45a3NqTFdiXDUwIkFYQFVVRUQ1RQ=="); - // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 - assertAuthorizationHeader("user!@~'", "secret-(*)!", true, "Basic dXNlciUyMSU0MCU3RSUyNzpzZWNyZXQtJTI4KiUyOSUyMQ=="); - } - - private void assertAuthorizationHeader(String clientId, String clientSecret, boolean urlencode, String expected) { - String actual = HttpAccessTokenRetriever.formatAuthorizationHeader(clientId, clientSecret, urlencode); - assertEquals(expected, actual, String.format("Expected the HTTP Authorization header generated for client ID \"%s\" and client secret \"%s\" to match", clientId, clientSecret)); - } - - @Test - public void testFormatAuthorizationHeaderMissingValues() { - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(null, "secret", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", null, false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(null, null, false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("", "secret", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", "", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("", "", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(" ", "secret", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", " ", false)); - assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(" ", " ", false)); - } - - @Test - public void testFormatRequestBody() { - String expected = "grant_type=client_credentials&scope=scope"; - String actual = HttpAccessTokenRetriever.formatRequestBody("scope"); - assertEquals(expected, actual); - } - - @Test - public void testFormatRequestBodyWithEscaped() { - String questionMark = "%3F"; - String exclamationMark = "%21"; - - String expected = String.format("grant_type=client_credentials&scope=earth+is+great%s", exclamationMark); - String actual = HttpAccessTokenRetriever.formatRequestBody("earth is great!"); - assertEquals(expected, actual); - - expected = String.format("grant_type=client_credentials&scope=what+on+earth%s%s%s%s%s", questionMark, exclamationMark, questionMark, exclamationMark, questionMark); - actual = HttpAccessTokenRetriever.formatRequestBody("what on earth?!?!?"); - assertEquals(expected, actual); - } - - @Test - public void testFormatRequestBodyMissingValues() { - String expected = "grant_type=client_credentials"; - String actual = HttpAccessTokenRetriever.formatRequestBody(null); - assertEquals(expected, actual); - - actual = HttpAccessTokenRetriever.formatRequestBody(""); - assertEquals(expected, actual); - - actual = 
HttpAccessTokenRetriever.formatRequestBody(" "); - assertEquals(expected, actual); - } - -} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetrieverTest.java new file mode 100644 index 0000000000000..7a6835894c1ff --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpJwtRetrieverTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.nio.charset.StandardCharsets; +import java.util.Random; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HttpJwtRetrieverTest extends OAuthBearerTest { + + @Test + public void test() throws IOException { + String expectedResponse = "Hiya, buddy"; + HttpURLConnection mockedCon = createHttpURLConnection(expectedResponse); + String response = HttpJwtRetriever.post(mockedCon, null, null, null, null); + assertEquals(expectedResponse, response); + } + + @Test + public void testEmptyResponse() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection(""); + assertThrows(IOException.class, () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + } + + @Test + public void testErrorReadingResponse() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + + assertThrows(IOException.class, () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + } + + @Test + public void testErrorResponseUnretryableCode() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); + UnretryableException ioe = 
assertThrows(UnretryableException.class, + () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + } + + @Test + public void testErrorResponseRetryableCode() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); + IOException ioe = assertThrows(IOException.class, + () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + + // error response body has different keys + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"errorCode\":\"some_arg\", \"errorSummary\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + ioe = assertThrows(IOException.class, + () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + + // error response is valid json but unknown keys + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + ioe = assertThrows(IOException.class, + () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}")); + } + + @Test + public void testErrorResponseIsInvalidJson() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "non json error output".getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); + IOException ioe = assertThrows(IOException.class, + () -> HttpJwtRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{non json error output}")); + } + + @Test + public void testCopy() throws IOException { + byte[] expected = new byte[4096 + 1]; + Random r = new Random(); + r.nextBytes(expected); + InputStream in = new ByteArrayInputStream(expected); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + HttpJwtRetriever.copy(in, out); + assertArrayEquals(expected, out.toByteArray()); + } + + @Test + public void testCopyError() throws IOException { + InputStream mockedIn = mock(InputStream.class); + OutputStream out = new ByteArrayOutputStream(); + when(mockedIn.read(any(byte[].class))).thenThrow(new IOException()); + assertThrows(IOException.class, () -> HttpJwtRetriever.copy(mockedIn, out)); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParserTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParserTest.java new file mode 100644 index 0000000000000..c175cbcb94548 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JwtResponseParserTest.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.JwtRetrieverException; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JwtResponseParserTest extends OAuthBearerTest { + + @Test + public void testParseJwt() throws IOException { + String expected = "abc"; + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("access_token", expected); + + JwtResponseParser responseParser = new JwtResponseParser(); + String actual = responseParser.parseJwt(mapper.writeValueAsString(node)); + assertEquals(expected, actual); + } + + @Test + public void testParseJwtEmptyAccessToken() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("access_token", ""); + + JwtResponseParser responseParser = new JwtResponseParser(); + assertThrows(JwtRetrieverException.class, () -> responseParser.parseJwt(mapper.writeValueAsString(node))); + } + + @Test + public void testParseJwtMissingAccessToken() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("sub", "jdoe"); + + JwtResponseParser responseParser = new JwtResponseParser(); + assertThrows(JwtRetrieverException.class, () -> responseParser.parseJwt(mapper.writeValueAsString(node))); + } + + @Test + public void testParseJwtInvalidJson() { + JwtResponseParser responseParser = new JwtResponseParser(); + assertThrows(JwtRetrieverException.class, () -> responseParser.parseJwt("not valid JSON")); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java index 7f20b9464faea..6cfee84178021 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java @@ -19,9 +19,8 @@ import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; -import org.apache.kafka.common.security.authenticator.TestJaasConfig; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; +import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import 
com.fasterxml.jackson.databind.ObjectMapper; @@ -30,24 +29,35 @@ import org.jose4j.jwk.PublicJsonWebKey; import org.jose4j.jwk.RsaJsonWebKey; import org.jose4j.jwk.RsaJwkGenerator; +import org.jose4j.jwt.consumer.InvalidJwtException; +import org.jose4j.jwt.consumer.JwtConsumer; +import org.jose4j.jwt.consumer.JwtConsumerBuilder; +import org.jose4j.jwt.consumer.JwtContext; import org.jose4j.lang.JoseException; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.TestInstance.Lifecycle; import org.junit.jupiter.api.function.Executable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.StandardOpenOption; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; import java.util.Arrays; import java.util.Base64; import java.util.Collections; +import java.util.EnumSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.function.Consumer; @@ -63,8 +73,6 @@ @TestInstance(Lifecycle.PER_CLASS) public abstract class OAuthBearerTest { - protected final Logger log = LoggerFactory.getLogger(getClass()); - protected ObjectMapper mapper = new ObjectMapper(); protected void assertThrowsWithMessage(Class clazz, @@ -80,18 +88,6 @@ protected void assertErrorMessageContains(String actual, String expectedSubstrin expectedSubstring)); } - protected void configureHandler(AuthenticateCallbackHandler handler, - Map configs, - Map jaasConfig) { - TestJaasConfig config = new TestJaasConfig(); - config.createOrUpdateEntry("KafkaClient", OAuthBearerLoginModule.class.getName(), jaasConfig); - AppConfigurationEntry kafkaClient = config.getAppConfigurationEntry("KafkaClient")[0]; - - handler.configure(configs, - OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - Collections.singletonList(kafkaClient)); - } - protected String createBase64JsonJwtSection(Consumer c) { String json = createJsonJwtSection(c); @@ -147,36 +143,6 @@ protected HttpURLConnection createHttpURLConnection(String response) throws IOEx return mockedCon; } - protected File createTempDir(String directory) throws IOException { - File tmpDir = new File(System.getProperty("java.io.tmpdir")); - - if (directory != null) - tmpDir = new File(tmpDir, directory); - - if (!tmpDir.exists() && !tmpDir.mkdirs()) - throw new IOException("Could not create " + tmpDir); - - tmpDir.deleteOnExit(); - log.debug("Created temp directory {}", tmpDir); - return tmpDir; - } - - protected File createTempFile(File tmpDir, - String prefix, - String suffix, - String contents) - throws IOException { - File file = File.createTempFile(prefix, suffix, tmpDir); - log.debug("Created new temp file {}", file); - file.deleteOnExit(); - - try (FileWriter writer = new FileWriter(file)) { - writer.write(contents); - } - - return file; - } - protected Map getSaslConfigs(Map configs) { ConfigDef configDef = new ConfigDef(); configDef.withClientSaslSupport(); @@ -192,6 +158,20 @@ protected File createTempFile(File tmpDir, return getSaslConfigs(Collections.emptyMap()); } + protected List getJaasConfigEntries() { + return getJaasConfigEntries(Map.of()); + } + + protected 
List getJaasConfigEntries(Map options) { + return List.of( + new AppConfigurationEntry( + OAuthBearerLoginModule.class.getName(), + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, + options + ) + ); + } + protected PublicJsonWebKey createRsaJwk() throws JoseException { RsaJsonWebKey jwk = RsaJwkGenerator.generateJwk(2048); jwk.setKeyId("key-1"); @@ -212,4 +192,75 @@ protected PublicJsonWebKey createEcJwk() throws JoseException { return jwk; } + protected String createJwt(String header, String payload, String signature) { + Base64.Encoder enc = Base64.getUrlEncoder(); + header = enc.encodeToString(Utils.utf8(header)); + payload = enc.encodeToString(Utils.utf8(payload)); + signature = enc.encodeToString(Utils.utf8(signature)); + return String.format("%s.%s.%s", header, payload, signature); + } + + protected String createJwt(String subject) { + Time time = Time.SYSTEM; + long nowSeconds = time.milliseconds() / 1000; + + return createJwt( + "{}", + String.format( + "{\"iat\":%s, \"exp\":%s, \"sub\":\"%s\"}", + nowSeconds, + nowSeconds + 300, + subject + ), + "sign" + ); + } + + + protected void assertClaims(PublicKey publicKey, String assertion) throws InvalidJwtException { + JwtConsumer jwtConsumer = jwtConsumer(publicKey); + jwtConsumer.processToClaims(assertion); + } + + protected JwtContext assertContext(PublicKey publicKey, String assertion) throws InvalidJwtException { + JwtConsumer jwtConsumer = jwtConsumer(publicKey); + return jwtConsumer.process(assertion); + } + + protected JwtConsumer jwtConsumer(PublicKey publicKey) { + return new JwtConsumerBuilder() + .setVerificationKey(publicKey) + .setRequireExpirationTime() + .setAllowedClockSkewInSeconds(30) // Sure, let's give it some slack + .build(); + } + + protected File generatePrivateKey(PrivateKey privateKey) throws IOException { + File file = File.createTempFile("private-", ".key"); + byte[] bytes = Base64.getEncoder().encode(privateKey.getEncoded()); + + try (FileChannel channel = FileChannel.open(file.toPath(), EnumSet.of(StandardOpenOption.WRITE))) { + Utils.writeFully(channel, ByteBuffer.wrap(bytes)); + } + + return file; + } + + protected File generatePrivateKey() throws IOException { + return generatePrivateKey(generateKeyPair().getPrivate()); + } + + protected KeyPair generateKeyPair() { + return generateKeyPair("RSA"); + } + + protected KeyPair generateKeyPair(String algorithm) { + try { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); + keyGen.initialize(2048); + return keyGen.generateKeyPair(); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException("Received unexpected error during private key generation", e); + } + } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java index c2324b9d2dac2..b515255147f7b 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java @@ -28,6 +28,8 @@ import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; +import static 
org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; +import static org.apache.kafka.test.TestUtils.tempFile; public class VerificationKeyResolverFactoryTest extends OAuthBearerTest { @@ -38,15 +40,10 @@ public void tearDown() throws Exception { @Test public void testConfigureRefreshingFileVerificationKeyResolver() throws Exception { - File tmpDir = createTempDir("access-token"); - File verificationKeyFile = createTempFile(tmpDir, "access-token-", ".json", "{}"); - - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, verificationKeyFile.toURI().toString()); - Map configs = Collections.singletonMap(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); - Map jaasConfig = Collections.emptyMap(); - - // verify it won't throw exception - try (CloseableVerificationKeyResolver verificationKeyResolver = VerificationKeyResolverFactory.create(configs, jaasConfig)) { } + String file = tempFile("{}").toURI().toString(); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); + Map configs = Collections.singletonMap(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), "The JSON JWKS content does not include the keys member"); } @Test @@ -55,28 +52,15 @@ public void testConfigureRefreshingFileVerificationKeyResolverWithInvalidDirecto String file = new File("/tmp/this-directory-does-not-exist/foo.json").toURI().toString(); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); - Map jaasConfig = Collections.emptyMap(); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, jaasConfig), "that doesn't exist"); - } - - @Test - public void testConfigureRefreshingFileVerificationKeyResolverWithInvalidFile() throws Exception { - // Should fail because while the parent path exists, the file itself doesn't. 
- File tmpDir = createTempDir("this-directory-does-exist"); - File verificationKeyFile = new File(tmpDir, "this-file-does-not-exist.json"); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, verificationKeyFile.toURI().toString()); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); - Map jaasConfig = Collections.emptyMap(); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, jaasConfig), "that doesn't exist"); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), "that doesn't exist"); } @Test public void testSaslOauthbearerTokenEndpointUrlIsNotAllowed() throws Exception { // Should fail if the URL is not allowed - File tmpDir = createTempDir("not_allowed"); - File verificationKeyFile = new File(tmpDir, "not_allowed.json"); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, Collections.emptyMap()), + String file = tempFile("{}").toURI().toString(); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreatorTest.java new file mode 100644 index 0000000000000..d5b165b468846 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DefaultAssertionCreatorTest.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; + +import org.jose4j.jwt.consumer.JwtContext; +import org.jose4j.jwx.JsonWebStructure; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.io.File; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.StandardOpenOption; +import java.security.GeneralSecurityException; +import java.security.KeyPair; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.TOKEN_SIGNING_ALGORITHM_RS256; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.getSignature; +import static org.apache.kafka.common.security.oauthbearer.internals.secured.assertion.AssertionUtils.sign; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class DefaultAssertionCreatorTest extends OAuthBearerTest { + + @Test + public void testPrivateKey() throws Exception { + KeyPair keyPair = generateKeyPair(); + Builder builder = new Builder() + .setPrivateKeyFile(generatePrivateKey(keyPair.getPrivate())); + AssertionJwtTemplate jwtTemplate = new LayeredAssertionJwtTemplate( + new StaticAssertionJwtTemplate(Map.of("kid", "test-id"), Map.of()), + new DynamicAssertionJwtTemplate( + new MockTime(), + builder.algorithm, + 3600, + 60, + false + ) + ); + + try (AssertionCreator assertionCreator = builder.build()) { + String assertion = assertionCreator.create(jwtTemplate); + assertClaims(keyPair.getPublic(), assertion); + } + } + + @Test + public void testPrivateKeyId() throws Exception { + KeyPair keyPair = generateKeyPair(); + Builder builder = new Builder() + .setPrivateKeyFile(generatePrivateKey(keyPair.getPrivate())); + + AssertionJwtTemplate jwtTemplate = new LayeredAssertionJwtTemplate( + new StaticAssertionJwtTemplate(Map.of("kid", "test-id"), Map.of()), + new DynamicAssertionJwtTemplate( + new MockTime(), + builder.algorithm, + 3600, + 60, + false + ) + ); + + try (AssertionCreator assertionCreator = builder.build()) { + String assertion = assertionCreator.create(jwtTemplate); + JwtContext context = assertContext(keyPair.getPublic(), assertion); + List joseObjects = context.getJoseObjects(); + assertNotNull(joseObjects); + assertEquals(1, joseObjects.size()); + JsonWebStructure jsonWebStructure = joseObjects.get(0); + assertEquals("test-id", jsonWebStructure.getKeyIdHeaderValue()); + } + } + + @Test + public void testInvalidPrivateKey() throws Exception { + File privateKeyFile = generatePrivateKey(); + long originalFileLength = privateKeyFile.length(); + int bytesToTruncate = 10; // A single byte isn't enough + + // Intentionally "mangle" the private key secret by truncating the file. 
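+        // Descriptive note (added for clarity, not part of the change set): dropping the trailing bytes leaves
+        // Base64 text that no longer decodes to a well-formed private key, so the builder invocation below is
+        // expected to fail with a KafkaException whose cause is a GeneralSecurityException, as asserted.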
+ try (FileChannel channel = FileChannel.open(privateKeyFile.toPath(), StandardOpenOption.WRITE)) { + long size = channel.size(); + assertEquals(originalFileLength, size); + assertTrue(size > bytesToTruncate); + channel.truncate(size - bytesToTruncate); + } + + assertEquals(originalFileLength - bytesToTruncate, privateKeyFile.length()); + + KafkaException e = assertThrows(KafkaException.class, () -> new Builder().setPrivateKeyFile(privateKeyFile).build()); + assertNotNull(e.getCause()); + assertInstanceOf(GeneralSecurityException.class, e.getCause()); + } + + @ParameterizedTest + @CsvSource("RS256,ES256") + public void testAlgorithm(String algorithm) throws Exception { + KeyPair keyPair = generateKeyPair(); + Builder builder = new Builder() + .setPrivateKeyFile(generatePrivateKey(keyPair.getPrivate())) + .setAlgorithm(algorithm); + + String assertion; + + try (AssertionCreator assertionCreator = builder.build()) { + AssertionJwtTemplate jwtTemplate = new DynamicAssertionJwtTemplate( + new MockTime(), + algorithm, + 3600, + 60, + false + ); + assertion = assertionCreator.create(jwtTemplate); + } + + assertClaims(keyPair.getPublic(), assertion); + + JwtContext context = assertContext(keyPair.getPublic(), assertion); + List joseObjects = context.getJoseObjects(); + assertNotNull(joseObjects); + assertEquals(1, joseObjects.size()); + JsonWebStructure jsonWebStructure = joseObjects.get(0); + assertEquals(algorithm, jsonWebStructure.getAlgorithmHeaderValue()); + } + + @Test + public void testInvalidAlgorithm() throws IOException { + PrivateKey privateKey = generateKeyPair().getPrivate(); + Builder builder = new Builder() + .setPrivateKeyFile(generatePrivateKey(privateKey)) + .setAlgorithm("thisisnotvalid"); + assertThrows(NoSuchAlgorithmException.class, () -> getSignature(builder.algorithm)); + assertThrows( + NoSuchAlgorithmException.class, + () -> sign(builder.algorithm, privateKey, "dummy content")); + } + + private static class Builder { + + private final Time time = new MockTime(); + private String algorithm = TOKEN_SIGNING_ALGORITHM_RS256; + private File privateKeyFile; + private Optional passphrase = Optional.empty(); + + public Builder setAlgorithm(String algorithm) { + this.algorithm = algorithm; + return this; + } + + public Builder setPrivateKeyFile(File privateKeyFile) { + this.privateKeyFile = privateKeyFile; + return this; + } + + public Builder setPassphrase(String passphrase) { + this.passphrase = Optional.of(passphrase); + return this; + } + + private DefaultAssertionCreator build() { + return new DefaultAssertionCreator(algorithm, privateKeyFile, passphrase); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplateTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplateTest.java new file mode 100644 index 0000000000000..54ebc38778816 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/DynamicAssertionJwtTemplateTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.utils.MockTime; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class DynamicAssertionJwtTemplateTest { + + private final MockTime time = new MockTime(); + + @Test + public void testBasicUsage() throws IOException { + String algorithm = "somealg"; + int expiration = 1; + int notBefore = 20; + boolean includeJti = false; + + try (AssertionJwtTemplate template = new DynamicAssertionJwtTemplate(time, algorithm, expiration, notBefore, includeJti)) { + Map header = template.header(); + assertNotNull(header); + assertEquals("JWT", header.get("typ")); + assertEquals(algorithm, header.get("alg")); + + long currSeconds = time.milliseconds() / 1000L; + + Map payload = template.payload(); + assertNotNull(payload); + assertEquals(currSeconds, payload.get("iat")); + assertEquals(currSeconds + expiration, payload.get("exp")); + assertEquals(currSeconds - notBefore, payload.get("nbf")); + assertNull(payload.get("jti")); + } + } + + @Test + public void testJtiUniqueness() throws IOException { + List jwtIds = new ArrayList<>(); + + for (int i = 0; i < 10; i++) { + try (AssertionJwtTemplate template = new DynamicAssertionJwtTemplate(time, "RSA", 1, 2, true)) { + Map payload = template.payload(); + assertNotNull(payload); + String jwtId = (String) payload.get("jti"); + jwtIds.add(jwtId); + } + } + + // A list of JWT IDs will be the same size as a set if there are no duplicates. + List jwtIds2 = new ArrayList<>(new HashSet<>(jwtIds)); + assertEquals(jwtIds.size(), jwtIds2.size()); + + jwtIds.sort(Comparator.naturalOrder()); + jwtIds2.sort(Comparator.naturalOrder()); + assertEquals(jwtIds, jwtIds2); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreatorTest.java new file mode 100644 index 0000000000000..0dbc665344194 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionCreatorTest.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; + +import org.junit.jupiter.api.Test; + +import java.io.File; + +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class FileAssertionCreatorTest extends OAuthBearerTest { + + @Test + public void testBasicUsage() throws Exception { + String expected = createJwt("jdoe"); + File tmpFile = tempFile(expected); + + try (AssertionCreator assertionCreator = new FileAssertionCreator(tmpFile)) { + String assertion = assertionCreator.create(null); + assertEquals(expected, assertion); + } + } + + @Test + public void testJwtWithWhitespace() throws Exception { + String expected = createJwt("jdoe"); + File tmpFile = tempFile(" " + expected + "\n\n\n"); + + try (AssertionCreator assertionCreator = new FileAssertionCreator(tmpFile)) { + String assertion = assertionCreator.create(null); + assertEquals(expected, assertion); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplateTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplateTest.java new file mode 100644 index 0000000000000..edce26946c238 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/assertion/FileAssertionJwtTemplateTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.security.oauthbearer.internals.secured.assertion; + +import org.apache.kafka.common.KafkaException; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.util.List; +import java.util.Map; + +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class FileAssertionJwtTemplateTest { + + @Test + public void testBasicUsage() throws Exception { + String expected = createTemplateJson( + Map.of("typ", "JWT", "alg", "RS256"), + Map.of("sub", "jdoe") + ); + + File tmpFile = tempFile(expected); + + try (AssertionJwtTemplate template = new FileAssertionJwtTemplate(tmpFile)) { + Map header = template.header(); + assertNotNull(header); + assertEquals("JWT", header.get("typ")); + assertEquals("RS256", header.get("alg")); + + Map payload = template.payload(); + assertNotNull(payload); + assertEquals("jdoe", payload.get("sub")); + } + } + + @Test + public void testHeaderOnly() throws Exception { + String expected = toJson( + Map.of( + "header", + Map.of("typ", "JWT", "alg", "RS256") + ) + ); + + File tmpFile = tempFile(expected); + + try (AssertionJwtTemplate template = new FileAssertionJwtTemplate(tmpFile)) { + Map header = template.header(); + assertNotNull(header); + assertEquals("JWT", header.get("typ")); + assertEquals("RS256", header.get("alg")); + + Map payload = template.payload(); + assertNotNull(payload); + assertTrue(payload.isEmpty()); + } + } + + @Test + public void testPayloadOnly() throws Exception { + String expected = toJson( + Map.of( + "payload", + Map.of("sub", "jdoe") + ) + ); + + File tmpFile = tempFile(expected); + + try (AssertionJwtTemplate template = new FileAssertionJwtTemplate(tmpFile)) { + Map header = template.header(); + assertNotNull(header); + assertTrue(header.isEmpty()); + + Map payload = template.payload(); + assertNotNull(payload); + assertEquals("jdoe", payload.get("sub")); + } + } + + @Test + public void testMalformedFile() throws Exception { + String expected = "{invalid-json}"; + File tmpFile = tempFile(expected); + + assertThrows(KafkaException.class, () -> new FileAssertionJwtTemplate(tmpFile)); + } + + @Test + public void testMalformedFormat() throws Exception { + String expected = toJson(Map.of("header", List.of("foo", "bar", "baz"))); + File tmpFile = tempFile(expected); + + assertThrows(KafkaException.class, () -> new FileAssertionJwtTemplate(tmpFile)); + } + + private String createTemplateJson(Map header, Map payload) { + Map topLevel = Map.of("header", header, "payload", payload); + return toJson(topLevel); + } + + private String toJson(Map map) { + ObjectMapper mapper = new ObjectMapper(); + return assertDoesNotThrow(() -> mapper.writeValueAsString(map)); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index 097a14366d83a..89e6de42c1dc5 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -31,6 +31,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; @@ -86,7 +87,7 @@ public void minimalToken() throws IOException, UnsupportedCallbackException { assertNotNull(jws, "create token failed"); long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, 1000 * 60 * 60); - assertEquals(new HashSet<>(Arrays.asList("sub", "iat", "exp")), jws.claims().keySet()); + assertEquals(Set.of("sub", "iat", "exp"), jws.claims().keySet()); } @SuppressWarnings("unchecked") @@ -123,11 +124,11 @@ public void validOptionsWithExplicitOptionValues() long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, lifetimeSeconds * 1000); Map claims = jws.claims(); - assertEquals(new HashSet<>(Arrays.asList(actualScopeClaimName, principalClaimName, "iat", "exp", "number", - "list", "emptyList1", "emptyList2")), claims.keySet()); - assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), + assertEquals(Set.of(actualScopeClaimName, principalClaimName, "iat", "exp", "number", + "list", "emptyList1", "emptyList2"), claims.keySet()); + assertEquals(Set.of(explicitScope1, explicitScope2), new HashSet<>((List) claims.get(actualScopeClaimName))); - assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), jws.scope()); + assertEquals(Set.of(explicitScope1, explicitScope2), jws.scope()); assertEquals(1.0, jws.claim("number", Number.class)); assertEquals(Arrays.asList("1", "2", ""), jws.claim("list", List.class)); assertEquals(Collections.emptyList(), jws.claim("emptyList1", List.class)); @@ -151,7 +152,7 @@ private static OAuthBearerUnsecuredLoginCallbackHandler createCallbackHandler(Ma private static void confirmCorrectValues(OAuthBearerUnsecuredJws jws, String user, long startMs, long lifetimeSeconds) throws OAuthBearerIllegalTokenException { Map header = jws.header(); - assertEquals(header.size(), 1); + assertEquals(1, header.size()); assertEquals("none", header.get("alg")); assertEquals(user != null ? 
user : "", jws.principalName()); assertEquals(Long.valueOf(startMs), jws.startTimeMs()); diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java index 232d4d7327bf6..ed76495fa03a8 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java @@ -202,6 +202,8 @@ public class DefaultSslEngineFactoryTest { public void setUp() { factory = sslEngineFactory(); configs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); + configs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); + configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of()); } protected DefaultSslEngineFactory sslEngineFactory() { @@ -216,7 +218,7 @@ public void testPemTrustStoreConfigWithOneCert() throws Exception { KeyStore trustStore = factory.truststore(); List aliases = Collections.list(trustStore.aliases()); - assertEquals(Collections.singletonList("kafka0"), aliases); + assertEquals(List.of("kafka0"), aliases); assertNotNull(trustStore.getCertificate("kafka0"), "Certificate not loaded"); assertNull(trustStore.getKey("kafka0", null), "Unexpected private key"); } @@ -270,7 +272,7 @@ private void verifyPemKeyStoreConfig(String keyFileName, Password keyPassword) t KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(Collections.singletonList("kafka"), aliases); + assertEquals(List.of("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not loaded"); assertNotNull(keyStore.getKey("kafka", keyPassword == null ? null : keyPassword.value().toCharArray()), "Private key not loaded"); @@ -284,7 +286,7 @@ public void testPemTrustStoreFile() throws Exception { KeyStore trustStore = factory.truststore(); List aliases = Collections.list(trustStore.aliases()); - assertEquals(Collections.singletonList("kafka0"), aliases); + assertEquals(List.of("kafka0"), aliases); assertNotNull(trustStore.getCertificate("kafka0"), "Certificate not found"); assertNull(trustStore.getKey("kafka0", null), "Unexpected private key"); } @@ -299,7 +301,7 @@ public void testPemKeyStoreFileNoKeyPassword() throws Exception { KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(Collections.singletonList("kafka"), aliases); + assertEquals(List.of("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not loaded"); assertNotNull(keyStore.getKey("kafka", null), "Private key not loaded"); } @@ -314,7 +316,7 @@ public void testPemKeyStoreFileWithKeyPassword() throws Exception { KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(Collections.singletonList("kafka"), aliases); + assertEquals(List.of("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not found"); assertNotNull(keyStore.getKey("kafka", KEY_PASSWORD.value().toCharArray()), "Private key not found"); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java index 6770e4702ff1f..1c82dc62ff0d4 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java @@ 
-359,7 +359,6 @@ public void testPemReconfiguration() throws Exception { sslConfig = new TestSecurityConfig(props); sslFactory.reconfigure(sslConfig.values()); assertNotSame(sslEngineFactory, sslFactory.sslEngineFactory(), "SslEngineFactory not recreated"); - sslEngineFactory = sslFactory.sslEngineFactory(); } @Test @@ -400,15 +399,15 @@ public void testUntrustedKeyStoreValidationFails() throws Exception { @Test public void testKeystoreVerifiableUsingTruststore() throws Exception { - verifyKeystoreVerifiableUsingTruststore(false, tlsProtocol); + verifyKeystoreVerifiableUsingTruststore(false); } @Test public void testPemKeystoreVerifiableUsingTruststore() throws Exception { - verifyKeystoreVerifiableUsingTruststore(true, tlsProtocol); + verifyKeystoreVerifiableUsingTruststore(true); } - private void verifyKeystoreVerifiableUsingTruststore(boolean usePem, String tlsProtocol) throws Exception { + private void verifyKeystoreVerifiableUsingTruststore(boolean usePem) throws Exception { File trustStoreFile1 = usePem ? null : TestUtils.tempFile("truststore1", ".jks"); Map sslConfig1 = sslConfigsBuilder(ConnectionMode.SERVER) .createNewTrustStore(trustStoreFile1) @@ -436,15 +435,15 @@ private void verifyKeystoreVerifiableUsingTruststore(boolean usePem, String tlsP @Test public void testCertificateEntriesValidation() throws Exception { - verifyCertificateEntriesValidation(false, tlsProtocol); + verifyCertificateEntriesValidation(false); } @Test public void testPemCertificateEntriesValidation() throws Exception { - verifyCertificateEntriesValidation(true, tlsProtocol); + verifyCertificateEntriesValidation(true); } - private void verifyCertificateEntriesValidation(boolean usePem, String tlsProtocol) throws Exception { + private void verifyCertificateEntriesValidation(boolean usePem) throws Exception { File trustStoreFile = usePem ? 
null : TestUtils.tempFile("truststore", ".jks"); Map serverSslConfig = sslConfigsBuilder(ConnectionMode.SERVER) .createNewTrustStore(trustStoreFile) diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java index b708b4eeb602d..f693912f6616e 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java @@ -19,7 +19,13 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.AuthorizationException; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.errors.NetworkException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.GetTelemetrySubscriptionsRequestData; import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; import org.apache.kafka.common.message.PushTelemetryRequestData; @@ -63,8 +69,10 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; public class ClientTelemetryReporterTest { @@ -413,6 +421,134 @@ public void testCreateRequestPushCompressionException() { } } + @Test + public void testCreateRequestPushCompressionFallbackToNextType() { + clientTelemetryReporter.configure(configs); + clientTelemetryReporter.contextChange(metricsContext); + + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // Set up subscription with multiple compression types: GZIP -> LZ4 -> SNAPPY + ClientTelemetryReporter.ClientTelemetrySubscription subscription = new ClientTelemetryReporter.ClientTelemetrySubscription( + uuid, 1234, 20000, List.of(CompressionType.GZIP, CompressionType.LZ4, CompressionType.SNAPPY), true, null); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + + try (MockedStatic mockedCompress = Mockito.mockStatic(ClientTelemetryUtils.class, new CallsRealMethods())) { + // First request: GZIP fails with NoClassDefFoundError, should use NONE for this request + mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.GZIP))).thenThrow(new NoClassDefFoundError("GZIP not available")); + + Optional> requestOptional = telemetrySender.createRequest(); + assertNotNull(requestOptional); + assertTrue(requestOptional.isPresent()); + assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); + PushTelemetryRequest request = (PushTelemetryRequest) requestOptional.get().build(); + + // 
Should fallback to NONE for this request (GZIP gets cached as unsupported) + assertEquals(CompressionType.NONE.id, request.data().compressionType()); + assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); + + // Reset state for next request + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // Second request: LZ4 is selected (since GZIP is now cached as unsupported), LZ4 fails, should use NONE + // Note that some libraries eg. LZ4 return KafkaException with cause as NoClassDefFoundError + mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.LZ4))).thenThrow(new KafkaException(new NoClassDefFoundError("LZ4 not available"))); + + requestOptional = telemetrySender.createRequest(); + assertNotNull(requestOptional); + assertTrue(requestOptional.isPresent()); + assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); + request = (PushTelemetryRequest) requestOptional.get().build(); + + // Should fallback to NONE for this request (LZ4 gets cached as unsupported) + assertEquals(CompressionType.NONE.id, request.data().compressionType()); + assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); + + // Reset state for next request + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // Third request: SNAPPY is selected (since GZIP and LZ4 are now cached as unsupported), SNAPPY fails, should use NONE + mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.SNAPPY))).thenThrow(new NoClassDefFoundError("SNAPPY not available")); + + requestOptional = telemetrySender.createRequest(); + assertNotNull(requestOptional); + assertTrue(requestOptional.isPresent()); + assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); + request = (PushTelemetryRequest) requestOptional.get().build(); + + // Should fallback to NONE for this request (SNAPPY gets cached as unsupported) + assertEquals(CompressionType.NONE.id, request.data().compressionType()); + assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); + + // Reset state for next request + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // Fourth request: All compression types are now cached as unsupported, should use NONE directly + requestOptional = telemetrySender.createRequest(); + assertNotNull(requestOptional); + assertTrue(requestOptional.isPresent()); + assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); + request = (PushTelemetryRequest) requestOptional.get().build(); + + // Should use NONE directly (no compression types are supported) + assertEquals(CompressionType.NONE.id, request.data().compressionType()); + assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); + } + } + + @Test + public void testCreateRequestPushCompressionFallbackAndTermination() { + clientTelemetryReporter.configure(configs); + clientTelemetryReporter.contextChange(metricsContext); + + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // Set up subscription with ZSTD compression type + ClientTelemetryReporter.ClientTelemetrySubscription subscription = new 
ClientTelemetryReporter.ClientTelemetrySubscription( + uuid, 1234, 20000, List.of(CompressionType.ZSTD, CompressionType.LZ4), true, null); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + + try (MockedStatic mockedCompress = Mockito.mockStatic(ClientTelemetryUtils.class, new CallsRealMethods())) { + + // === Test 1: NoClassDefFoundError fallback (recoverable) === + mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.ZSTD))) + .thenThrow(new NoClassDefFoundError("com/github/luben/zstd/BufferPool")); + + assertEquals(ClientTelemetryState.PUSH_NEEDED, telemetrySender.state()); + + Optional> request1 = telemetrySender.createRequest(); + assertNotNull(request1); + assertTrue(request1.isPresent()); + assertInstanceOf(PushTelemetryRequest.class, request1.get().build()); + PushTelemetryRequest pushRequest1 = (PushTelemetryRequest) request1.get().build(); + assertEquals(CompressionType.NONE.id, pushRequest1.data().compressionType()); // Fallback to NONE + assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); + + // Reset state (simulate successful response handling) + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + + // === Test 2: OutOfMemoryError causes termination (non-recoverable Error) === + mockedCompress.reset(); + mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.LZ4))) + .thenThrow(new OutOfMemoryError("Out of memory during compression")); + + assertEquals(ClientTelemetryState.PUSH_NEEDED, telemetrySender.state()); + + assertThrows(KafkaException.class, telemetrySender::createRequest); + assertEquals(ClientTelemetryState.TERMINATED, telemetrySender.state()); + + // === Test 3: After termination, no more requests === + Optional> request3 = telemetrySender.createRequest(); + assertNotNull(request3); + assertFalse(request3.isPresent()); // No request created + assertEquals(ClientTelemetryState.TERMINATED, telemetrySender.state()); // State remains TERMINATED + } + } + @Test public void testHandleResponseGetSubscriptions() { ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); @@ -770,6 +906,163 @@ public void testTelemetryReporterInitiateCloseAlreadyInTerminatedStates() { .telemetrySender()).state()); } + @Test + public void testHandleFailedGetTelemetrySubscriptionsRequestWithRetriableException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException retriableException = new TimeoutException("Request timed out"); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(retriableException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS, telemetrySender.intervalMs()); + assertTrue(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedGetTelemetrySubscriptionsRequestWithWrappedRetriableException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + 
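[Editor's note, not part of the patch] The two compression-fallback tests above hinge on how a failed compression attempt is classified. A minimal, illustrative sketch of the distinction they assert (hypothetical helper name, not Kafka's internal API): a missing codec class, surfaced either directly as a NoClassDefFoundError or wrapped in a KafkaException, is recoverable, so the type is remembered as unsupported and the payload falls back to CompressionType.NONE; any other Error (e.g. the OutOfMemoryError case) is rethrown as a KafkaException and the sender moves to TERMINATED, exactly as asserted.

import org.apache.kafka.common.KafkaException;

// Illustrative only; the class and method names are hypothetical.
final class CompressionFailureClassifier {
    // A missing codec class (possibly wrapped once in a KafkaException) is recoverable.
    static boolean isMissingCodec(Throwable t) {
        return t instanceof NoClassDefFoundError
            || (t instanceof KafkaException && t.getCause() instanceof NoClassDefFoundError);
    }
}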
telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException wrappedException = new KafkaException(new DisconnectException("Connection lost")); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(wrappedException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS, telemetrySender.intervalMs()); + assertTrue(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedGetTelemetrySubscriptionsRequestWithFatalException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException fatalException = new AuthorizationException("Not authorized for telemetry"); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(fatalException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); + assertFalse(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedGetTelemetrySubscriptionsRequestWithWrappedFatalException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException wrappedException = new KafkaException("Version check failed", + new UnsupportedVersionException("Broker doesn't support telemetry")); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(wrappedException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); + assertFalse(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedPushTelemetryRequestWithRetriableException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); + + KafkaException networkException = new NetworkException("Network failure"); + telemetrySender.handleFailedPushTelemetryRequest(networkException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS, telemetrySender.intervalMs()); + assertTrue(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedPushTelemetryRequestWithFatalException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, 
time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); + + KafkaException authException = new AuthorizationException("Not authorized to push telemetry"); + telemetrySender.handleFailedPushTelemetryRequest(authException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); + assertFalse(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedRequestWithMultipleRetriableExceptionsInChain() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException chainedException = new TimeoutException("Outer timeout", + new DisconnectException("Inner disconnect")); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(chainedException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(ClientTelemetryReporter.DEFAULT_PUSH_INTERVAL_MS, telemetrySender.intervalMs()); + assertTrue(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedRequestWithGenericKafkaException() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + + KafkaException genericException = new KafkaException("Unknown error"); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest(genericException); + + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); + assertFalse(telemetrySender.enabled()); + } + + @Test + public void testHandleFailedRequestDuringTermination() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATING_PUSH_NEEDED)); + + KafkaException exception = new TimeoutException("Timeout"); + telemetrySender.handleFailedPushTelemetryRequest(exception); + + assertEquals(ClientTelemetryState.TERMINATING_PUSH_NEEDED, telemetrySender.state()); + assertTrue(telemetrySender.enabled()); + } + + @Test + public void testSequentialFailuresWithDifferentExceptionTypes() { + ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); + telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); + + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + 
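[Editor's note, not part of the patch] One reading consistent with the failure-handling assertions above: the sender keeps telemetry enabled and retries at the default push interval only when the exception, or its immediate cause, is retriable; anything else disables pushes (interval becomes Integer.MAX_VALUE). A sketch of that check, assuming the standard org.apache.kafka.common.errors hierarchy (TimeoutException, DisconnectException and NetworkException are retriable; AuthorizationException, UnsupportedVersionException and a bare KafkaException are not); the class name is hypothetical.

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;

// Illustrative only; not Kafka's internal API.
final class TelemetryErrorClassifier {
    // Retriable directly or via the immediate cause: keep retrying at the default interval.
    // Otherwise: disable telemetry pushes, as the tests above assert.
    static boolean isRetriable(KafkaException error) {
        return error instanceof RetriableException
            || error.getCause() instanceof RetriableException;
    }
}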
telemetrySender.handleFailedGetTelemetrySubscriptionsRequest( + new TimeoutException("Timeout 1")); + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertTrue(telemetrySender.enabled()); + + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest( + new DisconnectException("Disconnect")); + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertTrue(telemetrySender.enabled()); + + assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); + telemetrySender.handleFailedGetTelemetrySubscriptionsRequest( + new UnsupportedVersionException("Version not supported")); + assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); + assertFalse(telemetrySender.enabled()); + } + @AfterEach public void tearDown() { clientTelemetryReporter.close(); diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java index 41679bed3f7ac..47925ff8e0a02 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java @@ -30,10 +30,9 @@ import java.nio.ByteBuffer; import java.time.Instant; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.Set; import java.util.function.Predicate; import io.opentelemetry.proto.metrics.v1.Metric; @@ -69,12 +68,12 @@ public void testMaybeFetchErrorIntervalMs() { @Test public void testGetSelectorFromRequestedMetrics() { // no metrics selector - assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(Collections.emptyList())); + assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of())); assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(null)); // all metrics selector - assertEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(Collections.singletonList("*"))); + assertEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of("*"))); // specific metrics selector - Predicate selector = ClientTelemetryUtils.getSelectorFromRequestedMetrics(Arrays.asList("metric1", "metric2")); + Predicate selector = ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of("metric1", "metric2")); assertNotEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, selector); assertNotEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, selector); assertTrue(selector.test(new MetricKey("metric1.test"))); @@ -86,7 +85,7 @@ public void testGetSelectorFromRequestedMetrics() { @Test public void testGetCompressionTypesFromAcceptedList() { assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(null).size()); - assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(Collections.emptyList()).size()); + assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(List.of()).size()); List compressionTypes = new ArrayList<>(); compressionTypes.add(CompressionType.GZIP.id); @@ -123,10 +122,24 @@ public void 
testValidateIntervalMsInvalid(int pushIntervalMs) { @Test public void testPreferredCompressionType() { - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(Collections.emptyList())); - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(null)); - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(Arrays.asList(CompressionType.NONE, CompressionType.GZIP))); - assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(Arrays.asList(CompressionType.GZIP, CompressionType.NONE))); + // Test with no unsupported types + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(), Set.of())); + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.NONE, CompressionType.GZIP), Set.of())); + assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.NONE), Set.of())); + + // Test unsupported type filtering (returns first available type, or NONE if all are unsupported) + assertEquals(CompressionType.LZ4, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.GZIP))); + assertEquals(CompressionType.SNAPPY, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4, CompressionType.SNAPPY), Set.of(CompressionType.GZIP, CompressionType.LZ4))); + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.GZIP, CompressionType.LZ4))); + + // Test edge case: no match between requested and supported types + assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.SNAPPY))); + + // Test NullPointerException for null parameters + assertThrows(NullPointerException.class, () -> + ClientTelemetryUtils.preferredCompressionType(null, Set.of())); + assertThrows(NullPointerException.class, () -> + ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.NONE), null)); } @ParameterizedTest @@ -150,19 +163,19 @@ public void testCompressDecompress(CompressionType compressionType) throws IOExc private MetricsData getMetricsData() { List metricsList = new ArrayList<>(); metricsList.add(SinglePointMetric.sum( - new MetricKey("metricName"), 1.0, true, Instant.now(), null, Collections.emptySet()) + new MetricKey("metricName"), 1.0, true, Instant.now(), null, Set.of()) .builder().build()); metricsList.add(SinglePointMetric.sum( - new MetricKey("metricName1"), 100.0, false, Instant.now(), Instant.now(), Collections.emptySet()) + new MetricKey("metricName1"), 100.0, false, Instant.now(), Instant.now(), Set.of()) .builder().build()); metricsList.add(SinglePointMetric.deltaSum( - new MetricKey("metricName2"), 1.0, true, Instant.now(), Instant.now(), Collections.emptySet()) + new MetricKey("metricName2"), 1.0, true, Instant.now(), Instant.now(), Set.of()) .builder().build()); metricsList.add(SinglePointMetric.gauge( - new MetricKey("metricName3"), 1.0, Instant.now(), Collections.emptySet()) + new MetricKey("metricName3"), 1.0, Instant.now(), Set.of()) .builder().build()); metricsList.add(SinglePointMetric.gauge( - new MetricKey("metricName4"), Long.valueOf(100), Instant.now(), Collections.emptySet()) + new MetricKey("metricName4"), 
Long.valueOf(100), Instant.now(), Set.of()) .builder().build()); MetricsData.Builder builder = MetricsData.newBuilder(); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java b/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java index aac13f299fe2d..7e153be5862c7 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common.utils; +import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.AfterEach; @@ -23,6 +24,7 @@ import org.junit.jupiter.api.Test; import java.lang.management.ManagementFactory; +import java.util.Map; import javax.management.JMException; import javax.management.MBeanServer; @@ -41,38 +43,49 @@ public class AppInfoParserTest { private static final String METRICS_PREFIX = "app-info-test"; private static final String METRICS_ID = "test"; - private Metrics metrics; private MBeanServer mBeanServer; @BeforeEach public void setUp() { - metrics = new Metrics(new MockTime(1)); mBeanServer = ManagementFactory.getPlatformMBeanServer(); } @AfterEach - public void tearDown() { - metrics.close(); + public void tearDown() throws JMException { + if (mBeanServer.isRegistered(expectedAppObjectName())) { + mBeanServer.unregisterMBean(expectedAppObjectName()); + } } @Test public void testRegisterAppInfoRegistersMetrics() throws JMException { - registerAppInfo(); - registerAppInfoMultipleTimes(); + try (Metrics metrics = new Metrics(new MockTime(1))) { + registerAppInfo(metrics); + registerAppInfoMultipleTimes(metrics); + AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); + } } @Test public void testUnregisterAppInfoUnregistersMetrics() throws JMException { - registerAppInfo(); - AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); - - assertFalse(mBeanServer.isRegistered(expectedAppObjectName())); - assertNull(metrics.metric(metrics.metricName("commit-id", "app-info"))); - assertNull(metrics.metric(metrics.metricName("version", "app-info"))); - assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info"))); + try (Metrics metrics = new Metrics(new MockTime(1))) { + registerAppInfo(metrics); + AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); + + assertFalse(mBeanServer.isRegistered(expectedAppObjectName())); + assertNull(metrics.metric(metrics.metricName("commit-id", "app-info"))); + assertNull(metrics.metric(metrics.metricName("version", "app-info"))); + assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info"))); + + Map idTag = Map.of("client-id", METRICS_ID); + assertNull(metrics.metric(metrics.metricName("commit-id", "app-info", idTag))); + assertNull(metrics.metric(metrics.metricName("version", "app-info", idTag))); + assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag))); + AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); + } } - private void registerAppInfo() throws JMException { + private void registerAppInfo(Metrics metrics) throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, AppInfoParser.getCommitId()); assertEquals(EXPECTED_VERSION, AppInfoParser.getVersion()); @@ -82,9 +95,15 @@ private void registerAppInfo() throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info")).metricValue()); 
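[Editor's note, not part of the patch] Stepping back to the ClientTelemetryUtilsTest#testPreferredCompressionType assertions just above: every case is consistent with a simple rule of "first accepted type not locally known to be unsupported, otherwise NONE". A self-contained sketch of that rule, under the assumption that this is how the new (acceptedTypes, unsupportedTypes) signature behaves; it is not the actual ClientTelemetryUtils implementation.

import org.apache.kafka.common.record.CompressionType;

import java.util.List;
import java.util.Objects;
import java.util.Set;

// Illustrative only; class and method names are hypothetical.
final class CompressionSelectionSketch {
    static CompressionType select(List<CompressionType> accepted, Set<CompressionType> unsupported) {
        Objects.requireNonNull(accepted);    // matches the NullPointerException assertions
        Objects.requireNonNull(unsupported);
        return accepted.stream()
                .filter(type -> !unsupported.contains(type))
                .findFirst()
                .orElse(CompressionType.NONE);
    }
}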
assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info")).metricValue()); assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info")).metricValue()); + + Map idTag = Map.of("client-id", METRICS_ID); + assertTrue(mBeanServer.isRegistered(expectedAppObjectName())); + assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); } - private void registerAppInfoMultipleTimes() throws JMException { + private void registerAppInfoMultipleTimes(Metrics metrics) throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, AppInfoParser.getCommitId()); assertEquals(EXPECTED_VERSION, AppInfoParser.getVersion()); @@ -95,9 +114,37 @@ private void registerAppInfoMultipleTimes() throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info")).metricValue()); assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info")).metricValue()); assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info")).metricValue()); + + Map idTag = Map.of("client-id", METRICS_ID); + assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); } private ObjectName expectedAppObjectName() throws MalformedObjectNameException { return new ObjectName(METRICS_PREFIX + ":type=app-info,id=" + METRICS_ID); } + + @Test + public void testClientIdWontAddRepeatedly() throws JMException { + Map tags = Map.of( + "client-id", METRICS_ID, + "other-tag", "tag-value", + "another-tag", "another-value" + ); + Metrics metrics = new Metrics(new MetricConfig().tags(tags), new MockTime(1)); + AppInfoParser.registerAppInfo(METRICS_PREFIX, METRICS_ID, metrics, EXPECTED_START_MS); + + assertTrue(mBeanServer.isRegistered(expectedAppObjectName())); + assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", tags)).metricValue()); + assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", tags)).metricValue()); + assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", tags)).metricValue()); + + Map idTag = Map.of("client-id", METRICS_ID); + assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); + assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); + metrics.close(); + AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); + } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java deleted file mode 100644 index 057b6118e07a7..0000000000000 --- 
a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.utils; - -import org.junit.jupiter.api.Test; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; - -import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static org.junit.jupiter.api.Assertions.assertEquals; - -public class FlattenedIteratorTest { - - @Test - public void testNestedLists() { - List> list = asList( - asList("foo", "a", "bc"), - Collections.singletonList("ddddd"), - asList("", "bar2", "baz45")); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); - - // Ensure we can iterate multiple times - List flattened2 = new ArrayList<>(); - flattenedIterable.forEach(flattened2::add); - - assertEquals(flattened, flattened2); - } - - @Test - public void testEmptyList() { - List> list = emptyList(); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(emptyList(), flattened); - } - - @Test - public void testNestedSingleEmptyList() { - List> list = Collections.singletonList(emptyList()); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(emptyList(), flattened); - } - - @Test - public void testEmptyListFollowedByNonEmpty() { - List> list = asList( - emptyList(), - asList("boo", "b", "de")); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); - } - - @Test - public void testEmptyListInBetweenNonEmpty() { - List> list = asList( - Collections.singletonList("aadwdwdw"), - emptyList(), - asList("ee", "aa", "dd")); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); - } - - @Test - public void testEmptyListAtTheEnd() { - List> list = asList( - asList("ee", 
"dd"), - Collections.singletonList("e"), - emptyList()); - - Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); - List flattened = new ArrayList<>(); - flattenedIterable.forEach(flattened::add); - - assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); - } - -} diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 4220e84b7cc9f..74518fe0f442f 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -896,12 +896,171 @@ public void testPropsToMap() { assertValue(Collections.emptyMap()); } + @Test + public void testPropsToMapNonStringKey() { + ConfigException ce = assertThrows(ConfigException.class, () -> { + Properties props = new Properties(); + props.put(1, "value"); + Utils.propsToMap(props); + }); + assertTrue(ce.getMessage().contains("One or more keys is not a string.")); + + ce = assertThrows(ConfigException.class, () -> { + Properties props = new Properties(); + props.put(true, "value"); + props.put('a', "value"); + Utils.propsToMap(props); + }); + assertEquals("One or more keys is not a string.", ce.getMessage()); + } + + @Test + public void testPropsToMapWithDefaults() { + Properties defaultProperties = new Properties(); + defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); + defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); + + Properties actualProperties = new Properties(defaultProperties); + actualProperties.setProperty("ActualKey1", "ActualValue1"); + actualProperties.setProperty("ActualKey2", "ActualValue2"); + + final Map mapProperties = Utils.propsToMap(actualProperties); + + Map expectedMap = new HashMap<>(); + expectedMap.put("DefaultKey1", "DefaultValue1"); + expectedMap.put("DefaultKey2", "DefaultValue2"); + expectedMap.put("ActualKey1", "ActualValue1"); + expectedMap.put("ActualKey2", "ActualValue2"); + + assertEquals(expectedMap, mapProperties); + } + + @Test + public void testPropsToMapWithDefaultsAndSameKey() { + Properties defaultProperties = new Properties(); + defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); + defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); + + Properties actualProperties = new Properties(defaultProperties); + actualProperties.setProperty("DefaultKey1", "ActualValue1"); + actualProperties.setProperty("ActualKey2", "ActualValue2"); + + final Map mapProperties = Utils.propsToMap(actualProperties); + + Map expectedMap = new HashMap<>(); + expectedMap.put("DefaultKey1", "ActualValue1"); + expectedMap.put("DefaultKey2", "DefaultValue2"); + expectedMap.put("ActualKey2", "ActualValue2"); + + assertEquals(expectedMap, mapProperties); + } + private static void assertValue(Object value) { Properties props = new Properties(); props.put("key", value); assertEquals(Utils.propsToMap(props).get("key"), value); } + @Test + public void testCastToStringObjectMap() { + Map map = new HashMap<>(); + map.put("key1", "value1"); + map.put("key2", 1); + + Map expectedMap = new HashMap<>(); + expectedMap.put("key1", "value1"); + expectedMap.put("key2", 1); + + assertEquals(map, expectedMap); + } + + @Test + public void testCastToStringObjectMapNonStringKey() { + ConfigException ce = assertThrows(ConfigException.class, () -> { + Map map = new HashMap<>(); + map.put(1, "value"); + Utils.castToStringObjectMap(map); + }); + 
assertTrue(ce.getMessage().contains("Key must be a string.")); + + ce = assertThrows(ConfigException.class, () -> { + Map map = new HashMap<>(); + map.put(true, "value"); + map.put('a', "value"); + Utils.castToStringObjectMap(map); + }); + assertTrue(ce.getMessage().contains("Key must be a string.")); + } + + @Test + public void testCastToStringObjectMapPropertiesAsInput() { + Properties props = new Properties(); + props.put("key1", "value1"); + props.put("key2", "value2"); + + Map expectedMap = new HashMap<>(); + expectedMap.put("key1", "value1"); + expectedMap.put("key2", "value2"); + + assertEquals(expectedMap, Utils.castToStringObjectMap(props)); + assertEquals(Utils.propsToMap(props), Utils.castToStringObjectMap(props)); + } + + @Test + public void testCastToStringObjectMapPropertiesNonStringKey() { + ConfigException ce = assertThrows(ConfigException.class, () -> { + Properties props = new Properties(); + props.put(1, "value"); + Utils.castToStringObjectMap(props); + }); + assertEquals("One or more keys is not a string.", ce.getMessage()); + + ce = assertThrows(ConfigException.class, () -> { + Properties props = new Properties(); + props.put(true, "value"); + props.put('a', "value"); + Utils.castToStringObjectMap(props); + }); + assertEquals("One or more keys is not a string.", ce.getMessage()); + } + + @Test + public void testCastToStringObjectMapPropertiesWithDefaults() { + Properties defaultProperties = new Properties(); + defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); + defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); + + Properties actualProperties = new Properties(defaultProperties); + actualProperties.setProperty("ActualKey1", "ActualValue1"); + actualProperties.setProperty("ActualKey2", "ActualValue2"); + + Map expectedMap = new HashMap<>(); + expectedMap.put("DefaultKey1", "DefaultValue1"); + expectedMap.put("DefaultKey2", "DefaultValue2"); + expectedMap.put("ActualKey1", "ActualValue1"); + expectedMap.put("ActualKey2", "ActualValue2"); + + assertEquals(expectedMap, Utils.castToStringObjectMap(actualProperties)); + } + + @Test + public void testCastToStringObjectMapPropertiesWithDefaultsAndSameKey() { + Properties defaultProperties = new Properties(); + defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); + defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); + + Properties actualProperties = new Properties(defaultProperties); + actualProperties.setProperty("DefaultKey1", "ActualValue1"); + actualProperties.setProperty("ActualKey2", "ActualValue2"); + + Map expectedMap = new HashMap<>(); + expectedMap.put("DefaultKey1", "ActualValue1"); + expectedMap.put("DefaultKey2", "DefaultValue2"); + expectedMap.put("ActualKey2", "ActualValue2"); + + assertEquals(expectedMap, Utils.castToStringObjectMap(actualProperties)); + } + @Test public void testCloseAllQuietly() { AtomicReference exception = new AtomicReference<>(); @@ -1110,6 +1269,13 @@ public void testTryAll() throws Throwable { assertEquals(expected, recorded); } + @Test + public void testMsToNs() { + assertEquals(1000000, Utils.msToNs(1)); + assertEquals(0, Utils.msToNs(0)); + assertThrows(IllegalArgumentException.class, () -> Utils.msToNs(Long.MAX_VALUE)); + } + private Callable recordingCallable(Map recordingMap, String success, TestException failure) { return () -> { if (success == null) diff --git a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java 
b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java index ea510414f0481..6c80d2b5df5f1 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java @@ -52,7 +52,7 @@ public void accept(ApiKeyVersionsSource source) { if (toVersion > latestVersion) { throw new IllegalArgumentException(String.format("The toVersion %s is newer than the latest version %s", - fromVersion, latestVersion)); + toVersion, latestVersion)); } } diff --git a/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java b/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java index 06d5a4e93eb53..5a6d8b291b0ba 100644 --- a/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java +++ b/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java @@ -38,8 +38,8 @@ public void testRequestMetadataEquals() { assertEquals(requestMetadata, requestMetadata); - assertNotEquals(requestMetadata, null); - assertNotEquals(requestMetadata, new Object()); + assertNotEquals(null, requestMetadata); + assertNotEquals(new Object(), requestMetadata); assertNotEquals(requestMetadata, new RequestMetadata( new ConfigResource(Type.BROKER, "1"), Collections.singletonMap("foo", "bar") diff --git a/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java b/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java index a1d676e15a3aa..715486c1ae36b 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java +++ b/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java @@ -118,6 +118,7 @@ public static void resetCounters() { CONFIG_COUNT.set(0); THROW_CONFIG_EXCEPTION.set(0); CLUSTER_META.set(null); + THROW_ON_CONFIG_EXCEPTION_THRESHOLD.set(0); CLUSTER_ID_BEFORE_ON_CONSUME.set(NO_CLUSTER_ID); } diff --git a/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java b/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java index ac2865e9bb8b6..d88792a06e81b 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java +++ b/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java @@ -20,11 +20,12 @@ import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.serialization.Deserializer; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -public class MockDeserializer implements ClusterResourceListener, Deserializer { +public class MockDeserializer implements ClusterResourceListener, Deserializer { public static AtomicInteger initCount = new AtomicInteger(0); public static AtomicInteger closeCount = new AtomicInteger(0); public static AtomicReference clusterMeta = new AtomicReference<>(); @@ -52,11 +53,12 @@ public void configure(Map configs, boolean isKey) { } @Override - public byte[] deserialize(String topic, byte[] data) { + public String deserialize(String topic, byte[] data) { // This will ensure that we get the cluster metadata when deserialize is called for the first time // as subsequent compareAndSet operations will fail. 
clusterIdBeforeDeserialize.compareAndSet(noClusterId, clusterMeta.get()); - return data; + if (data == null) return null; + return new String(data, StandardCharsets.UTF_8); } @Override diff --git a/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java b/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java index acc69ab44e31a..9e69f57c96f2c 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java +++ b/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java @@ -110,6 +110,7 @@ public static void resetCounters() { ON_SUCCESS_COUNT.set(0); ON_ERROR_COUNT.set(0); ON_ERROR_WITH_METADATA_COUNT.set(0); + THROW_ON_CONFIG_EXCEPTION_THRESHOLD.set(0); CLUSTER_META.set(null); CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.set(NO_CLUSTER_ID); } diff --git a/clients/src/test/java/org/apache/kafka/test/MockSerializer.java b/clients/src/test/java/org/apache/kafka/test/MockSerializer.java index bfab4b592b88e..890b01a400f61 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockSerializer.java +++ b/clients/src/test/java/org/apache/kafka/test/MockSerializer.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.serialization.Serializer; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -public class MockSerializer implements ClusterResourceListener, Serializer { +public class MockSerializer implements ClusterResourceListener, Serializer { public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); public static final AtomicReference CLUSTER_META = new AtomicReference<>(); @@ -35,11 +36,12 @@ public MockSerializer() { } @Override - public byte[] serialize(String topic, byte[] data) { + public byte[] serialize(String topic, String data) { // This will ensure that we get the cluster metadata when serialize is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_SERIALIZE.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); - return data; + if (data == null) return null; + return data.getBytes(StandardCharsets.UTF_8); } @Override diff --git a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java index 72a1ccfe65106..7b4680a8b0446 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java @@ -84,7 +84,6 @@ import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.ArrayList; -import java.util.Collections; import java.util.Date; import java.util.Enumeration; import java.util.HashMap; @@ -111,7 +110,7 @@ public class TestSslUtils { * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" * @param pair the KeyPair * @param days how many days from now the Certificate is valid for, or - for negative values - how many days before now - * @param algorithm the signing algorithm, eg "SHA1withRSA" + * @param algorithm the signing algorithm, eg "SHA256withRSA" * @return the self-signed certificate * @throws CertificateException thrown if a security error or an IO error occurred. */ @@ -132,7 +131,7 @@ public static X509Certificate generateCertificate(String dn, KeyPair pair, * CA. * @param parentKeyPair The key pair of the issuer. 
Leave null if you want to generate a root * CA. - * @param algorithm the signing algorithm, eg "SHA1withRSA" + * @param algorithm the signing algorithm, eg "SHA256withRSA" * @return the signed certificate * @throws CertificateException */ @@ -211,6 +210,7 @@ public static Map createSslConfig(String keyManagerAlgorithm, St sslConfigs.put(SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, keyManagerAlgorithm); sslConfigs.put(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, trustManagerAlgorithm); + sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); @@ -372,7 +372,7 @@ static String pem(Certificate cert) throws IOException { try (PemWriter pemWriter = new PemWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8))) { pemWriter.writeObject(new JcaMiscPEMGenerator(cert)); } - return new String(out.toByteArray(), StandardCharsets.UTF_8); + return out.toString(StandardCharsets.UTF_8); } static String pem(PrivateKey privateKey, Password password) throws IOException { @@ -390,7 +390,7 @@ static String pem(PrivateKey privateKey, Password password) throws IOException { } } } - return new String(out.toByteArray(), StandardCharsets.UTF_8); + return out.toString(StandardCharsets.UTF_8); } public static class CertificateBuilder { @@ -399,7 +399,7 @@ public static class CertificateBuilder { private byte[] subjectAltName; public CertificateBuilder() { - this(30, "SHA1withRSA"); + this(30, "SHA256withRSA"); } public CertificateBuilder(int days, String algorithm) { @@ -444,14 +444,19 @@ public X509Certificate generate(X500Name dn, KeyPair keyPair) throws Certificate SubjectPublicKeyInfo subPubKeyInfo = SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); BcContentSignerBuilder signerBuilder; String keyAlgorithm = keyPair.getPublic().getAlgorithm(); - if (keyAlgorithm.equals("RSA")) - signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); - else if (keyAlgorithm.equals("DSA")) - signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); - else if (keyAlgorithm.equals("EC")) - signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); - else - throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); + switch (keyAlgorithm) { + case "RSA": + signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); + break; + case "DSA": + signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); + break; + case "EC": + signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); + break; + default: + throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); + } ContentSigner sigGen = signerBuilder.build(privateKeyAsymKeyParam); // Negative numbers for "days" can be used to generate expired certificates Date now = new Date(); @@ -520,14 +525,19 @@ public X509Certificate generateSignedCertificate(X500Name dn, KeyPair keyPair, SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); BcContentSignerBuilder signerBuilder; String keyAlgorithm = keyPair.getPublic().getAlgorithm(); - if (keyAlgorithm.equals("RSA")) - signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); - else if (keyAlgorithm.equals("DSA")) - signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); - else if (keyAlgorithm.equals("EC")) - signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); - else - throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); + switch (keyAlgorithm) { + case "RSA": + signerBuilder = new 
BcRSAContentSignerBuilder(sigAlgId, digAlgId); + break; + case "DSA": + signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); + break; + case "EC": + signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); + break; + default: + throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); + } ContentSigner sigGen = signerBuilder.build(privateKeyAsymKeyParam); // Negative numbers for "days" can be used to generate expired certificates Date now = new Date(); @@ -686,6 +696,7 @@ private Map buildJks() throws IOException, GeneralSecurityExcept sslConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); sslConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); sslConfigs.put(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); + sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); @@ -701,7 +712,8 @@ private Map buildPem() throws IOException, GeneralSecurityExcept Map sslConfigs = new HashMap<>(); sslConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, tlsProtocol); - sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsProtocol)); + sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(tlsProtocol)); + sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); if (connectionMode != ConnectionMode.CLIENT || useClientCert) { KeyPair keyPair = generateKeyPair(algorithm); @@ -838,6 +850,7 @@ public static Map generateConfigsWithCertificateChains(String tl List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); + sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); return sslConfigs; } diff --git a/clients/src/test/java/org/apache/kafka/test/TestUtils.java b/clients/src/test/java/org/apache/kafka/test/TestUtils.java index 37f68a97ecb1c..078d006e37a37 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestUtils.java @@ -52,6 +52,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Base64; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -73,7 +74,6 @@ import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -160,7 +160,9 @@ public static MetadataSnapshot metadataSnapshotWith(final int nodes, final Map threads = Thread.getAllStackTraces().keySet().stream() - .filter(t -> t.isDaemon() == isDaemon && t.isAlive() && t.getName().startsWith(threadName)) - .collect(Collectors.toList()); - int threadCount = threads.size(); - assertEquals(0, threadCount); + public static void assertNoLeakedThreadsWithNameAndDaemonStatus(String threadName, boolean isDaemon) throws InterruptedException { + waitForCondition(() -> Thread.getAllStackTraces().keySet().stream() + .noneMatch(t -> t.isDaemon() == isDaemon && t.isAlive() && t.getName().startsWith(threadName)), String.format("Thread leak detected: %s", threadName)); } /** @@ -211,6 +210,17 @@ public static String randomString(final int len) { return b.toString(); } + /** + * Select a random element from collections + * + * @param elements A collection we can select + * @return A element from collection + 
*/ + public static T randomSelect(final Collection elements) { + List elementsCopy = new ArrayList<>(elements); + return elementsCopy.get(SEEDED_RANDOM.nextInt(elementsCopy.size())); + } + /** * Create an empty file in the default temporary-file directory, using the given prefix and suffix * to generate its name. @@ -505,7 +515,7 @@ public static void isValidClusterId(String clusterId) { assertNotNull(clusterId); // Base 64 encoded value is 22 characters - assertEquals(clusterId.length(), 22); + assertEquals(22, clusterId.length()); Pattern clusterIdPattern = Pattern.compile("[a-zA-Z0-9_\\-]+"); Matcher matcher = clusterIdPattern.matcher(clusterId); @@ -516,7 +526,7 @@ public static void isValidClusterId(String clusterId) { byte[] decodedUuid = Base64.getDecoder().decode(originalClusterId); // We expect 16 bytes, same as the input UUID. - assertEquals(decodedUuid.length, 16); + assertEquals(16, decodedUuid.length); //Check if it can be converted back to a UUID. try { @@ -570,17 +580,6 @@ public static ByteBuffer toBuffer(UnalignedRecords records) { return toBuffer(records.toSend()); } - public static Set generateRandomTopicPartitions(int numTopic, int numPartitionPerTopic) { - Set tps = new HashSet<>(); - for (int i = 0; i < numTopic; i++) { - String topic = randomString(32); - for (int j = 0; j < numPartitionPerTopic; j++) { - tps.add(new TopicPartition(topic, j)); - } - } - return tps; - } - /** * Assert that a future raises an expected exception cause type. * This method will wait for the future to complete or timeout(15000 milliseconds). diff --git a/committer-tools/kafka-merge-pr.py b/committer-tools/kafka-merge-pr.py index d649181d74600..be86078687997 100755 --- a/committer-tools/kafka-merge-pr.py +++ b/committer-tools/kafka-merge-pr.py @@ -70,7 +70,7 @@ DEV_BRANCH_NAME = "trunk" -DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.1.0") +DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.2.0") ORIGINAL_HEAD = "" diff --git a/committer-tools/reviewers.py b/committer-tools/reviewers.py index a3539f9d9c386..06ba4919213ab 100755 --- a/committer-tools/reviewers.py +++ b/committer-tools/reviewers.py @@ -63,8 +63,9 @@ def append_message_to_pr_body(pr: int , message: str): return print(f"""New PR body will be:\n\n---\n{updated_pr_body}---\n""") - choice = input(f'Update the body of "{pr_title}"? (y/n): ').strip().lower() - if choice in ['n', 'no']: + choice = input(f'Update the body of "{pr_title}"? [Y/n] ').strip().lower() + if choice not in ['', 'y']: + print("Abort.") return try: diff --git a/committer-tools/update-cache.sh b/committer-tools/update-cache.sh index 015c2b51d5dfd..6dbc12e8a1bb1 100755 --- a/committer-tools/update-cache.sh +++ b/committer-tools/update-cache.sh @@ -16,22 +16,9 @@ # specific language governing permissions and limitations # under the License. -if ! git config --get alias.update-cache > /dev/null; then - printf '\e[36m%s\n\n %s\n\e[0m\n' \ - 'Hint: you can create a Git alias to execute this script. Example:' \ - "git config alias.update-cache '!bash $(realpath "$0")'" -fi - -key="$( - gh cache list \ - --key 'gradle-home-v1|Linux-X64|test' \ - --sort 'created_at' \ - --limit 1 \ - --json 'key' \ - --jq '.[].key' -)" - -sha="$(cut -d '-' -f 5 <<< "$key")" +# Get the latest commit SHA that contains the Gradle build cache. +sha=$(curl -s "https://api.github.com/repos/apache/kafka/actions/caches?key=gradle-home-v1&ref=refs/heads/trunk" \ + | jq -r '.actions_caches | max_by(.created_at) | .key | split("-")[4]') if ! 
git show "$sha" &> /dev/null; then printf '\e[33m%s\n%s\e[0m\n' \ diff --git a/config/broker.properties b/config/broker.properties index 61a536c9b3071..4a75f0b12d6c0 100644 --- a/config/broker.properties +++ b/config/broker.properties @@ -75,8 +75,8 @@ log.dirs=/tmp/kraft-broker-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 +# This value is recommended to be increased based on the installation resources. +num.recovery.threads.per.data.dir=2 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" diff --git a/config/consumer.properties b/config/consumer.properties index 01bb12eb0899f..f65e529904148 100644 --- a/config/consumer.properties +++ b/config/consumer.properties @@ -4,23 +4,135 @@ # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# see org.apache.kafka.clients.consumer.ConsumerConfig for more details -# list of brokers used for bootstrapping knowledge about the rest of the cluster -# format: host1:port1,host2:port2 ... +# See org.apache.kafka.clients.consumer.ConsumerConfig for more details. +# Consider using environment variables or external configuration management +# for sensitive information like passwords and environment-specific settings. + +##################### Consumer Basics ####################### + +# List of Kafka brokers used for initial cluster discovery and metadata retrieval. +# Format: host1:port1,host2:port2,host3:port3 +# Include all brokers for high availability bootstrap.servers=localhost:9092 -# consumer group id +# Client identifier for logging and metrics. +# Helps with debugging and monitoring. +client.id=test-consumer + +##################### Transaction Support ##################### + +# Isolation level for reading messages. +# Options: read_uncommitted (default), read_committed (for exactly-once semantics). +isolation.level=read_uncommitted + +##################### Consumer Group Configuration ##################### + +# Unique identifier for this consumer group. +# All consumers with the same group.id will share partition consumption. group.id=test-consumer-group -# What to do when there is no initial offset in Kafka or if the current -# offset does not exist any more on the server: latest, earliest, none -#auto.offset.reset= +# What to do when there is no initial offset or if the current offset no longer exists. +# Options: earliest (from beginning), latest (from end), none (throw exception). +# Use 'earliest' to avoid data loss on first run. +auto.offset.reset=earliest + +##################### Partition Assignment Strategy ##################### + +# Strategy for assigning partitions to consumers in a group. 
+# Options: RangeAssignor, RoundRobinAssignor, StickyAssignor, CooperativeStickyAssignor. +# CooperativeStickyAssignor is recommended (requires Kafka 2.4+). +partition.assignment.strategy=org.apache.kafka.clients.consumer.CooperativeStickyAssignor + +##################### Deserialization ##################### + +# Deserializer class for message keys. +# Common options: StringDeserializer, ByteArrayDeserializer, AvroDeserializer. +key.deserializer=org.apache.kafka.common.serialization.StringDeserializer + +# Deserializer class for message values. +value.deserializer=org.apache.kafka.common.serialization.StringDeserializer + +##################### Offset Management ##################### + +# Whether to automatically commit offsets in the background. +# Set to false for manual offset management and exactly-once processing. +enable.auto.commit=true + +# Frequency (in milliseconds) at which offsets are auto-committed. +# Lower values provide better fault tolerance but increase broker load. +auto.commit.interval.ms=5000 + +##################### Classic Group Session Management ##################### + +# Timeout for detecting consumer failures when using group management. +# Must be between group.min.session.timeout.ms and group.max.session.timeout.ms (broker config). +session.timeout.ms=30000 + +# Expected time between heartbeats when using group management. +# Should be lower than session.timeout.ms (typically 1/3 of session timeout). +heartbeat.interval.ms=10000 + +# Maximum time between successive calls to poll(). +# If exceeded, consumer is considered failed and partition rebalancing occurs. +max.poll.interval.ms=300000 + +##################### Retry And Error Handling ##################### + +# Initial and max time to wait for failed request retries. +# The retry.backoff.ms is the initial backoff value and will increase exponentially +# for each failed request, up to the retry.backoff.max.ms value. +retry.backoff.ms=100 +retry.backoff.max.ms=1000 + +# Total time to wait for a response to a request. +request.timeout.ms=40000 + +# Close idle connections after this many milliseconds. +connections.max.idle.ms=540000 + +##################### Security Configuration ##################### + +# Security protocol for communication with brokers. +# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL +#security.protocol=SASL_SSL + +# SSL configuration. +#ssl.truststore.location=/path/to/truststore.jks +#ssl.truststore.password=truststore-password + +# SASL configuration. +#sasl.mechanism=PLAIN +#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ +# username="your-username" \ +# password="your-password"; + +##################### Performance And Throughput ##################### + +# Minimum data size (bytes) and maximum polling timeout (ms). +# Whichever condition is met first will trigger the fetch operation. +# Balances response latency against message batching efficiency. +# For remote partition fetching, configure remote.fetch.max.wait.ms instead. +fetch.min.bytes=1 +fetch.max.wait.ms=500 + +# Set soft limits to the amount of bytes per fetch request and partition. +# Both max.partition.fetch.bytes and fetch.max.bytes limits can be exceeded when +# the first batch in the first non-empty partition is larger than the configured +# value to ensure that the consumer can make progress. +# Configuring message.max.bytes (broker config) or max.message.bytes (topic config) +# <= fetch.max.bytes prevents oversized fetch responses. 
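To illustrate how the consumer settings documented in this file are picked up by client code, here is a minimal sketch. It assumes only the standard kafka-clients API; the file path, class name, and topic name are illustrative and not part of this change.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.io.FileInputStream;
import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class ConsumerPropertiesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Load the settings shown above (bootstrap.servers, group.id, deserializers, ...).
        try (FileInputStream in = new FileInputStream("config/consumer.properties")) {
            props.load(in);
        }
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("test-topic"));
            // With enable.auto.commit=true, offsets are committed in the background every
            // auto.commit.interval.ms; poll() must be called at least once per
            // max.poll.interval.ms or the group coordinator rebalances the partitions.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("%s-%d@%d: %s%n",
                        record.topic(), record.partition(), record.offset(), record.value());
            }
        }
    }
}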
+fetch.max.bytes=52428800 +max.partition.fetch.bytes=1048576 + +# Maximum number of records returned in a single poll() call. +# Higher values increase throughput but may cause longer processing delays. +max.poll.records=500 diff --git a/config/controller.properties b/config/controller.properties index 84963c95701d1..3cf3a58b606d6 100644 --- a/config/controller.properties +++ b/config/controller.properties @@ -75,8 +75,8 @@ log.dirs=/tmp/kraft-controller-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 +# This value is recommended to be increased based on the installation resources. +num.recovery.threads.per.data.dir=2 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" diff --git a/config/log4j2.yaml b/config/log4j2.yaml index 49bcf78d136c6..de263c57c928e 100644 --- a/config/log4j2.yaml +++ b/config/log4j2.yaml @@ -44,7 +44,7 @@ Configuration: # State Change appender - name: StateChangeAppender fileName: "${sys:kafka.logs.dir}/state-change.log" - filePattern: "${sys:kafka.logs.dir}/stage-change.log.%d{yyyy-MM-dd-HH}" + filePattern: "${sys:kafka.logs.dir}/state-change.log.%d{yyyy-MM-dd-HH}" PatternLayout: pattern: "${logPattern}" TimeBasedTriggeringPolicy: diff --git a/config/producer.properties b/config/producer.properties index 3a999e7c17e8c..6165ce9ff571c 100644 --- a/config/producer.properties +++ b/config/producer.properties @@ -12,35 +12,127 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# see org.apache.kafka.clients.producer.ProducerConfig for more details -############################# Producer Basics ############################# +# See org.apache.kafka.clients.producer.ProducerConfig for more details. +# Consider using environment variables or external configuration management +# for sensitive information like passwords and environment-specific settings. -# list of brokers used for bootstrapping knowledge about the rest of the cluster -# format: host1:port1,host2:port2 ... +##################### Producer Basics ##################### + +# List of Kafka brokers used for initial cluster discovery and metadata retrieval. +# Format: host1:port1,host2:port2,host3:port3 +# Include all brokers for high availability. bootstrap.servers=localhost:9092 -# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd -compression.type=none +# Client identifier for logging and metrics. +# Helps with debugging and monitoring. +client.id=test-producer + +##################### Transaction Support ##################### + +# Transactional ID for the producer. +# Must be unique across all producer instances. +# Enables exactly-once semantics across multiple partitions/topics. +#transactional.id=test-transactional-id + +# Maximum amount of time in milliseconds that a transaction will remain open. +# Only applies when transactional.id is set. +transaction.timeout.ms=60000 + +##################### Partitioning ##################### + +# Name of the partitioner class for partitioning records. 
+# Default uses "sticky" partitioning which improves throughput by filling batches +# Options: DefaultPartitioner, RoundRobinPartitioner, UniformStickyPartitioner. +#partitioner.class=org.apache.kafka.clients.producer.RoundRobinPartitioner + +##################### Serialization ##################### + +# Serializer class for message keys. +# Common options: StringSerializer, ByteArraySerializer, AvroSerializer. +key.serializer=org.apache.kafka.common.serialization.StringSerializer + +# Serializer class for message values. +value.serializer=org.apache.kafka.common.serialization.StringSerializer + +##################### Reliability And Durability ##################### + +# Number of acknowledgments the producer requires the leader to have received. +# Options: 0 (no ack), 1 (leader only), all/-1 (all in-sync replicas). +# Use 'all' for maximum durability. +acks=all -# name of the partitioner class for partitioning records; -# The default uses "sticky" partitioning logic which spreads the load evenly between partitions, but improves throughput by attempting to fill the batches sent to each partition. -#partitioner.class= +# Number of retries for failed sends. +# Set to high value or Integer.MAX_VALUE for maximum reliability. +retries=2147483647 -# the maximum amount of time the client will wait for the response of a request -#request.timeout.ms= +# Initial and max time to wait for failed request retries. +# The retry.backoff.ms is the initial backoff value and will increase exponentially +# for each failed request, up to the retry.backoff.max.ms value. +retry.backoff.ms=100 +retry.backoff.max.ms=1000 -# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for -#max.block.ms= +# Enable idempotent producer to prevent duplicate messages. +# Ensures exactly-once delivery semantics when combined with proper consumer settings. +enable.idempotence=true + +# Maximum number of unacknowledged requests the client will send on a single connection. +# Must be <= 5 when enable.idempotence=true to maintain ordering guarantees. +max.in.flight.requests.per.connection=5 + +##################### Timeouts And Blocking ##################### + +# Maximum amount of time the client will wait for the response of a request. +# Should be higher than replica.lag.time.max.ms (broker config). +request.timeout.ms=30000 + +# How long KafkaProducer.send() and KafkaProducer.partitionsFor() will block. +# Should be higher than request.timeout.ms. +max.block.ms=60000 + +# Timeout for broker requests, including produce requests. +# Should be greater than or equal to the sum of request.timeout.ms and linger.ms. +delivery.timeout.ms=120000 + +##################### Security Configuration ##################### + +# Security protocol for communication with brokers. +# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL +#security.protocol=SASL_SSL + +# SSL configuration. +#ssl.truststore.location=/path/to/truststore.jks +#ssl.truststore.password=truststore-password + +# SASL configuration. +#sasl.mechanism=PLAIN +#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ +# username="your-username" \ +# password="your-password"; + +##################### Performance And Throughput ##################### + +# Compression codec for all data generated. +# Options: none, gzip, snappy, lz4, zstd. +# Can greatly improve throughput at the cost of increased CPU usage. 
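For context on the reliability settings documented in this file (acks=all, enable.idempotence=true, high retries), here is a minimal sketch of a client that loads them and sends one record; the file path, class name, and topic name are illustrative only.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.io.FileInputStream;
import java.util.Properties;

public class ProducerPropertiesExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Load the settings shown above (bootstrap.servers, serializers, acks, idempotence, ...).
        try (FileInputStream in = new FileInputStream("config/producer.properties")) {
            props.load(in);
        }
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("test-topic", "key", "value");
            // send() is asynchronous; the callback fires once the broker acknowledges
            // the record according to the configured acks setting.
            producer.send(record, (RecordMetadata metadata, Exception e) -> {
                if (e != null) {
                    e.printStackTrace();
                } else {
                    System.out.printf("wrote to %s-%d@%d%n",
                            metadata.topic(), metadata.partition(), metadata.offset());
                }
            });
            producer.flush();
        }
    }
}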
+compression.type=none -# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together -#linger.ms= +# Producer will wait up to this delay to batch records together. +# Higher values increase throughput but add latency. +# Set to 0 for lowest latency, 5-100ms for balanced throughput/latency. +linger.ms=5 -# the maximum size of a request in bytes -#max.request.size= +# Default batch size in bytes when batching multiple records sent to a partition. +# Larger batches improve throughput but use more memory. +# 16KB is a good starting point, adjust based on message size and throughput needs. +batch.size=16384 -# the default batch size in bytes when batching multiple records sent to a partition -#batch.size= +# Total bytes of memory the producer can use to buffer records waiting to be sent. +# Should be larger than batch.size * number of partitions you're writing to. +# 32MB is reasonable for most use cases. +buffer.memory=33554432 -# the total bytes of memory the producer can use to buffer records waiting to be sent to the server -#buffer.memory= +# Maximum size of a request in bytes. +# Should accommodate your largest batch size plus overhead. +# 1MB is default and suitable for most cases. +max.request.size=1048576 diff --git a/config/server.properties b/config/server.properties index d4b1fe0bc4dbd..7f1773d354ea1 100644 --- a/config/server.properties +++ b/config/server.properties @@ -78,8 +78,8 @@ log.dirs=/tmp/kraft-combined-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 +# This value is recommended to be increased based on the installation resources. +num.recovery.threads.per.data.dir=2 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java index cf5f01502c83b..b8e10c3dbde18 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java @@ -20,59 +20,55 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Collections; import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; public class ConnectSchema implements Schema { /** * Maps {@link Schema.Type}s to a list of Java classes that can be used to represent them. */ - private static final Map>> SCHEMA_TYPE_CLASSES = new EnumMap<>(Type.class); + private static final Map>> SCHEMA_TYPE_CLASSES = Collections.unmodifiableMap(new EnumMap<>(Map.ofEntries( + Map.entry(Type.INT8, List.of(Byte.class)), + Map.entry(Type.INT16, List.of(Short.class)), + Map.entry(Type.INT32, List.of(Integer.class)), + Map.entry(Type.INT64, List.of(Long.class)), + Map.entry(Type.FLOAT32, List.of(Float.class)), + Map.entry(Type.FLOAT64, List.of(Double.class)), + Map.entry(Type.BOOLEAN, List.of(Boolean.class)), + Map.entry(Type.STRING, List.of(String.class)), + // Bytes are special and have 2 representations. 
byte[] causes problems because it doesn't handle equals() and + // hashCode() like we want objects to, so we support both byte[] and ByteBuffer. Using plain byte[] can cause + // those methods to fail, so ByteBuffers are recommended + Map.entry(Type.BYTES, List.of(byte[].class, ByteBuffer.class)), + Map.entry(Type.ARRAY, List.of(List.class)), + Map.entry(Type.MAP, List.of(Map.class)), + Map.entry(Type.STRUCT, List.of(Struct.class)) + ))); /** * Maps known logical types to a list of Java classes that can be used to represent them. */ - private static final Map>> LOGICAL_TYPE_CLASSES = new HashMap<>(); + // We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since that's only used to determine schemas for + // schemaless data and logical types will have ambiguous schemas (e.g. many of them use the same Java class) so + // they should not be used without schemas. + private static final Map>> LOGICAL_TYPE_CLASSES = Map.of( + Decimal.LOGICAL_NAME, List.of(BigDecimal.class), + Date.LOGICAL_NAME, List.of(java.util.Date.class), + Time.LOGICAL_NAME, List.of(java.util.Date.class), + Timestamp.LOGICAL_NAME, List.of(java.util.Date.class) + ); /** * Maps the Java classes to the corresponding {@link Schema.Type}. */ - private static final Map, Type> JAVA_CLASS_SCHEMA_TYPES = new HashMap<>(); - - static { - SCHEMA_TYPE_CLASSES.put(Type.INT8, Collections.singletonList(Byte.class)); - SCHEMA_TYPE_CLASSES.put(Type.INT16, Collections.singletonList(Short.class)); - SCHEMA_TYPE_CLASSES.put(Type.INT32, Collections.singletonList(Integer.class)); - SCHEMA_TYPE_CLASSES.put(Type.INT64, Collections.singletonList(Long.class)); - SCHEMA_TYPE_CLASSES.put(Type.FLOAT32, Collections.singletonList(Float.class)); - SCHEMA_TYPE_CLASSES.put(Type.FLOAT64, Collections.singletonList(Double.class)); - SCHEMA_TYPE_CLASSES.put(Type.BOOLEAN, Collections.singletonList(Boolean.class)); - SCHEMA_TYPE_CLASSES.put(Type.STRING, Collections.singletonList(String.class)); - // Bytes are special and have 2 representations. byte[] causes problems because it doesn't handle equals() and - // hashCode() like we want objects to, so we support both byte[] and ByteBuffer. Using plain byte[] can cause - // those methods to fail, so ByteBuffers are recommended - SCHEMA_TYPE_CLASSES.put(Type.BYTES, Arrays.asList(byte[].class, ByteBuffer.class)); - SCHEMA_TYPE_CLASSES.put(Type.ARRAY, Collections.singletonList(List.class)); - SCHEMA_TYPE_CLASSES.put(Type.MAP, Collections.singletonList(Map.class)); - SCHEMA_TYPE_CLASSES.put(Type.STRUCT, Collections.singletonList(Struct.class)); - - for (Map.Entry>> schemaClasses : SCHEMA_TYPE_CLASSES.entrySet()) { - for (Class schemaClass : schemaClasses.getValue()) - JAVA_CLASS_SCHEMA_TYPES.put(schemaClass, schemaClasses.getKey()); - } - - LOGICAL_TYPE_CLASSES.put(Decimal.LOGICAL_NAME, Collections.singletonList(BigDecimal.class)); - LOGICAL_TYPE_CLASSES.put(Date.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); - LOGICAL_TYPE_CLASSES.put(Time.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); - LOGICAL_TYPE_CLASSES.put(Timestamp.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); - // We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since that's only used to determine schemas for - // schemaless data and logical types will have ambiguous schemas (e.g. many of them use the same Java class) so - // they should not be used without schemas. 
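The remainder of this hunk replaces the old static-initializer loop with a stream pipeline that derives JAVA_CLASS_SCHEMA_TYPES from SCHEMA_TYPE_CLASSES. As a self-contained sketch of that inversion idiom (the FORWARD/REVERSE names and string keys here are made up for illustration, not part of the patch):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ReverseLookupExample {
    // A made-up forward mapping, standing in for SCHEMA_TYPE_CLASSES.
    private static final Map<String, List<Class<?>>> FORWARD = Map.of(
            "INT32", List.of(Integer.class),
            "BYTES", List.of(byte[].class, java.nio.ByteBuffer.class));

    // Invert it the same way the new code builds JAVA_CLASS_SCHEMA_TYPES: flatten each
    // (type, classes) entry into (class, type) pairs and collect them into a map.
    private static final Map<Class<?>, String> REVERSE = FORWARD.entrySet()
            .stream()
            .flatMap(e -> e.getValue().stream().map(klass -> Map.entry(klass, e.getKey())))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    public static void main(String[] args) {
        System.out.println(REVERSE.get(Integer.class)); // prints INT32
    }
}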
- } + private static final Map, Type> JAVA_CLASS_SCHEMA_TYPES = SCHEMA_TYPE_CLASSES.entrySet() + .stream() + .flatMap(entry -> entry.getValue().stream().map(klass -> Map.entry(klass, entry.getKey()))) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); // The type of the field private final Type type; @@ -110,7 +106,7 @@ public ConnectSchema(Type type, boolean optional, Object defaultValue, String na this.parameters = parameters; if (this.type == Type.STRUCT) { - this.fields = fields == null ? Collections.emptyList() : fields; + this.fields = fields == null ? List.of() : fields; this.fieldsByName = new HashMap<>(this.fields.size()); for (Field field : this.fields) fieldsByName.put(field.name(), field); @@ -283,9 +279,12 @@ private static Schema assertSchemaNotNull(Schema schema, String location) { } private static List> expectedClassesFor(Schema schema) { - List> expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name()); + List> expectedClasses = null; + if (schema.name() != null) { + expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name()); + } if (expectedClasses == null) - expectedClasses = SCHEMA_TYPE_CLASSES.getOrDefault(schema.type(), Collections.emptyList()); + expectedClasses = SCHEMA_TYPE_CLASSES.getOrDefault(schema.type(), List.of()); return expectedClasses; } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java index 4a57663f4c527..4f25e3611a099 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java @@ -117,19 +117,10 @@ public String getName() { } public boolean isPrimitive() { - switch (this) { - case INT8: - case INT16: - case INT32: - case INT64: - case FLOAT32: - case FLOAT64: - case BOOLEAN: - case STRING: - case BYTES: - return true; - } - return false; + return switch (this) { + case INT8, INT16, INT32, INT64, FLOAT32, FLOAT64, BOOLEAN, STRING, BYTES -> true; + default -> false; + }; } } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java index 6a9dd56aeb39c..ed096504e09a2 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java @@ -78,25 +78,13 @@ public static Object project(Schema source, Object record, Schema target) throws } private static Object projectRequiredSchema(Schema source, Object record, Schema target) throws SchemaProjectorException { - switch (target.type()) { - case INT8: - case INT16: - case INT32: - case INT64: - case FLOAT32: - case FLOAT64: - case BOOLEAN: - case BYTES: - case STRING: - return projectPrimitive(source, record, target); - case STRUCT: - return projectStruct(source, (Struct) record, target); - case ARRAY: - return projectArray(source, record, target); - case MAP: - return projectMap(source, record, target); - } - return null; + return switch (target.type()) { + case INT8, INT16, INT32, INT64, FLOAT32, FLOAT64, BOOLEAN, BYTES, STRING -> + projectPrimitive(source, record, target); + case STRUCT -> projectStruct(source, (Struct) record, target); + case ARRAY -> projectArray(source, record, target); + case MAP -> projectMap(source, record, target); + }; } private static Object projectStruct(Schema source, Struct sourceStruct, Schema target) throws SchemaProjectorException { @@ -161,28 +149,15 @@ 
private static Object projectPrimitive(Schema source, Object record, Schema targ assert target.type().isPrimitive(); Object result; if (isPromotable(source.type(), target.type()) && record instanceof Number numberRecord) { - switch (target.type()) { - case INT8: - result = numberRecord.byteValue(); - break; - case INT16: - result = numberRecord.shortValue(); - break; - case INT32: - result = numberRecord.intValue(); - break; - case INT64: - result = numberRecord.longValue(); - break; - case FLOAT32: - result = numberRecord.floatValue(); - break; - case FLOAT64: - result = numberRecord.doubleValue(); - break; - default: - throw new SchemaProjectorException("Not promotable type."); - } + result = switch (target.type()) { + case INT8 -> numberRecord.byteValue(); + case INT16 -> numberRecord.shortValue(); + case INT32 -> numberRecord.intValue(); + case INT64 -> numberRecord.longValue(); + case FLOAT32 -> numberRecord.floatValue(); + case FLOAT64 -> numberRecord.doubleValue(); + default -> throw new SchemaProjectorException("Not promotable type."); + }; } else { result = record; } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java index f23f1f88a750c..266d31534a81c 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java @@ -430,33 +430,20 @@ protected static Object convertTo(Schema toSchema, Schema fromSchema, Object val } throw new DataException("Unable to convert a null value to a schema that requires a value"); } - switch (toSchema.type()) { - case BYTES: - return convertMaybeLogicalBytes(toSchema, value); - case STRING: - return convertToString(fromSchema, value); - case BOOLEAN: - return convertToBoolean(fromSchema, value); - case INT8: - return convertToByte(fromSchema, value); - case INT16: - return convertToShort(fromSchema, value); - case INT32: - return convertMaybeLogicalInteger(toSchema, fromSchema, value); - case INT64: - return convertMaybeLogicalLong(toSchema, fromSchema, value); - case FLOAT32: - return convertToFloat(fromSchema, value); - case FLOAT64: - return convertToDouble(fromSchema, value); - case ARRAY: - return convertToArray(toSchema, value); - case MAP: - return convertToMapInternal(toSchema, value); - case STRUCT: - return convertToStructInternal(toSchema, value); - } - throw new DataException("Unable to convert " + value + " (" + value.getClass() + ") to " + toSchema); + return switch (toSchema.type()) { + case BYTES -> convertMaybeLogicalBytes(toSchema, value); + case STRING -> convertToString(fromSchema, value); + case BOOLEAN -> convertToBoolean(fromSchema, value); + case INT8 -> convertToByte(fromSchema, value); + case INT16 -> convertToShort(fromSchema, value); + case INT32 -> convertMaybeLogicalInteger(toSchema, fromSchema, value); + case INT64 -> convertMaybeLogicalLong(toSchema, fromSchema, value); + case FLOAT32 -> convertToFloat(fromSchema, value); + case FLOAT64 -> convertToDouble(fromSchema, value); + case ARRAY -> convertToArray(toSchema, value); + case MAP -> convertToMapInternal(toSchema, value); + case STRUCT -> convertToStructInternal(toSchema, value); + }; } private static Serializable convertMaybeLogicalBytes(Schema toSchema, Object value) { @@ -1144,21 +1131,15 @@ private static Schema mergeSchemas(Schema previous, Schema newSchema) { Type previousType = previous.type(); Type newType = newSchema.type(); if (previousType != newType) { - switch 
(previous.type()) { - case INT8: - return commonSchemaForInt8(newSchema, newType); - case INT16: - return commonSchemaForInt16(previous, newSchema, newType); - case INT32: - return commonSchemaForInt32(previous, newSchema, newType); - case INT64: - return commonSchemaForInt64(previous, newSchema, newType); - case FLOAT32: - return commonSchemaForFloat32(previous, newSchema, newType); - case FLOAT64: - return commonSchemaForFloat64(previous, newType); - } - return null; + return switch (previous.type()) { + case INT8 -> commonSchemaForInt8(newSchema, newType); + case INT16 -> commonSchemaForInt16(previous, newSchema, newType); + case INT32 -> commonSchemaForInt32(previous, newSchema, newType); + case INT64 -> commonSchemaForInt64(previous, newSchema, newType); + case FLOAT32 -> commonSchemaForFloat32(previous, newSchema, newType); + case FLOAT64 -> commonSchemaForFloat64(previous, newType); + default -> null; + }; } if (previous.isOptional() == newSchema.isOptional()) { // Use the optional one @@ -1273,10 +1254,8 @@ public boolean canDetect(Object value) { } if (knownType == null) { knownType = schema.type(); - } else if (knownType != schema.type()) { - return false; } - return true; + return knownType == schema.type(); } public Schema schema() { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java index ecd7b2e755ae4..2da2bd8d07e16 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java @@ -16,10 +16,10 @@ */ package org.apache.kafka.connect.storage; -import java.util.Collections; -import java.util.HashMap; +import java.util.Arrays; import java.util.Locale; import java.util.Map; +import java.util.stream.Collectors; /** * The type of {@link Converter} and {@link HeaderConverter}. @@ -29,16 +29,8 @@ public enum ConverterType { VALUE, HEADER; - private static final Map NAME_TO_TYPE; - - static { - ConverterType[] types = ConverterType.values(); - Map nameToType = new HashMap<>(types.length); - for (ConverterType type : types) { - nameToType.put(type.name, type); - } - NAME_TO_TYPE = Collections.unmodifiableMap(nameToType); - } + private static final Map NAME_TO_TYPE = Arrays.stream(ConverterType.values()) + .collect(Collectors.toUnmodifiableMap(ConverterType::getName, t -> t)); /** * Find the ConverterType with the given name, using a case-insensitive match. 
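Several hunks in this patch (Schema.isPrimitive, SchemaProjector, Values.convertTo and mergeSchemas) convert statement switches into switch expressions. A standalone sketch of the arrow form, using a hypothetical enum, in case the idiom is unfamiliar:

public class SwitchExpressionExample {
    enum Type { INT8, INT16, STRING, STRUCT }

    // A switch expression yields a value directly: no fall-through, no break,
    // and the compiler checks that every case is covered or a default exists.
    static boolean isNumeric(Type type) {
        return switch (type) {
            case INT8, INT16 -> true;
            default -> false;
        };
    }

    public static void main(String[] args) {
        System.out.println(isNumeric(Type.INT8));   // true
        System.out.println(isNumeric(Type.STRING)); // false
    }
}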
diff --git a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java index efa56aca4692d..8b9c16bfe6421 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java @@ -21,7 +21,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -33,15 +32,15 @@ public class ConnectorReconfigurationTest { @Test public void testDefaultReconfigure() { TestConnector conn = new TestConnector(false); - conn.reconfigure(Collections.emptyMap()); - assertEquals(conn.stopOrder, 0); - assertEquals(conn.configureOrder, 1); + conn.reconfigure(Map.of()); + assertEquals(0, conn.stopOrder); + assertEquals(1, conn.configureOrder); } @Test public void testReconfigureStopException() { TestConnector conn = new TestConnector(true); - assertThrows(ConnectException.class, () -> conn.reconfigure(Collections.emptyMap())); + assertThrows(ConnectException.class, () -> conn.reconfigure(Map.of())); } private static class TestConnector extends Connector { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java index b4e9f81ce8163..95cc36edb1bd1 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -86,17 +87,17 @@ public void testValidateValueMatchingType() { ConnectSchema.validateValue(Schema.STRING_SCHEMA, "a string"); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, "a byte array".getBytes()); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, ByteBuffer.wrap("a byte array".getBytes())); - ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)); + ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, 3)); ConnectSchema.validateValue( SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build(), - Collections.singletonMap(1, "value") + Map.of(1, "value") ); // Struct tests the basic struct layout + complex field types + nested structs Struct structValue = new Struct(STRUCT_SCHEMA) .put("first", 1) .put("second", "foo") - .put("array", Arrays.asList(1, 2, 3)) - .put("map", Collections.singletonMap(1, "value")) + .put("array", List.of(1, 2, 3)) + .put("map", Map.of(1, "value")) .put("nested", new Struct(FLAT_STRUCT_SCHEMA).put("field", 12)); ConnectSchema.validateValue(STRUCT_SCHEMA, structValue); } @@ -171,7 +172,7 @@ public void testValidateValueMismatchBytes() { @Test public void testValidateValueMismatchArray() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList("a", "b", "c"))); + () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of("a", "b", "c"))); } @Test @@ -179,19 +180,19 @@ public void testValidateValueMismatchArraySomeMatch() { // Even if some match the right type, this should fail if any mismatch. 
In this case, type erasure loses // the fact that the list is actually List, but we couldn't tell if only checking the first element assertThrows(DataException.class, - () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, "c"))); + () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, "c"))); } @Test public void testValidateValueMismatchMapKey() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap("wrong key type", "value"))); + () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Map.of("wrong key type", "value"))); } @Test public void testValidateValueMismatchMapValue() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap(1, 2))); + () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Map.of(1, 2))); } @Test @@ -259,7 +260,7 @@ public void testPrimitiveEquality() { ConnectSchema differentName = new ConnectSchema(Schema.Type.INT8, false, null, "otherName", 2, "doc"); ConnectSchema differentVersion = new ConnectSchema(Schema.Type.INT8, false, null, "name", 4, "doc"); ConnectSchema differentDoc = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "other doc"); - ConnectSchema differentParameters = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc", Collections.singletonMap("param", "value"), null, null, null); + ConnectSchema differentParameters = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc", Map.of("param", "value"), null, null, null); assertEquals(s1, s2); assertNotEquals(s1, differentType); @@ -311,13 +312,13 @@ public void testStructEquality() { // Same as testArrayEquality, but checks differences in fields. 
Only does a simple check, relying on tests of // Field's equals() method to validate all variations in the list of fields will be checked ConnectSchema s1 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), + List.of(new Field("field", 0, SchemaBuilder.int8().build()), new Field("field2", 1, SchemaBuilder.int16().build())), null, null); ConnectSchema s2 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), + List.of(new Field("field", 0, SchemaBuilder.int8().build()), new Field("field2", 1, SchemaBuilder.int16().build())), null, null); ConnectSchema differentField = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), + List.of(new Field("field", 0, SchemaBuilder.int8().build()), new Field("different field name", 1, SchemaBuilder.int16().build())), null, null); assertEquals(s1, s2); @@ -365,44 +366,44 @@ public void testValidateList() { // Optional element schema Schema optionalStrings = SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyList()); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList("hello")); + ConnectSchema.validateValue(fieldName, optionalStrings, List.of()); + ConnectSchema.validateValue(fieldName, optionalStrings, List.of("hello")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList(null)); - ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", "world")); + ConnectSchema.validateValue(fieldName, optionalStrings, List.of("hello", "world")); ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", null)); ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList(null, "world")); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), + assertInvalidValueForSchema(fieldName, optionalStrings, List.of(true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); // Required element schema Schema requiredStrings = SchemaBuilder.array(Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyList()); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonList("hello")); + ConnectSchema.validateValue(fieldName, requiredStrings, List.of()); + ConnectSchema.validateValue(fieldName, requiredStrings, List.of("hello")); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonList(null), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - ConnectSchema.validateValue(fieldName, requiredStrings, Arrays.asList("hello", "world")); + ConnectSchema.validateValue(fieldName, requiredStrings, List.of("hello", "world")); assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList("hello", null), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList(null, "world"), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), + 
assertInvalidValueForSchema(fieldName, optionalStrings, List.of(true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); // Null element schema Schema nullElements = SchemaBuilder.type(Schema.Type.ARRAY); - assertInvalidValueForSchema(fieldName, nullElements, Collections.emptyList(), + assertInvalidValueForSchema(fieldName, nullElements, List.of(), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList("hello"), + assertInvalidValueForSchema(fieldName, nullElements, List.of("hello"), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(null), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", "world"), + assertInvalidValueForSchema(fieldName, nullElements, List.of("hello", "world"), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", null), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList(null, "world"), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(true), + assertInvalidValueForSchema(fieldName, nullElements, List.of(true), "No schema defined for element of array field: \"field\""); } @@ -412,36 +413,36 @@ public void testValidateMap() { // Optional element schema Schema optionalStrings = SchemaBuilder.map(Schema.OPTIONAL_STRING_SCHEMA, Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyMap()); - ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap("key", "value")); + ConnectSchema.validateValue(fieldName, optionalStrings, Map.of()); + ConnectSchema.validateValue(fieldName, optionalStrings, Map.of("key", "value")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap("key", null)); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, "value")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, null)); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap("key", true), + assertInvalidValueForSchema(fieldName, optionalStrings, Map.of("key", true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap(true, "value"), + assertInvalidValueForSchema(fieldName, optionalStrings, Map.of(true, "value"), "Invalid Java object for schema with type STRING: class java.lang.Boolean for key of map field: \"field\""); // Required element schema Schema requiredStrings = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyMap()); - ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonMap("key", "value")); + ConnectSchema.validateValue(fieldName, requiredStrings, Map.of()); + ConnectSchema.validateValue(fieldName, requiredStrings, Map.of("key", "value")); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", null), "Invalid value: null used for required 
value of map field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, "value"), "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, null), "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", true), + assertInvalidValueForSchema(fieldName, requiredStrings, Map.of("key", true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(true, "value"), + assertInvalidValueForSchema(fieldName, requiredStrings, Map.of(true, "value"), "Invalid Java object for schema with type STRING: class java.lang.Boolean for key of map field: \"field\""); // Null key schema Schema nullKeys = SchemaBuilder.type(Schema.Type.MAP); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.emptyMap(), + assertInvalidValueForSchema(fieldName, nullKeys, Map.of(), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", "value"), + assertInvalidValueForSchema(fieldName, nullKeys, Map.of("key", "value"), "No schema defined for key of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", null), "No schema defined for key of map field: \"field\""); @@ -449,16 +450,16 @@ public void testValidateMap() { "No schema defined for key of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(null, null), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", true), + assertInvalidValueForSchema(fieldName, nullKeys, Map.of("key", true), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(true, "value"), + assertInvalidValueForSchema(fieldName, nullKeys, Map.of(true, "value"), "No schema defined for key of map field: \"field\""); // Null value schema Schema nullValues = SchemaBuilder.mapWithNullValues(Schema.OPTIONAL_STRING_SCHEMA); - assertInvalidValueForSchema(fieldName, nullValues, Collections.emptyMap(), + assertInvalidValueForSchema(fieldName, nullValues, Map.of(), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", "value"), + assertInvalidValueForSchema(fieldName, nullValues, Map.of("key", "value"), "No schema defined for value of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", null), "No schema defined for value of map field: \"field\""); @@ -466,9 +467,9 @@ public void testValidateMap() { "No schema defined for value of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(null, null), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", true), + assertInvalidValueForSchema(fieldName, nullValues, Map.of("key", true), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(true, 
"value"), + assertInvalidValueForSchema(fieldName, nullValues, Map.of(true, "value"), "No schema defined for value of map field: \"field\""); } } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java index 9592fb918e759..efb4a75ed296b 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java @@ -20,7 +20,7 @@ import java.math.BigDecimal; import java.math.BigInteger; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -36,7 +36,7 @@ public class DecimalTest { public void testBuilder() { Schema plain = Decimal.builder(2).build(); assertEquals(Decimal.LOGICAL_NAME, plain.name()); - assertEquals(Collections.singletonMap(Decimal.SCALE_FIELD, "2"), plain.parameters()); + assertEquals(Map.of(Decimal.SCALE_FIELD, "2"), plain.parameters()); assertEquals(1, (Object) plain.version()); } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java index c789541ae5377..23a96f92c15ae 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java @@ -21,8 +21,6 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -234,14 +232,14 @@ public void testNonStructCantHaveFields() { public void testArrayBuilder() { Schema schema = SchemaBuilder.array(Schema.INT8_SCHEMA).build(); assertTypeAndDefault(schema, Schema.Type.ARRAY, false, null); - assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); + assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); assertNoMetadata(schema); // Default value - List defArray = Arrays.asList((byte) 1, (byte) 2); + List defArray = List.of((byte) 1, (byte) 2); schema = SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(defArray).build(); assertTypeAndDefault(schema, Schema.Type.ARRAY, false, defArray); - assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); + assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); assertNoMetadata(schema); } @@ -249,7 +247,7 @@ public void testArrayBuilder() { public void testArrayBuilderInvalidDefault() { // Array, but wrong embedded type assertThrows(SchemaBuilderException.class, - () -> SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(Collections.singletonList("string")).build()); + () -> SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(List.of("string")).build()); } @Test @@ -257,30 +255,30 @@ public void testMapBuilder() { // SchemaBuilder should also pass the check Schema schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA); assertTypeAndDefault(schema, Schema.Type.MAP, false, null); - assertEquals(schema.keySchema(), Schema.INT8_SCHEMA); - assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); + assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); + assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); assertNoMetadata(schema); schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA).build(); assertTypeAndDefault(schema, Schema.Type.MAP, false, null); - assertEquals(schema.keySchema(), 
Schema.INT8_SCHEMA); - assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); + assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); + assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); assertNoMetadata(schema); // Default value - Map defMap = Collections.singletonMap((byte) 5, (byte) 10); + Map defMap = Map.of((byte) 5, (byte) 10); schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA) .defaultValue(defMap).build(); assertTypeAndDefault(schema, Schema.Type.MAP, false, defMap); - assertEquals(schema.keySchema(), Schema.INT8_SCHEMA); - assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); + assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); + assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); assertNoMetadata(schema); } @Test public void testMapBuilderInvalidDefault() { // Map, but wrong embedded type - Map defMap = Collections.singletonMap((byte) 5, "foo"); + Map defMap = Map.of((byte) 5, "foo"); assertThrows(SchemaBuilderException.class, () -> SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA) .defaultValue(defMap).build()); } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java index 4ec35d369adb9..0f438c0e0ff2b 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java @@ -24,8 +24,6 @@ import java.math.BigDecimal; import java.math.BigInteger; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -78,12 +76,12 @@ public void testNumericTypeProjection() { Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345}; Map> expectedProjected = new HashMap<>(); - expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 127L, 127.F, 127.)); - expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.)); - expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.)); - expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.)); - expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2)); - expectedProjected.put(values[5], Collections.singletonList(1.2345)); + expectedProjected.put(values[0], List.of((byte) 127, (short) 127, 127, 127L, 127.F, 127.)); + expectedProjected.put(values[1], List.of((short) 255, 255, 255L, 255.F, 255.)); + expectedProjected.put(values[2], List.of(32767, 32767L, 32767.F, 32767.)); + expectedProjected.put(values[3], List.of(327890L, 327890.F, 327890.)); + expectedProjected.put(values[4], List.of(1.2F, 1.2)); + expectedProjected.put(values[5], List.of(1.2345)); Object promoted; for (int i = 0; i < promotableSchemas.length; ++i) { @@ -298,16 +296,16 @@ public void testNestedSchemaProjection() { Struct sourceNestedStruct = new Struct(sourceNestedSchema); sourceNestedStruct.put("first", 1); sourceNestedStruct.put("second", "abc"); - sourceNestedStruct.put("array", Arrays.asList(1, 2)); - sourceNestedStruct.put("map", Collections.singletonMap(5, "def")); + sourceNestedStruct.put("array", List.of(1, 2)); + sourceNestedStruct.put("map", Map.of(5, "def")); sourceNestedStruct.put("nested", sourceFlatStruct); Struct targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct, targetNestedSchema); assertEquals(1, targetNestedStruct.get("first")); assertEquals("abc", targetNestedStruct.get("second")); - 
assertEquals(Arrays.asList(1, 2), targetNestedStruct.get("array")); - assertEquals(Collections.singletonMap(5, "def"), targetNestedStruct.get("map")); + assertEquals(List.of(1, 2), targetNestedStruct.get("array")); + assertEquals(Map.of(5, "def"), targetNestedStruct.get("map")); Struct projectedStruct = (Struct) targetNestedStruct.get("nested"); assertEquals(113, projectedStruct.get("field")); @@ -360,22 +358,22 @@ public void testLogicalTypeProjection() { public void testArrayProjection() { Schema source = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); - Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source); - assertEquals(Arrays.asList(1, 2, 3), projected); + Object projected = SchemaProjector.project(source, List.of(1, 2, 3), source); + assertEquals(List.of(1, 2, 3), projected); Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build(); - Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build(); - projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target); - assertEquals(Arrays.asList(4, 5), projected); + Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(List.of(1, 2, 3)).build(); + projected = SchemaProjector.project(optionalSource, List.of(4, 5), target); + assertEquals(List.of(4, 5), projected); projected = SchemaProjector.project(optionalSource, null, target); - assertEquals(Arrays.asList(1, 2, 3), projected); + assertEquals(List.of(1, 2, 3), projected); - Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build(); - projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget); - List expectedProjected = Arrays.asList(4L, 5L); + Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(List.of(1L, 2L, 3L)).build(); + projected = SchemaProjector.project(optionalSource, List.of(4, 5), promotedTarget); + List expectedProjected = List.of(4L, 5L); assertEquals(expectedProjected, projected); projected = SchemaProjector.project(optionalSource, null, promotedTarget); - assertEquals(Arrays.asList(1L, 2L, 3L), projected); + assertEquals(List.of(1L, 2L, 3L), projected); Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); assertThrows(SchemaProjectorException.class, () -> SchemaProjector.project(optionalSource, null, @@ -391,18 +389,18 @@ public void testArrayProjection() { public void testMapProjection() { Schema source = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).optional().build(); - Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Collections.singletonMap(1, 2)).build(); - Object projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), target); - assertEquals(Collections.singletonMap(3, 4), projected); + Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Map.of(1, 2)).build(); + Object projected = SchemaProjector.project(source, Map.of(3, 4), target); + assertEquals(Map.of(3, 4), projected); projected = SchemaProjector.project(source, null, target); - assertEquals(Collections.singletonMap(1, 2), projected); + assertEquals(Map.of(1, 2), projected); Schema promotedTarget = SchemaBuilder.map(Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA).defaultValue( - Collections.singletonMap(3L, 4.5F)).build(); - projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), promotedTarget); - 
assertEquals(Collections.singletonMap(3L, 4.F), projected); + Map.of(3L, 4.5F)).build(); + projected = SchemaProjector.project(source, Map.of(3, 4), promotedTarget); + assertEquals(Map.of(3L, 4.F), projected); projected = SchemaProjector.project(source, null, promotedTarget); - assertEquals(Collections.singletonMap(3L, 4.5F), projected); + assertEquals(Map.of(3L, 4.5F), projected); Schema noDefaultValueTarget = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(); assertThrows(SchemaProjectorException.class, @@ -424,7 +422,7 @@ public void testMaybeCompatible() { () -> SchemaProjector.project(source, 12, target), "Source name and target name mismatch."); - Schema targetWithParameters = SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value")); + Schema targetWithParameters = SchemaBuilder.int32().parameters(Map.of("key", "value")); assertThrows(SchemaProjectorException.class, () -> SchemaProjector.project(source, 34, targetWithParameters), "Source parameters and target parameters mismatch."); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java index 6dee26ca83ac5..bfdec2fcb9b65 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java @@ -21,8 +21,6 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -93,8 +91,8 @@ public void testFlatStruct() { @Test public void testComplexStruct() { - List array = Arrays.asList((byte) 1, (byte) 2); - Map map = Collections.singletonMap(1, "string"); + List array = List.of((byte) 1, (byte) 2); + Map map = Map.of(1, "string"); Struct struct = new Struct(NESTED_SCHEMA) .put("array", array) .put("map", map) @@ -124,13 +122,13 @@ public void testInvalidFieldType() { @Test public void testInvalidArrayFieldElements() { assertThrows(DataException.class, - () -> new Struct(NESTED_SCHEMA).put("array", Collections.singletonList("should fail since elements should be int8s"))); + () -> new Struct(NESTED_SCHEMA).put("array", List.of("should fail since elements should be int8s"))); } @Test public void testInvalidMapKeyElements() { assertThrows(DataException.class, - () -> new Struct(NESTED_SCHEMA).put("map", Collections.singletonMap("should fail because keys should be int8s", (byte) 12))); + () -> new Struct(NESTED_SCHEMA).put("map", Map.of("should fail because keys should be int8s", (byte) 12))); } @Test @@ -219,20 +217,20 @@ public void testEquals() { assertEquals(struct1, struct2); assertNotEquals(struct1, struct3); - List array = Arrays.asList((byte) 1, (byte) 2); - Map map = Collections.singletonMap(1, "string"); + List array = List.of((byte) 1, (byte) 2); + Map map = Map.of(1, "string"); struct1 = new Struct(NESTED_SCHEMA) .put("array", array) .put("map", map) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); - List array2 = Arrays.asList((byte) 1, (byte) 2); - Map map2 = Collections.singletonMap(1, "string"); + List array2 = List.of((byte) 1, (byte) 2); + Map map2 = Map.of(1, "string"); struct2 = new Struct(NESTED_SCHEMA) .put("array", array2) .put("map", map2) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); - List array3 = Arrays.asList((byte) 1, (byte) 2, (byte) 3); - Map map3 = Collections.singletonMap(2, "string"); + List array3 = List.of((byte) 1, (byte) 2, 
(byte) 3); + Map map3 = Map.of(2, "string"); struct3 = new Struct(NESTED_SCHEMA) .put("array", array3) .put("map", map3) diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java index ac6eef6fa6800..d100be29b4db9 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java @@ -30,7 +30,6 @@ import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; import java.time.Instant; -import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.ZoneId; @@ -38,7 +37,6 @@ import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -184,7 +182,7 @@ public void shouldParseBooleanLiteralsEmbeddedInArray() { SchemaAndValue schemaAndValue = Values.parseString("[true, false]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type()); - assertEquals(Arrays.asList(true, false), schemaAndValue.value()); + assertEquals(List.of(true, false), schemaAndValue.value()); } @Test @@ -217,14 +215,14 @@ public void shouldNotParseAsArrayWithoutCommas() { public void shouldParseEmptyMap() { SchemaAndValue schemaAndValue = Values.parseString("{}"); assertEquals(Type.MAP, schemaAndValue.schema().type()); - assertEquals(Collections.emptyMap(), schemaAndValue.value()); + assertEquals(Map.of(), schemaAndValue.value()); } @Test public void shouldParseEmptyArray() { SchemaAndValue schemaAndValue = Values.parseString("[]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); - assertEquals(Collections.emptyList(), schemaAndValue.value()); + assertEquals(List.of(), schemaAndValue.value()); } @Test @@ -468,16 +466,16 @@ public void shouldConvertIntegralTypesToDouble() { @Test public void shouldParseStringListWithMultipleElementTypes() { assertParseStringArrayWithNoSchema( - Arrays.asList((byte) 1, (byte) 2, (short) 300, "four"), + List.of((byte) 1, (byte) 2, (short) 300, "four"), "[1, 2, 300, \"four\"]"); assertParseStringArrayWithNoSchema( - Arrays.asList((byte) 2, (short) 300, "four", (byte) 1), + List.of((byte) 2, (short) 300, "four", (byte) 1), "[2, 300, \"four\", 1]"); assertParseStringArrayWithNoSchema( - Arrays.asList((short) 300, "four", (byte) 1, (byte) 2), + List.of((short) 300, "four", (byte) 1, (byte) 2), "[300, \"four\", 1, 2]"); assertParseStringArrayWithNoSchema( - Arrays.asList("four", (byte) 1, (byte) 2, (short) 300), + List.of("four", (byte) 1, (byte) 2, (short) 300), "[\"four\", 1, 2, 300]"); } @@ -648,7 +646,7 @@ public void shouldParseDateStringAsDateInArray() throws Exception { assertEquals(Type.INT32, elementSchema.type()); assertEquals(Date.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(dateStr); - assertEquals(Collections.singletonList(expected), result.value()); + assertEquals(List.of(expected), result.value()); } @Test @@ -661,7 +659,7 @@ public void shouldParseTimeStringAsTimeInArray() throws Exception { assertEquals(Type.INT32, elementSchema.type()); assertEquals(Time.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - 
assertEquals(Collections.singletonList(expected), result.value()); + assertEquals(List.of(expected), result.value()); } @Test @@ -674,7 +672,7 @@ public void shouldParseTimestampStringAsTimestampInArray() throws Exception { assertEquals(Type.INT64, elementSchema.type()); assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr); - assertEquals(Collections.singletonList(expected), result.value()); + assertEquals(List.of(expected), result.value()); } @Test @@ -691,7 +689,7 @@ public void shouldParseMultipleTimestampStringAsTimestampInArray() throws Except java.util.Date expected1 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr1); java.util.Date expected2 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr2); java.util.Date expected3 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr3); - assertEquals(Arrays.asList(expected1, expected2, expected3), result.value()); + assertEquals(List.of(expected1, expected2, expected3), result.value()); } @Test @@ -707,7 +705,7 @@ public void shouldParseQuotedTimeStringAsTimeInMap() throws Exception { assertEquals(Type.INT32, valueSchema.type()); assertEquals(Time.LOGICAL_NAME, valueSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - assertEquals(Collections.singletonMap(keyStr, expected), result.value()); + assertEquals(Map.of(keyStr, expected), result.value()); } @Test @@ -723,7 +721,7 @@ public void shouldParseTimeStringAsTimeInMap() throws Exception { assertEquals(Type.INT32, valueSchema.type()); assertEquals(Time.LOGICAL_NAME, valueSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - assertEquals(Collections.singletonMap(keyStr, expected), result.value()); + assertEquals(Map.of(keyStr, expected), result.value()); } @Test @@ -904,7 +902,7 @@ public void shouldConvertDateValues() { // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd" LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.DAYS); - java.util.Date d3 = Values.convertToDate(Date.SCHEMA, LocalDate.ofEpochDay(days).format(DateTimeFormatter.ISO_LOCAL_DATE)); + java.util.Date d3 = Values.convertToDate(Date.SCHEMA, localTime.format(DateTimeFormatter.ISO_LOCAL_DATE)); LocalDateTime date3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(d3.getTime()), ZoneId.systemDefault()); assertEquals(localTimeTruncated, date3); @@ -991,25 +989,25 @@ public void shouldInferStructSchema() { @Test public void shouldInferNoSchemaForEmptyList() { - Schema listSchema = Values.inferSchema(Collections.emptyList()); + Schema listSchema = Values.inferSchema(List.of()); assertNull(listSchema); } @Test public void shouldInferNoSchemaForListContainingObject() { - Schema listSchema = Values.inferSchema(Collections.singletonList(new Object())); + Schema listSchema = Values.inferSchema(List.of(new Object())); assertNull(listSchema); } @Test public void shouldInferNoSchemaForEmptyMap() { - Schema listSchema = Values.inferSchema(Collections.emptyMap()); + Schema listSchema = Values.inferSchema(Map.of()); assertNull(listSchema); } @Test public void shouldInferNoSchemaForMapContainingObject() { - Schema listSchema = Values.inferSchema(Collections.singletonMap(new Object(), new Object())); + Schema listSchema = Values.inferSchema(Map.of(new Object(), new Object())); assertNull(listSchema); } @@ 
-1019,7 +1017,7 @@ public void shouldInferNoSchemaForMapContainingObject() { */ @Test public void shouldNotConvertArrayValuesToDecimal() { - List decimals = Arrays.asList("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), + List decimals = List.of("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE), (byte) 1, (byte) 1); List expected = new ArrayList<>(decimals); // most values are directly reproduced with the same type expected.set(0, "1.0"); // The quotes are parsed away, but the value remains a string @@ -1032,7 +1030,7 @@ public void shouldNotConvertArrayValuesToDecimal() { @Test public void shouldParseArrayOfOnlyDecimals() { - List decimals = Arrays.asList(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), + List decimals = List.of(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE)); SchemaAndValue schemaAndValue = Values.parseString(decimals.toString()); Schema schema = schemaAndValue.schema(); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java b/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java index 44073f7722927..52aeff4a1a2cc 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java @@ -37,10 +37,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; -import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Iterator; +import java.util.List; +import java.util.Map; import java.util.TimeZone; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -380,9 +381,9 @@ public void shouldValidateBuildInTypes() { assertSchemaMatches(Schema.FLOAT64_SCHEMA, 1.0d); assertSchemaMatches(Schema.STRING_SCHEMA, "value"); assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), new ArrayList()); - assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), Collections.singletonList("value")); + assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), List.of("value")); assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), new HashMap()); - assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), Collections.singletonMap("a", 0)); + assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), Map.of("a", 0)); Schema emptyStructSchema = SchemaBuilder.struct(); assertSchemaMatches(emptyStructSchema, new Struct(emptyStructSchema)); Schema structSchema = SchemaBuilder.struct().field("foo", Schema.OPTIONAL_BOOLEAN_SCHEMA).field("bar", Schema.STRING_SCHEMA) diff --git a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java index 90bd4f897df28..32716da897567 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java @@ -25,7 +25,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -38,8 +37,8 @@ public class SourceRecordTest { - private static final Map SOURCE_PARTITION = Collections.singletonMap("src", 
"abc"); - private static final Map SOURCE_OFFSET = Collections.singletonMap("offset", "1"); + private static final Map SOURCE_PARTITION = Map.of("src", "abc"); + private static final Map SOURCE_OFFSET = Map.of("offset", "1"); private static final String TOPIC_NAME = "myTopic"; private static final Integer PARTITION_NUMBER = 0; private static final Long KAFKA_TIMESTAMP = 0L; diff --git a/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java b/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java index 463125e09404f..119d0594a8fdc 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -58,7 +58,7 @@ public void testToBytesIgnoresSchema() { @Test public void testToBytesNonUtf8Encoding() { - converter.configure(Collections.singletonMap("converter.encoding", StandardCharsets.UTF_16.name()), true); + converter.configure(Map.of("converter.encoding", StandardCharsets.UTF_16.name()), true); assertArrayEquals(SAMPLE_STRING.getBytes(StandardCharsets.UTF_16), converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING)); } @@ -78,7 +78,7 @@ public void testBytesNullToString() { @Test public void testBytesToStringNonUtf8Encoding() { - converter.configure(Collections.singletonMap("converter.encoding", StandardCharsets.UTF_16.name()), true); + converter.configure(Map.of("converter.encoding", StandardCharsets.UTF_16.name()), true); SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes(StandardCharsets.UTF_16)); assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema()); assertEquals(SAMPLE_STRING, data.value()); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java index 1972ff7a89d58..0a72a0a181c86 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java @@ -18,8 +18,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -27,37 +25,37 @@ public class ConnectorUtilsTest { - private static final List FIVE_ELEMENTS = Arrays.asList(1, 2, 3, 4, 5); + private static final List FIVE_ELEMENTS = List.of(1, 2, 3, 4, 5); @Test public void testGroupPartitions() { List> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1); - assertEquals(Collections.singletonList(FIVE_ELEMENTS), grouped); + assertEquals(List.of(FIVE_ELEMENTS), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2); - assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped); + assertEquals(List.of(List.of(1, 2, 3), List.of(4, 5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3); - assertEquals(Arrays.asList(Arrays.asList(1, 2), - Arrays.asList(3, 4), - Collections.singletonList(5)), grouped); + assertEquals(List.of(List.of(1, 2), + List.of(3, 4), + List.of(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5); 
- assertEquals(Arrays.asList(Collections.singletonList(1), - Collections.singletonList(2), - Collections.singletonList(3), - Collections.singletonList(4), - Collections.singletonList(5)), grouped); + assertEquals(List.of(List.of(1), + List.of(2), + List.of(3), + List.of(4), + List.of(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7); - assertEquals(Arrays.asList(Collections.singletonList(1), - Collections.singletonList(2), - Collections.singletonList(3), - Collections.singletonList(4), - Collections.singletonList(5), - Collections.emptyList(), - Collections.emptyList()), grouped); + assertEquals(List.of(List.of(1), + List.of(2), + List.of(3), + List.of(4), + List.of(5), + List.of(), + List.of()), grouped); } @Test diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java index d404bdc7dc19a..5c1b0ee454084 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java @@ -26,9 +26,7 @@ import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -54,10 +52,10 @@ public class JaasBasicAuthFilter implements ContainerRequestFilter { private static final Logger log = LoggerFactory.getLogger(JaasBasicAuthFilter.class); - private static final Set INTERNAL_REQUEST_MATCHERS = new HashSet<>(Arrays.asList( + private static final Set INTERNAL_REQUEST_MATCHERS = Set.of( new RequestMatcher(HttpMethod.POST, "/?connectors/([^/]+)/tasks/?"), new RequestMatcher(HttpMethod.PUT, "/?connectors/[^/]+/fence/?") - )); + ); private static final String CONNECT_LOGIN_MODULE = "KafkaConnect"; static final String AUTHORIZATION = "Authorization"; diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java index 81f3a7327d576..d8439309e1d22 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java @@ -26,7 +26,7 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; -import java.util.Collections; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -107,7 +107,7 @@ public void testBadJaasConfigExtensionSetup() { BasicAuthSecurityRestExtension extension = new BasicAuthSecurityRestExtension(configuration); - Exception thrownException = assertThrows(Exception.class, () -> extension.configure(Collections.emptyMap())); + Exception thrownException = assertThrows(Exception.class, () -> extension.configure(Map.of())); assertEquals(jaasConfigurationException, thrownException); thrownException = assertThrows(Exception.class, () -> extension.register(mock(ConnectRestExtensionContext.class))); diff --git 
a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java index bcd6e0ab31995..d168e6466db2d 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java @@ -32,7 +32,6 @@ import java.nio.file.Files; import java.util.ArrayList; import java.util.Base64; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -261,8 +260,8 @@ private File setupPropertyLoginFile(boolean includeUsers) throws IOException { private JaasBasicAuthFilter setupJaasFilter(String name, String credentialFilePath) { TestJaasConfig configuration = new TestJaasConfig(); Map moduleOptions = credentialFilePath != null - ? Collections.singletonMap("file", credentialFilePath) - : Collections.emptyMap(); + ? Map.of("file", credentialFilePath) + : Map.of(); configuration.addEntry(name, LOGIN_MODULE, moduleOptions); return new JaasBasicAuthFilter(configuration); } diff --git a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java index 79478c57d1fa4..8a76a480a7ae6 100644 --- a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java +++ b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java @@ -90,7 +90,7 @@ public List poll() throws InterruptedException { if (stream == null) { try { stream = Files.newInputStream(Paths.get(filename)); - Map offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename)); + Map offset = context.offsetStorageReader().offset(Map.of(FILENAME_FIELD, filename)); if (offset != null) { Object lastRecordedOffset = offset.get(POSITION_FIELD); if (lastRecordedOffset != null && !(lastRecordedOffset instanceof Long)) diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java index dde20105e3731..cda7a771c51d2 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java @@ -32,9 +32,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -58,19 +57,19 @@ public void setup() { @Test public void testPutFlush() { - HashMap offsets = new HashMap<>(); + Map offsets = new HashMap<>(); final String newLine = System.lineSeparator(); // We do not call task.start() since it would override the output stream - task.put(Collections.singletonList( + task.put(List.of( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); task.flush(offsets); assertEquals("line1" + newLine, os.toString()); - task.put(Arrays.asList( + task.put(List.of( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2), new SinkRecord("topic2", 0, null, null, 
Schema.STRING_SCHEMA, "line3", 1) )); @@ -88,7 +87,7 @@ public void testStart() throws IOException { task.start(props); HashMap offsets = new HashMap<>(); - task.put(Collections.singletonList( + task.put(List.of( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line0", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); @@ -99,7 +98,7 @@ public void testStart() throws IOException { int i = 0; try (BufferedReader reader = Files.newBufferedReader(Paths.get(outputFile))) { lines[i++] = reader.readLine(); - task.put(Arrays.asList( + task.put(List.of( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 2), new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line2", 1) )); diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java index e0c14a1e6cb19..94c046182f199 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java @@ -158,48 +158,48 @@ public void testInvalidBatchSize() { @Test public void testAlterOffsetsStdin() { sourceProperties.remove(FileStreamSourceConnector.FILE_CONFIG); - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap(FILENAME_FIELD, FILENAME), - Collections.singletonMap(POSITION_FIELD, 0L) + Map, Map> offsets = Map.of( + Map.of(FILENAME_FIELD, FILENAME), + Map.of(POSITION_FIELD, 0L) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsIncorrectPartitionKey() { - assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap( - Collections.singletonMap("other_partition_key", FILENAME), - Collections.singletonMap(POSITION_FIELD, 0L) + assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Map.of( + Map.of("other_partition_key", FILENAME), + Map.of(POSITION_FIELD, 0L) ))); // null partitions are invalid assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap( null, - Collections.singletonMap(POSITION_FIELD, 0L) + Map.of(POSITION_FIELD, 0L) ))); } @Test public void testAlterOffsetsMultiplePartitions() { Map, Map> offsets = new HashMap<>(); - offsets.put(Collections.singletonMap(FILENAME_FIELD, FILENAME), Collections.singletonMap(POSITION_FIELD, 0L)); + offsets.put(Map.of(FILENAME_FIELD, FILENAME), Map.of(POSITION_FIELD, 0L)); offsets.put(Collections.singletonMap(FILENAME_FIELD, "/someotherfilename"), null); assertTrue(connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsIncorrectOffsetKey() { - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap(FILENAME_FIELD, FILENAME), - Collections.singletonMap("other_offset_key", 0L) + Map, Map> offsets = Map.of( + Map.of(FILENAME_FIELD, FILENAME), + Map.of("other_offset_key", 0L) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsOffsetPositionValues() { - Function alterOffsets = offset -> connector.alterOffsets(sourceProperties, Collections.singletonMap( - Collections.singletonMap(FILENAME_FIELD, FILENAME), + Function alterOffsets = offset -> connector.alterOffsets(sourceProperties, Map.of( + Map.of(FILENAME_FIELD, FILENAME), 
Collections.singletonMap(POSITION_FIELD, offset) )); @@ -217,9 +217,9 @@ public void testAlterOffsetsOffsetPositionValues() { @Test public void testSuccessfulAlterOffsets() { - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap(FILENAME_FIELD, FILENAME), - Collections.singletonMap(POSITION_FIELD, 0L) + Map, Map> offsets = Map.of( + Map.of(FILENAME_FIELD, FILENAME), + Map.of(POSITION_FIELD, 0L) ); // Expect no exception to be thrown when a valid offsets map is passed. An empty offsets map is treated as valid @@ -237,9 +237,9 @@ public void testAlterOffsetsTombstones() { ); assertTrue(alterOffsets.apply(null)); - assertTrue(alterOffsets.apply(Collections.emptyMap())); - assertTrue(alterOffsets.apply(Collections.singletonMap(FILENAME_FIELD, FILENAME))); - assertTrue(alterOffsets.apply(Collections.singletonMap(FILENAME_FIELD, "/someotherfilename"))); - assertTrue(alterOffsets.apply(Collections.singletonMap("garbage_partition_key", "garbage_partition_value"))); + assertTrue(alterOffsets.apply(Map.of())); + assertTrue(alterOffsets.apply(Map.of(FILENAME_FIELD, FILENAME))); + assertTrue(alterOffsets.apply(Map.of(FILENAME_FIELD, "/someotherfilename"))); + assertTrue(alterOffsets.apply(Map.of("garbage_partition_key", "garbage_partition_value"))); } } diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java index e0e77a8433c72..c8118faf1589e 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java @@ -30,7 +30,6 @@ import java.io.OutputStream; import java.nio.file.Files; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -87,8 +86,8 @@ public void testNormalLifecycle() throws InterruptedException, IOException { assertEquals(1, records.size()); assertEquals(TOPIC, records.get(0).topic()); assertEquals("partial line finished", records.get(0).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset()); assertNull(task.poll()); // Different line endings, and make sure the final \r doesn't result in a line until we can @@ -98,25 +97,25 @@ public void testNormalLifecycle() throws InterruptedException, IOException { records = task.poll(); assertEquals(4, records.size()); assertEquals("line1", records.get(0).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset()); assertEquals("line2", records.get(1).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, 
tempFile.getAbsolutePath()), records.get(1).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(1).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset()); assertEquals("line3", records.get(2).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(2).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(2).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset()); assertEquals("line4", records.get(3).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(3).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(3).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset()); os.write("subsequent text".getBytes()); os.flush(); records = task.poll(); assertEquals(1, records.size()); assertEquals("", records.get(0).value()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset()); + assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset()); os.close(); task.stop(); diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java index 577b07bb5bdb4..51096d32107ba 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java @@ -30,7 +30,6 @@ import java.io.PrintStream; import java.nio.file.Files; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -130,8 +129,8 @@ public void testAlterOffsets() throws Exception { // Alter the offsets to make the connector re-process the last line in the file connect.alterSourceConnectorOffset( CONNECTOR_NAME, - Collections.singletonMap(FILENAME_FIELD, sourceFile.getAbsolutePath()), - Collections.singletonMap(POSITION_FIELD, 28L) + Map.of(FILENAME_FIELD, sourceFile.getAbsolutePath()), + Map.of(POSITION_FIELD, 28L) ); connect.resumeConnector(CONNECTOR_NAME); diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java index 30d68971568f2..dac2ce5674150 100644 --- 
a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java @@ -53,7 +53,6 @@ import java.util.Collection; import java.util.EnumMap; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import java.util.Set; @@ -103,9 +102,7 @@ public class JsonConverter implements Converter, HeaderConverter, Versioned { if (schema == null || keySchema.type() == Schema.Type.STRING) { if (!value.isObject()) throw new DataException("Maps with string fields should be encoded as JSON objects, but found " + value.getNodeType()); - Iterator> fieldIt = value.fields(); - while (fieldIt.hasNext()) { - Map.Entry entry = fieldIt.next(); + for (Map.Entry entry : value.properties()) { result.put(entry.getKey(), convertToConnect(valueSchema, entry.getValue(), config)); } } else { @@ -152,14 +149,10 @@ public JsonNode toJson(final Schema schema, final Object value, final JsonConver if (!(value instanceof BigDecimal decimal)) throw new DataException("Invalid type for Decimal, expected BigDecimal but was " + value.getClass()); - switch (config.decimalFormat()) { - case NUMERIC: - return JSON_NODE_FACTORY.numberNode(decimal); - case BASE64: - return JSON_NODE_FACTORY.binaryNode(Decimal.fromLogical(schema, decimal)); - default: - throw new DataException("Unexpected " + JsonConverterConfig.DECIMAL_FORMAT_CONFIG + ": " + config.decimalFormat()); - } + return switch (config.decimalFormat()) { + case NUMERIC -> JSON_NODE_FACTORY.numberNode(decimal); + case BASE64 -> JSON_NODE_FACTORY.binaryNode(Decimal.fromLogical(schema, decimal)); + }; } @Override @@ -229,6 +222,7 @@ public Object toConnect(final Schema schema, final JsonNode value) { private JsonConverterConfig config; private Cache fromConnectSchemaCache; private Cache toConnectSchemaCache; + private Schema schema = null; // if a schema is provided in config, this schema will be used for all messages for sink connector private final JsonSerializer serializer; private final JsonDeserializer deserializer; @@ -291,6 +285,16 @@ public void configure(Map configs) { fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize())); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize())); + + try { + final byte[] schemaContent = config.schemaContent(); + if (schemaContent != null) { + final JsonNode schemaNode = deserializer.deserialize("", schemaContent); + this.schema = asConnectSchema(schemaNode); + } + } catch (SerializationException e) { + throw new DataException("Failed to parse schema in converter config due to serialization error: ", e); + } } @Override @@ -345,13 +349,16 @@ public SchemaAndValue toConnectData(String topic, byte[] value) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } - if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))) - throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." 
+ + if (config.schemasEnabled()) { + if (schema != null) { + return new SchemaAndValue(schema, convertToConnect(schema, jsonValue, config)); + } else if (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)) { + throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); - - // The deserialized data should either be an envelope object containing the schema and the payload or the schema - // was stripped during serialization and we need to fill in an all-encompassing schema. - if (!config.schemasEnabled()) { + } + } else { + // The deserialized data should either be an envelope object containing the schema and the payload or the schema + // was stripped during serialization and we need to fill in an all-encompassing schema. ObjectNode envelope = JSON_NODE_FACTORY.objectNode(); envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null); envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue); @@ -540,9 +547,7 @@ public Schema asConnectSchema(JsonNode jsonSchema) { JsonNode schemaParamsNode = jsonSchema.get(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME); if (schemaParamsNode != null && schemaParamsNode.isObject()) { - Iterator> paramsIt = schemaParamsNode.fields(); - while (paramsIt.hasNext()) { - Map.Entry entry = paramsIt.next(); + for (Map.Entry entry : schemaParamsNode.properties()) { JsonNode paramValue = entry.getValue(); if (!paramValue.isTextual()) throw new DataException("Schema parameters must have string values."); diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java index f02d54ac26307..17d48c7f14c3d 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.connect.storage.ConverterConfig; +import java.nio.charset.StandardCharsets; import java.util.Locale; import java.util.Map; @@ -35,6 +36,12 @@ public final class JsonConverterConfig extends ConverterConfig { private static final String SCHEMAS_ENABLE_DOC = "Include schemas within each of the serialized values and keys."; private static final String SCHEMAS_ENABLE_DISPLAY = "Enable Schemas"; + public static final String SCHEMA_CONTENT_CONFIG = "schema.content"; + public static final String SCHEMA_CONTENT_DEFAULT = null; + private static final String SCHEMA_CONTENT_DOC = "When set, this is used as the schema for all messages, and the schema within each message will be ignored. " + + "Otherwise, the schema will be included in the content of each message. 
This configuration applies only when 'schemas.enable' is true, and it only affects sink connectors."; + private static final String SCHEMA_CONTENT_DISPLAY = "Schema Content"; + public static final String SCHEMAS_CACHE_SIZE_CONFIG = "schemas.cache.size"; public static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000; private static final String SCHEMAS_CACHE_SIZE_DOC = "The maximum number of schemas that can be cached in this converter instance."; @@ -61,6 +68,8 @@ public final class JsonConverterConfig extends ConverterConfig { orderInGroup++, Width.MEDIUM, SCHEMAS_ENABLE_DISPLAY); CONFIG.define(SCHEMAS_CACHE_SIZE_CONFIG, Type.INT, SCHEMAS_CACHE_SIZE_DEFAULT, Importance.HIGH, SCHEMAS_CACHE_SIZE_DOC, group, orderInGroup++, Width.MEDIUM, SCHEMAS_CACHE_SIZE_DISPLAY); + CONFIG.define(SCHEMA_CONTENT_CONFIG, Type.STRING, SCHEMA_CONTENT_DEFAULT, Importance.HIGH, SCHEMA_CONTENT_DOC, group, + orderInGroup++, Width.MEDIUM, SCHEMA_CONTENT_DISPLAY); group = "Serialization"; orderInGroup = 0; @@ -86,6 +95,7 @@ public static ConfigDef configDef() { private final int schemaCacheSize; private final DecimalFormat decimalFormat; private final boolean replaceNullWithDefault; + private final byte[] schemaContent; public JsonConverterConfig(Map props) { super(CONFIG, props); @@ -93,6 +103,10 @@ public JsonConverterConfig(Map props) { this.schemaCacheSize = getInt(SCHEMAS_CACHE_SIZE_CONFIG); this.decimalFormat = DecimalFormat.valueOf(getString(DECIMAL_FORMAT_CONFIG).toUpperCase(Locale.ROOT)); this.replaceNullWithDefault = getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG); + String schemaContentStr = getString(SCHEMA_CONTENT_CONFIG); + this.schemaContent = (schemaContentStr == null || schemaContentStr.isEmpty()) + ? null + : schemaContentStr.getBytes(StandardCharsets.UTF_8); } /** @@ -130,4 +144,15 @@ public boolean replaceNullWithDefault() { return replaceNullWithDefault; } + /** + * If a default schema is provided in the converter config, this will be + * used for all messages. + * + * This is only relevant if schemas are enabled. 
+ * + * @return Schema Contents, will return null if no value is provided + */ + public byte[] schemaContent() { + return schemaContent; + } } diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java index 775768f37d31d..f88c1d838abc1 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java @@ -26,7 +26,6 @@ import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.module.blackbird.BlackbirdModule; -import java.util.Collections; import java.util.Set; /** @@ -40,7 +39,7 @@ public class JsonDeserializer implements Deserializer { * Default constructor needed by Kafka */ public JsonDeserializer() { - this(Collections.emptySet(), new JsonNodeFactory(true), true); + this(Set.of(), new JsonNodeFactory(true), true); } /** diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java index e40f530469af6..8f6adfaf1ff38 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java @@ -25,7 +25,6 @@ import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.module.blackbird.BlackbirdModule; -import java.util.Collections; import java.util.Set; /** @@ -39,7 +38,7 @@ public class JsonSerializer implements Serializer { * Default constructor needed by Kafka */ public JsonSerializer() { - this(Collections.emptySet(), new JsonNodeFactory(true), true); + this(Set.of(), new JsonNodeFactory(true), true); } /** diff --git a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java index 930fb3bb4b84e..6b2eabaab1e06 100644 --- a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java +++ b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java @@ -35,7 +35,7 @@ public void shouldBeCaseInsensitiveForDecimalFormatConfig() { configValues.put(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, "NuMeRiC"); final JsonConverterConfig config = new JsonConverterConfig(configValues); - assertEquals(config.decimalFormat(), DecimalFormat.NUMERIC); + assertEquals(DecimalFormat.NUMERIC, config.decimalFormat()); } } \ No newline at end of file diff --git a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java index d79c8527b3c21..200b33d1774e2 100644 --- a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java +++ b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java @@ -36,6 +36,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import java.io.File; import java.io.IOException; @@ -44,12 +46,11 @@ import java.net.URISyntaxException; import java.net.URL; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Calendar; -import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.HashSet; +import 
java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -74,7 +75,7 @@ public class JsonConverterTest { @BeforeEach public void setUp() { - converter.configure(Collections.emptyMap(), false); + converter.configure(Map.of(), false); } // Schema metadata @@ -155,7 +156,7 @@ public void stringToConnect() { @Test public void arrayToConnect() { byte[] arrayJson = "{ \"schema\": { \"type\": \"array\", \"items\": { \"type\" : \"int32\" } }, \"payload\": [1, 2, 3] }".getBytes(); - assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson)); + assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson)); } @Test @@ -211,7 +212,7 @@ public void nullToConnect() { @Test public void emptyBytesToConnect() { // This characterizes the messages with empty data when Json schemas is disabled - Map props = Collections.singletonMap("schemas.enable", false); + Map props = Map.of("schemas.enable", false); converter.configure(props, true); SchemaAndValue converted = converter.toConnectData(TOPIC, "".getBytes()); assertEquals(SchemaAndValue.NULL, converted); @@ -223,7 +224,7 @@ public void emptyBytesToConnect() { @Test public void schemalessWithEmptyFieldValueToConnect() { // This characterizes the messages with empty data when Json schemas is disabled - Map props = Collections.singletonMap("schemas.enable", false); + Map props = Map.of("schemas.enable", false); converter.configure(props, true); String input = "{ \"a\": \"\", \"b\": null}"; SchemaAndValue converted = converter.toConnectData(TOPIC, input.getBytes()); @@ -254,7 +255,7 @@ public void nullSchemaPrimitiveToConnect() { assertEquals(new SchemaAndValue(null, "a string"), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": [1, \"2\", 3] }".getBytes()); - assertEquals(new SchemaAndValue(null, Arrays.asList(1L, "2", 3L)), converted); + assertEquals(new SchemaAndValue(null, List.of(1L, "2", 3L)), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": { \"field1\": 1, \"field2\": 2} }".getBytes()); Map obj = new HashMap<>(); @@ -587,7 +588,7 @@ public void stringToJson() { @Test public void arrayToJson() { Schema int32Array = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); - JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, Arrays.asList(1, 2, 3))); + JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, List.of(1, 2, 3))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"array\", \"items\": { \"type\": \"int32\", \"optional\": false }, \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); @@ -626,8 +627,8 @@ public void mapToJsonNonStringKeys() { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add(1).add(12), - JsonNodeFactory.instance.arrayNode().add(2).add(15))), + assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add(1).add(12), + JsonNodeFactory.instance.arrayNode().add(2).add(15)), payloadEntries ); } @@ -675,7 +676,7 @@ public void decimalToJson() throws IOException { @Test public void decimalToNumericJson() { - converter.configure(Collections.singletonMap(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); + 
converter.configure(Map.of(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); JsonNode converted = parse(converter.fromConnectData(TOPIC, Decimal.schema(2), new BigDecimal(new BigInteger("156"), 2))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"2\" } }"), @@ -686,7 +687,7 @@ public void decimalToNumericJson() { @Test public void decimalWithTrailingZerosToNumericJson() { - converter.configure(Collections.singletonMap(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); + converter.configure(Map.of(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); JsonNode converted = parse(converter.fromConnectData(TOPIC, Decimal.schema(4), new BigDecimal(new BigInteger("15600"), 4))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"4\" } }"), @@ -766,7 +767,7 @@ public void nullSchemaAndPrimitiveToJson() { public void nullSchemaAndArrayToJson() { // This still needs to do conversion of data, null schema means "anything goes". Make sure we mix and match // types to verify conversion still works. - JsonNode converted = parse(converter.fromConnectData(TOPIC, null, Arrays.asList(1, "string", true))); + JsonNode converted = parse(converter.fromConnectData(TOPIC, null, List.of(1, "string", true))); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add("string").add(true), @@ -805,9 +806,9 @@ public void nullSchemaAndMapNonStringKeysToJson() { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12), + assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add("string").add(12), JsonNodeFactory.instance.arrayNode().add(52).add("string"), - JsonNodeFactory.instance.arrayNode().add(false).add(true))), + JsonNodeFactory.instance.arrayNode().add(false).add(true)), payloadEntries ); } @@ -815,7 +816,7 @@ public void nullSchemaAndMapNonStringKeysToJson() { @Test public void nullSchemaAndNullValueToJson() { // This characterizes the production of tombstone messages when Json schemas is enabled - Map props = Collections.singletonMap("schemas.enable", true); + Map props = Map.of("schemas.enable", true); converter.configure(props, true); byte[] converted = converter.fromConnectData(TOPIC, null, null); assertNull(converted); @@ -824,7 +825,7 @@ public void nullSchemaAndNullValueToJson() { @Test public void nullValueToJson() { // This characterizes the production of tombstone messages when Json schemas is not enabled - Map props = Collections.singletonMap("schemas.enable", false); + Map props = Map.of("schemas.enable", false); converter.configure(props, true); byte[] converted = converter.fromConnectData(TOPIC, null, null); assertNull(converted); @@ -839,14 +840,14 @@ public void mismatchSchemaJson() { @Test public void noSchemaToConnect() { - Map props = Collections.singletonMap("schemas.enable", false); + Map props = Map.of("schemas.enable", false); converter.configure(props, true); assertEquals(new SchemaAndValue(null, true), converter.toConnectData(TOPIC, "true".getBytes())); } @Test 
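// A minimal usage sketch of the schema.content property added above in JsonConverterConfig
// (the topic name and payload are illustrative; schemas.enable is left at its default of true):
// when schema.content carries a schema, the converter applies it to every record and accepts
// bare JSON payloads without the {"schema": ..., "payload": ...} envelope used in the other tests.
import java.util.Map;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonConverterConfig;

class SchemaContentUsageSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // isKey = false: configure the value-side converter with a fixed string schema.
        converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"type\": \"string\" }"), false);
        // The bytes contain only the payload; no envelope is required.
        SchemaAndValue data = converter.toConnectData("my-topic", "\"foo-bar-baz\"".getBytes());
        System.out.println(data.schema().type() + ": " + data.value()); // STRING: foo-bar-baz
    }
}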
public void noSchemaToJson() { - Map props = Collections.singletonMap("schemas.enable", false); + Map props = Map.of("schemas.enable", false); converter.configure(props, true); JsonNode converted = parse(converter.fromConnectData(TOPIC, null, true)); assertTrue(converted.isBoolean()); @@ -876,7 +877,7 @@ public void testJsonSchemaCacheSizeFromConfigFile() throws URISyntaxException, I File propFile = new File(url.toURI()); String workerPropsFile = propFile.getAbsolutePath(); Map workerProps = !workerPropsFile.isEmpty() ? - Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap(); + Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Map.of(); JsonConverter rc = new JsonConverter(); rc.configure(workerProps, false); @@ -901,7 +902,7 @@ public void stringHeaderToConnect() { @Test public void serializeNullToDefault() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, null)); JsonNode expected = parse("{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":\"default\"}"); @@ -910,7 +911,7 @@ public void serializeNullToDefault() { @Test public void serializeNullToNull() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, null)); JsonNode expected = parse("{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"); @@ -919,7 +920,7 @@ public void serializeNullToNull() { @Test public void deserializeNullToDefault() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); String value = "{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); assertEquals("default", sav.value()); @@ -927,7 +928,7 @@ public void deserializeNullToDefault() { @Test public void deserializeNullToNull() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); String value = "{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); assertNull(sav.value()); @@ -935,7 +936,7 @@ public void deserializeNullToNull() { @Test public void serializeFieldNullToDefault() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); Schema structSchema = SchemaBuilder.struct().field("field1", schema).build(); JsonNode converted = 
parse(converter.fromConnectData(TOPIC, structSchema, new Struct(structSchema))); @@ -945,7 +946,7 @@ public void serializeFieldNullToDefault() { @Test public void serializeFieldNullToNull() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); Schema structSchema = SchemaBuilder.struct().field("field1", schema).build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, structSchema, new Struct(structSchema))); @@ -955,7 +956,7 @@ public void serializeFieldNullToNull() { @Test public void deserializeFieldNullToDefault() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); String value = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"field\":\"field1\",\"type\":\"string\",\"optional\":true,\"default\":\"default\"}],\"optional\":false},\"payload\":{\"field1\":null}}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); @@ -965,7 +966,7 @@ public void deserializeFieldNullToDefault() { @Test public void deserializeFieldNullToNull() { - converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); String value = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"field\":\"field1\",\"type\":\"string\",\"optional\":true,\"default\":\"default\"}],\"optional\":false},\"payload\":{\"field1\":null}}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); @@ -978,6 +979,58 @@ public void testVersionRetrievedFromAppInfoParser() { assertEquals(AppInfoParser.getVersion(), converter.version()); } + @Test + public void testSchemaContentIsNull() { + Map config = new HashMap<>(); + config.put(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, null); + converter.configure(config, false); + byte[] jsonBytes = "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes(); + SchemaAndValue result = converter.toConnectData(TOPIC, jsonBytes); + assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), result); + } + + @Test + public void testSchemaContentIsEmptyString() { + converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, ""), false); + assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes())); + } + + @Test + public void testSchemaContentValidSchema() { + converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"type\": \"string\" }"), false); + assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "\"foo-bar-baz\"".getBytes())); + } + + @Test + public void testSchemaContentInValidSchema() { + assertThrows( + DataException.class, + () -> converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"string\" }"), false), + " Provided schema is invalid , please recheck the schema you have 
provided"); + } + + @Test + public void testSchemaContentLooksLikeSchema() { + converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"type\": \"struct\", \"fields\": [{\"field\": \"schema\", \"type\": \"struct\",\"fields\": [{\"field\": \"type\", \"type\": \"string\" }]}, {\"field\": \"payload\", \"type\": \"string\"}]}"), false); + SchemaAndValue connectData = converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes()); + assertEquals("foo-bar-baz", ((Struct) connectData.value()).getString("payload")); + } + + @ParameterizedTest + @ValueSource(strings = { + "{ }", + "{ \"wrong\": \"schema\" }", + "{ \"schema\": { \"type\": \"string\" } }", + "{ \"payload\": \"foo-bar-baz\" }", + "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\", \"extra\": \"field\" }", + }) + public void testNullSchemaContentWithWrongConnectDataValue(String value) { + converter.configure(Map.of(), false); + assertThrows( + DataException.class, + () -> converter.toConnectData(TOPIC, value.getBytes())); + } + private JsonNode parse(byte[] json) { try { return objectMapper.readTree(json); diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java index 0b74b64ebbb4e..06dec5b25ba38 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java @@ -33,7 +33,6 @@ import java.time.Duration; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -165,7 +164,7 @@ public Map remoteConsumerOffsets(String consu // to use ReplicationPolicy to create the checkpoint topic here. String checkpointTopic = replicationPolicy.checkpointsTopic(remoteClusterAlias); List checkpointAssignment = - Collections.singletonList(new TopicPartition(checkpointTopic, 0)); + List.of(new TopicPartition(checkpointTopic, 0)); consumer.assign(checkpointAssignment); consumer.seekToBeginning(checkpointAssignment); while (System.currentTimeMillis() < deadline && !endOfStream(consumer, checkpointAssignment)) { diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java index 53a4f9f5f051d..cb42f5fe654ba 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java @@ -32,6 +32,7 @@ import java.util.Map; import static org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString.in; +import static org.apache.kafka.common.config.ConfigDef.NO_DEFAULT_VALUE; /** * Configuration required for {@link MirrorClient} to talk to a given target cluster. 
@@ -105,7 +106,7 @@ public Map consumerConfig() { public Map producerConfig() { return clientConfig(PRODUCER_CLIENT_PREFIX); } - + private Map clientConfig(String prefix) { Map props = new HashMap<>(valuesWithPrefixOverride(prefix)); props.keySet().retainAll(CLIENT_CONFIG_DEF.names()); @@ -117,7 +118,8 @@ private Map clientConfig(String prefix) { static final ConfigDef CLIENT_CONFIG_DEF = new ConfigDef() .define(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - null, + NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) // security support @@ -129,13 +131,14 @@ private Map clientConfig(String prefix) { CommonClientConfigs.SECURITY_PROTOCOL_DOC) .withClientSslSupport() .withClientSaslSupport(); - + static final ConfigDef CONFIG_DEF = new ConfigDef() .define(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, - Type.STRING, - null, + Type.LIST, + NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), Importance.HIGH, - CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) + CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define( REPLICATION_POLICY_CLASS, ConfigDef.Type.CLASS, diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java index be728a0ebe98a..5e99f6cd74eeb 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java @@ -20,10 +20,9 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -46,7 +45,7 @@ private static class FakeMirrorClient extends MirrorClient { } FakeMirrorClient() { - this(Collections.emptyList()); + this(List.of()); } @Override @@ -94,25 +93,25 @@ public void countHopsForTopicTest() { @Test public void heartbeatTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", "source1.heartbeats", "source2.source1.heartbeats", "source3.heartbeats")); Set heartbeatTopics = client.heartbeatTopics(); - assertEquals(heartbeatTopics, new HashSet<>(Arrays.asList("heartbeats", "source1.heartbeats", - "source2.source1.heartbeats", "source3.heartbeats"))); + assertEquals(heartbeatTopics, Set.of("heartbeats", "source1.heartbeats", + "source2.source1.heartbeats", "source3.heartbeats")); } @Test public void checkpointsTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "checkpoints.internal", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "checkpoints.internal", "source1.checkpoints.internal", "source2.source1.checkpoints.internal", "source3.checkpoints.internal")); Set checkpointTopics = client.checkpointTopics(); - assertEquals(new HashSet<>(Arrays.asList("source1.checkpoints.internal", - "source2.source1.checkpoints.internal", "source3.checkpoints.internal")), checkpointTopics); + assertEquals(Set.of("source1.checkpoints.internal", + "source2.source1.checkpoints.internal", "source3.checkpoints.internal"), checkpointTopics); } @Test public void replicationHopsTest() 
throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.heartbeats")); assertEquals(1, client.replicationHops("source1")); assertEquals(2, client.replicationHops("source2")); @@ -122,7 +121,7 @@ public void replicationHopsTest() throws InterruptedException { @Test public void upstreamClustersTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.source4.source5.heartbeats")); Set sources = client.upstreamClusters(); assertTrue(sources.contains("source1")); @@ -138,7 +137,7 @@ public void upstreamClustersTest() throws InterruptedException { @Test public void testIdentityReplicationUpstreamClusters() throws InterruptedException { // IdentityReplicationPolicy treats heartbeats as a special case, so these should work as usual. - MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), Arrays.asList("topic1", + MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), List.of("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.source4.source5.heartbeats")); Set sources = client.upstreamClusters(); @@ -154,7 +153,7 @@ public void testIdentityReplicationUpstreamClusters() throws InterruptedExceptio @Test public void remoteTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "topic3", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "topic3", "source1.topic4", "source1.source2.topic5", "source3.source4.source5.topic6")); Set remoteTopics = client.remoteTopics(); assertFalse(remoteTopics.contains("topic1")); @@ -168,7 +167,7 @@ public void remoteTopicsTest() throws InterruptedException { @Test public void testIdentityReplicationRemoteTopics() throws InterruptedException { // IdentityReplicationPolicy should consider any topic to be remote. 
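
The topic-naming conventions these MirrorClientTest cases rely on come from ReplicationPolicy. A small sketch, using only the public DefaultReplicationPolicy methods already exercised in this file (formatRemoteTopic, topicSource, upstreamTopic) and the same replication.policy.separator override as remoteTopicsSeparatorTest; the cluster alias and topic names are placeholders.

    import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;

    import java.util.Map;

    public class ReplicationPolicySketch {
        public static void main(String[] args) {
            DefaultReplicationPolicy policy = new DefaultReplicationPolicy();
            // Same override exercised by remoteTopicsSeparatorTest.
            policy.configure(Map.of("replication.policy.separator", "__"));

            String remote = policy.formatRemoteTopic("source1", "topic4");
            System.out.println(remote);                       // source1__topic4
            System.out.println(policy.topicSource(remote));   // source1
            System.out.println(policy.upstreamTopic(remote)); // topic4
        }
    }
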
- MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), Arrays.asList( + MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), List.of( "topic1", "topic2", "topic3", "heartbeats", "backup.heartbeats")); Set remoteTopics = client.remoteTopics(); assertTrue(remoteTopics.contains("topic1")); @@ -181,10 +180,10 @@ public void testIdentityReplicationRemoteTopics() throws InterruptedException { @Test public void remoteTopicsSeparatorTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "topic3", + MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "topic3", "source1__topic4", "source1__source2__topic5", "source3__source4__source5__topic6")); ((Configurable) client.replicationPolicy()).configure( - Collections.singletonMap("replication.policy.separator", "__")); + Map.of("replication.policy.separator", "__")); Set remoteTopics = client.remoteTopics(); assertFalse(remoteTopics.contains("topic1")); assertFalse(remoteTopics.contains("topic2")); @@ -197,7 +196,7 @@ public void remoteTopicsSeparatorTest() throws InterruptedException { @Test public void testIdentityReplicationTopicSource() { MirrorClient client = new FakeMirrorClient( - identityReplicationPolicy("primary"), Collections.emptyList()); + identityReplicationPolicy("primary"), List.of()); assertEquals("topic1", client.replicationPolicy() .formatRemoteTopic("primary", "topic1")); assertEquals("primary", client.replicationPolicy() @@ -211,8 +210,7 @@ public void testIdentityReplicationTopicSource() { private ReplicationPolicy identityReplicationPolicy(String source) { IdentityReplicationPolicy policy = new IdentityReplicationPolicy(); - policy.configure(Collections.singletonMap( - IdentityReplicationPolicy.SOURCE_CLUSTER_ALIAS_CONFIG, source)); + policy.configure(Map.of(IdentityReplicationPolicy.SOURCE_CLUSTER_ALIAS_CONFIG, source)); return policy; } } diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java index 86aaf8ffd0e2b..e59348b05a494 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java @@ -20,7 +20,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -33,7 +32,7 @@ public class ReplicationPolicyTest { @BeforeEach public void setUp() { - DEFAULT_REPLICATION_POLICY.configure(Collections.emptyMap()); + DEFAULT_REPLICATION_POLICY.configure(Map.of()); } @Test diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java index 2e88977d93cea..39aea181b6674 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java @@ -30,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -97,7 +96,7 @@ public void update(String group, Map newCheckpoints) public Map get(String group) { Map result = checkpointsPerConsumerGroup.get(group); - return result == null 
? null : Collections.unmodifiableMap(result); + return result == null ? null : Map.copyOf(result); } public Map> computeConvertedUpstreamOffset() { diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java index f88ed4e704661..ded82a8571c7f 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java @@ -65,11 +65,13 @@ static class GroupFilterConfig extends AbstractConfig { .define(GROUPS_INCLUDE_CONFIG, Type.LIST, GROUPS_INCLUDE_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, GROUPS_INCLUDE_DOC) .define(GROUPS_EXCLUDE_CONFIG, Type.LIST, GROUPS_EXCLUDE_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, GROUPS_EXCLUDE_DOC); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java index 3fb2859d2dd46..b7625da619ddf 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java @@ -194,12 +194,14 @@ private static ConfigDef defineCheckpointConfig(ConfigDef baseConfig) { GROUPS, ConfigDef.Type.LIST, GROUPS_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, GROUPS_DOC) .define( GROUPS_EXCLUDE, ConfigDef.Type.LIST, GROUPS_EXCLUDE_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, GROUPS_EXCLUDE_DOC) .define( diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java index 74b77850c860a..9f8472c9c35b6 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java @@ -38,7 +38,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -136,7 +135,7 @@ public List> taskConfigs(int maxTasks) { // If the replication is disabled or checkpoint emission is disabled by setting 'emit.checkpoints.enabled' to false, // the interval of checkpoint emission will be negative and no 'MirrorCheckpointTask' will be created. if (!config.enabled() || config.emitCheckpointsInterval().isNegative()) { - return Collections.emptyList(); + return List.of(); } if (knownConsumerGroups == null) { @@ -148,7 +147,7 @@ public List> taskConfigs(int maxTasks) { // If the consumer group is empty, no 'MirrorCheckpointTask' will be created. if (knownConsumerGroups.isEmpty()) { - return Collections.emptyList(); + return List.of(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); @@ -199,7 +198,7 @@ private void refreshConsumerGroups() throws InterruptedException, ExecutionException { // If loadInitialConsumerGroups fails for any reason(e.g., timeout), knownConsumerGroups may be null. // We still want this method to recover gracefully in such cases. - Set knownConsumerGroups = this.knownConsumerGroups == null ? 
Collections.emptySet() : this.knownConsumerGroups; + Set knownConsumerGroups = this.knownConsumerGroups == null ? Set.of() : this.knownConsumerGroups; Set consumerGroups = findConsumerGroups(); Set newConsumerGroups = new HashSet<>(consumerGroups); newConsumerGroups.removeAll(knownConsumerGroups); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java index 8ace7d1fc3bdb..71e3edebf5b63 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java @@ -26,9 +26,7 @@ import org.apache.kafka.common.metrics.stats.Min; import org.apache.kafka.common.metrics.stats.Value; -import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; @@ -38,7 +36,7 @@ class MirrorCheckpointMetrics implements AutoCloseable { private static final String CHECKPOINT_CONNECTOR_GROUP = MirrorCheckpointConnector.class.getSimpleName(); - private static final Set GROUP_TAGS = new HashSet<>(Arrays.asList("source", "target", "group", "topic", "partition")); + private static final Set GROUP_TAGS = Set.of("source", "target", "group", "topic", "partition"); private static final MetricNameTemplate CHECKPOINT_LATENCY = new MetricNameTemplate( "checkpoint-latency-ms", CHECKPOINT_CONNECTOR_GROUP, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java index 254e2bf8ca649..db86fbdb40be7 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java @@ -35,7 +35,6 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -186,7 +185,7 @@ List sourceRecordsForGroup(String group) throws InterruptedExcepti .collect(Collectors.toList()); } catch (ExecutionException e) { log.error("Error querying offsets for consumer group {} on cluster {}.", group, sourceClusterAlias, e); - return Collections.emptyList(); + return List.of(); } } @@ -195,7 +194,7 @@ Map checkpointsForGroup(Map shouldCheckpointTopic(x.getKey().topic())) // Only perform relevant checkpoints filtered by "topic filter" .map(x -> checkpoint(group, x.getKey(), x.getValue())) - .flatMap(o -> o.stream()) // do not emit checkpoints for partitions that don't have offset-syncs + .flatMap(Optional::stream) // do not emit checkpoints for partitions that don't have offset-syncs .filter(x -> x.downstreamOffset() >= 0) // ignore offsets we cannot translate accurately .filter(this::checkpointIsMoreRecent) // do not emit checkpoints for partitions that have a later checkpoint .collect(Collectors.toMap(Checkpoint::topicPartition, Function.identity())); @@ -234,7 +233,7 @@ private Map listConsumerGroupOffsets(String g throws InterruptedException, ExecutionException { if (stopping) { // short circuit if stopping - return Collections.emptyMap(); + return Map.of(); } return adminCall( () -> sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get(), @@ -372,7 +371,7 @@ Map> syncGroupOffset() throws Exe offsetToSync.put(topicPartition, 
convertedOffset); } - if (offsetToSync.size() == 0) { + if (offsetToSync.isEmpty()) { log.trace("skip syncing the offset for consumer group: {}", consumerGroupId); continue; } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java index a8db4989b297c..3d2cfda6dcc9a 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java @@ -18,9 +18,7 @@ import org.apache.kafka.common.config.ConfigDef; -import java.util.Collections; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; @@ -33,11 +31,7 @@ public MirrorCheckpointTaskConfig(Map props) { } Set taskConsumerGroups() { - List fields = getList(TASK_CONSUMER_GROUPS); - if (fields == null || fields.isEmpty()) { - return Collections.emptySet(); - } - return new HashSet<>(fields); + return new HashSet<>(getList(TASK_CONSUMER_GROUPS)); } MirrorCheckpointMetrics metrics() { @@ -55,7 +49,8 @@ String entityLabel() { .define( TASK_CONSUMER_GROUPS, ConfigDef.Type.LIST, - null, + ConfigDef.NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), ConfigDef.Importance.LOW, TASK_CONSUMER_GROUPS_DOC) .define(TASK_INDEX, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java index 920f1d93d0406..9baf7c1f35cb5 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java @@ -311,6 +311,7 @@ String entityLabel() { CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, ConfigDef.Type.LIST, JmxReporter.class.getName(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define( @@ -320,6 +321,12 @@ String entityLabel() { in(Utils.enumOptions(SecurityProtocol.class)), ConfigDef.Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) + .define(CONFIG_PROVIDERS_CONFIG, + ConfigDef.Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.LOW, + CONFIG_PROVIDERS_DOC) .withClientSslSupport() .withClientSaslSupport(); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java index f9a844fecfa13..201339229379c 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java @@ -24,7 +24,6 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceConnector; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -73,10 +72,10 @@ public List> taskConfigs(int maxTasks) { // if the heartbeats emission is disabled by setting `emit.heartbeats.enabled` to `false`, // the interval heartbeat emission will be negative and no `MirrorHeartbeatTask` will be created if (config.emitHeartbeatsInterval().isNegative()) { - return Collections.emptyList(); + return List.of(); } // just need a single task - return 
Collections.singletonList(config.originalsStrings()); + return List.of(config.originalsStrings()); } @Override diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java index 35c9c8feccb29..0a4a1374ca2a2 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java @@ -22,7 +22,6 @@ import org.apache.kafka.connect.source.SourceTask; import java.time.Duration; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -76,7 +75,7 @@ public List poll() throws InterruptedException { Schema.BYTES_SCHEMA, heartbeat.recordKey(), Schema.BYTES_SCHEMA, heartbeat.recordValue(), timestamp); - return Collections.singletonList(record); + return List.of(record); } @Override diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java index b04a4d7bb7271..6a412112c3f8a 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java @@ -57,7 +57,6 @@ import java.net.URLEncoder; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -232,15 +231,15 @@ private void checkHerder(SourceAndTarget sourceAndTarget) { } private void addHerder(SourceAndTarget sourceAndTarget) { - log.info("creating herder for " + sourceAndTarget.toString()); + log.info("creating herder for {}", sourceAndTarget.toString()); Map workerProps = config.workerConfig(sourceAndTarget); + DistributedConfig distributedConfig = new DistributedConfig(workerProps); String encodedSource = encodePath(sourceAndTarget.source()); String encodedTarget = encodePath(sourceAndTarget.target()); List restNamespace = List.of(encodedSource, encodedTarget); String workerId = generateWorkerId(sourceAndTarget); Plugins plugins = new Plugins(workerProps); plugins.compareAndSwapWithDelegatingLoader(); - DistributedConfig distributedConfig = new DistributedConfig(workerProps); String kafkaClusterId = distributedConfig.kafkaClusterId(); String clientIdBase = ConnectUtils.clientIdBase(distributedConfig); // Create the admin client to be shared by all backing stores for this herder @@ -250,7 +249,7 @@ private void addHerder(SourceAndTarget sourceAndTarget) { SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps); KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin, () -> clientIdBase, plugins.newInternalConverter(true, JsonConverter.class.getName(), - Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(distributedConfig); ConnectorClientConfigOverridePolicy clientConfigOverridePolicy = new AllConnectorClientConfigOverridePolicy(); clientConfigOverridePolicy.configure(config.originals()); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java index aba62cf8464ff..33fc2641a394c 100644 --- 
a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java @@ -31,8 +31,6 @@ import org.apache.kafka.connect.runtime.rest.RestServerConfig; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -215,14 +213,9 @@ public Map workerConfig(SourceAndTarget sourceAndTarget) { Set allConfigNames() { Set allNames = new HashSet<>(); - List connectorConfigDefs = Arrays.asList( - MirrorCheckpointConfig.CONNECTOR_CONFIG_DEF, - MirrorSourceConfig.CONNECTOR_CONFIG_DEF, - MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF - ); - for (ConfigDef cd : connectorConfigDefs) { - allNames.addAll(cd.names()); - } + allNames.addAll(MirrorCheckpointConfig.CONNECTOR_CONFIG_DEF.names()); + allNames.addAll(MirrorSourceConfig.CONNECTOR_CONFIG_DEF.names()); + allNames.addAll(MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF.names()); return allNames; } @@ -284,11 +277,11 @@ Map transform(Map props) { return transformed; } - protected static ConfigDef config() { + private static ConfigDef config() { ConfigDef result = new ConfigDef() - .define(CLUSTERS_CONFIG, Type.LIST, Importance.HIGH, CLUSTERS_DOC) + .define(CLUSTERS_CONFIG, Type.LIST, ConfigDef.NO_DEFAULT_VALUE, ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, CLUSTERS_DOC) .define(ENABLE_INTERNAL_REST_CONFIG, Type.BOOLEAN, false, Importance.HIGH, ENABLE_INTERNAL_REST_DOC) - .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, Collections.emptyList(), Importance.LOW, CONFIG_PROVIDERS_DOC) + .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, CONFIG_PROVIDERS_DOC) // security support .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java index dc0da5382338c..2ec663ad2fc32 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java @@ -209,18 +209,21 @@ private static ConfigDef defineSourceConfig(ConfigDef baseConfig) { TOPICS, ConfigDef.Type.LIST, TOPICS_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_DOC) .define( TOPICS_EXCLUDE, ConfigDef.Type.LIST, TOPICS_EXCLUDE_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_EXCLUDE_DOC) .define( CONFIG_PROPERTIES_EXCLUDE, ConfigDef.Type.LIST, CONFIG_PROPERTIES_EXCLUDE_DEFAULT, + ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, CONFIG_PROPERTIES_EXCLUDE_DOC) .define( diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java index a129390b39785..a9d7779673ed7 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java @@ -55,7 +55,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -99,8 +98,8 @@ 
public class MirrorSourceConnector extends SourceConnector { private String connectorName; private TopicFilter topicFilter; private ConfigPropertyFilter configPropertyFilter; - private List knownSourceTopicPartitions = Collections.emptyList(); - private List knownTargetTopicPartitions = Collections.emptyList(); + private List knownSourceTopicPartitions = List.of(); + private List knownTargetTopicPartitions = List.of(); private ReplicationPolicy replicationPolicy; private int replicationFactor; private Admin sourceAdminClient; @@ -202,7 +201,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { if (!config.enabled() || knownSourceTopicPartitions.isEmpty()) { - return Collections.emptyList(); + return List.of(); } int numTasks = Math.min(maxTasks, knownSourceTopicPartitions.size()); List> roundRobinByTask = new ArrayList<>(numTasks); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java index 7e33967c9f1f0..c297c4c5fcf3d 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java @@ -27,8 +27,6 @@ import org.apache.kafka.common.metrics.stats.Min; import org.apache.kafka.common.metrics.stats.Value; -import java.util.Arrays; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; @@ -62,7 +60,7 @@ class MirrorSourceMetrics implements AutoCloseable { this.source = taskConfig.sourceClusterAlias(); this.metrics = new Metrics(); - Set partitionTags = new HashSet<>(Arrays.asList("source", "target", "topic", "partition")); + Set partitionTags = Set.of("source", "target", "topic", "partition"); recordCount = new MetricNameTemplate( "record-count", SOURCE_CONNECTOR_GROUP, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java index f0c562bbcbb08..aa5d300c00ab9 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigDef; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -35,9 +34,6 @@ public MirrorSourceTaskConfig(Map props) { Set taskTopicPartitions() { List fields = getList(TASK_TOPIC_PARTITIONS); - if (fields == null || fields.isEmpty()) { - return Collections.emptySet(); - } return fields.stream() .map(MirrorUtils::decodeTopicPartition) .collect(Collectors.toSet()); @@ -58,7 +54,8 @@ String entityLabel() { .define( TASK_TOPIC_PARTITIONS, ConfigDef.Type.LIST, - null, + ConfigDef.NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), ConfigDef.Importance.LOW, TASK_TOPIC_PARTITIONS_DOC) .define(TASK_INDEX, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java index d8cbba184a48c..fdf091c106fe2 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java @@ -38,19 +38,16 @@ import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.function.Supplier; import java.util.regex.Pattern; -import static java.util.Collections.singleton; - /** Internal utility methods. */ public final class MirrorUtils { @@ -84,7 +81,7 @@ static Map wrapPartition(TopicPartition topicPartition, String s } public static Map wrapOffset(long offset) { - return Collections.singletonMap(OFFSET_KEY, offset); + return Map.of(OFFSET_KEY, offset); } public static TopicPartition unwrapPartition(Map wrapped) { @@ -265,7 +262,7 @@ static Pattern compilePatternList(List fields) { } static Pattern compilePatternList(String fields) { - return compilePatternList(Arrays.asList(fields.split("\\W*,\\W*"))); + return compilePatternList(List.of(fields.split("\\W*,\\W*"))); } static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) { @@ -277,7 +274,7 @@ static void createCompactedTopic(String topicName, short partitions, short repli CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false); try { - admin.createTopics(singleton(topicDescription), args).values().get(topicName).get(); + admin.createTopics(Set.of(topicDescription), args).values().get(topicName).get(); log.info("Created topic '{}'", topicName); } catch (InterruptedException e) { Thread.interrupted(); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java index c46aac634fba5..6e366573cfef1 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java @@ -25,7 +25,7 @@ import java.nio.ByteBuffer; -public class OffsetSync { +public record OffsetSync(TopicPartition topicPartition, long upstreamOffset, long downstreamOffset) { public static final String TOPIC_KEY = "topic"; public static final String PARTITION_KEY = "partition"; public static final String UPSTREAM_OFFSET_KEY = "upstreamOffset"; @@ -39,28 +39,6 @@ public class OffsetSync { new Field(TOPIC_KEY, Type.STRING), new Field(PARTITION_KEY, Type.INT32)); - private final TopicPartition topicPartition; - private final long upstreamOffset; - private final long downstreamOffset; - - public OffsetSync(TopicPartition topicPartition, long upstreamOffset, long downstreamOffset) { - this.topicPartition = topicPartition; - this.upstreamOffset = upstreamOffset; - this.downstreamOffset = downstreamOffset; - } - - public TopicPartition topicPartition() { - return topicPartition; - } - - public long upstreamOffset() { - return upstreamOffset; - } - - public long downstreamOffset() { - return downstreamOffset; - } - @Override public String toString() { return String.format("OffsetSync{topicPartition=%s, upstreamOffset=%d, downstreamOffset=%d}", diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java index f09cb12b0f060..3cc50819e2c94 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java 
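
The OffsetSync change above is a straightforward class-to-record conversion. A standalone illustration of the pattern (a hypothetical stand-in type, not the Kafka class itself): the record header generates the canonical constructor and accessor-style methods plus equals/hashCode, so the hand-written versions can be deleted, while an explicit toString override, as OffsetSync keeps, still takes precedence.

    // Hypothetical stand-in; the real OffsetSync keys on a TopicPartition.
    record OffsetSyncLike(String topic, int partition, long upstreamOffset, long downstreamOffset) {
        @Override
        public String toString() {
            // An explicit override, like the one OffsetSync retains, replaces the generated form.
            return String.format("OffsetSyncLike{%s-%d, upstream=%d, downstream=%d}",
                    topic, partition, upstreamOffset, downstreamOffset);
        }
    }

    public class RecordConversionSketch {
        public static void main(String[] args) {
            OffsetSyncLike sync = new OffsetSyncLike("topic1", 0, 100L, 200L);
            System.out.println(sync.upstreamOffset()); // generated accessor, same shape as before
            System.out.println(sync);                  // custom toString
        }
    }
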
@@ -28,7 +28,7 @@ import org.glassfish.jersey.server.ResourceConfig; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.Map; public class MirrorRestServer extends RestServer { @@ -48,14 +48,12 @@ public void initializeInternalResources(Map herders) { @Override protected Collection> regularResources() { - return Collections.singletonList( - InternalMirrorResource.class - ); + return List.of(InternalMirrorResource.class); } @Override protected Collection> adminResources() { - return Collections.emptyList(); + return List.of(); } @Override @@ -70,5 +68,4 @@ protected void configure() { bind(restClient).to(RestClient.class); } } - } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java index 476fbcceaef82..fb65a1162e2bf 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java @@ -23,9 +23,7 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -39,8 +37,7 @@ public class CheckpointStoreTest { @Test public void testReadCheckpointsTopic() { - Set consumerGroups = new HashSet<>(); - consumerGroups.add("group1"); + Set consumerGroups = Set.of("group1"); MirrorCheckpointTaskConfig config = mock(MirrorCheckpointTaskConfig.class); when(config.checkpointsTopic()).thenReturn("checkpoint.topic"); @@ -63,7 +60,7 @@ void readCheckpointsImpl(MirrorCheckpointTaskConfig config, Callback> expected = new HashMap<>(); - expected.put("group1", Collections.singletonMap(new TopicPartition("t1", 0), + expected.put("group1", Map.of(new TopicPartition("t1", 0), new Checkpoint("group1", new TopicPartition("t1", 0), 1, 1, ""))); assertEquals(expected, store.checkpointsPerConsumerGroup); } @@ -71,8 +68,7 @@ void readCheckpointsImpl(MirrorCheckpointTaskConfig config, Callback consumerGroups = new HashSet<>(); - consumerGroups.add("group1"); + Set consumerGroups = Set.of("group1"); MirrorCheckpointTaskConfig config = mock(MirrorCheckpointTaskConfig.class); when(config.checkpointsTopic()).thenReturn("checkpoint.topic"); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java index ccd381ceadbc8..1ee27ba0ffe40 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java @@ -20,8 +20,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -37,7 +35,7 @@ public class MirrorCheckpointConfigTest { @Test public void testTaskConfigConsumerGroups() { - List groups = Arrays.asList("consumer-1", "consumer-2", "consumer-3"); + List groups = List.of("consumer-1", "consumer-2", "consumer-3"); MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); Map props = config.taskConfigForConsumerGroups(groups, 1); MirrorCheckpointTaskConfig taskConfig = new MirrorCheckpointTaskConfig(props); @@ -118,7 +116,7 @@ public void testValidateIfConnectorEnabled() { Map configValues = 
MirrorCheckpointConfig.validate(makeProps( MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "false", MirrorCheckpointConfig.SYNC_GROUP_OFFSETS_ENABLED, "false")); - assertEquals(configValues.keySet(), Collections.singleton(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED)); + assertEquals(configValues.keySet(), Set.of(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED)); configValues = MirrorCheckpointConfig.validate(makeProps(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "true", MirrorCheckpointConfig.EMIT_OFFSET_SYNCS_ENABLED, "false")); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java index 4ae32701bb9f0..ecb07dc529d42 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java @@ -26,8 +26,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -144,10 +142,10 @@ public void testReplicationEnabled() { @Test public void testFindConsumerGroups() throws Exception { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); - MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Collections.emptySet(), config); + MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Set.of(), config); connector = spy(connector); - Collection groups = Arrays.asList( + List groups = List.of( new GroupListing("g1", Optional.of(GroupType.CLASSIC), "", Optional.empty()), new GroupListing("g2", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty())); Map offsets = new HashMap<>(); @@ -168,16 +166,16 @@ public void testFindConsumerGroups() throws Exception { doReturn(false).when(connector).shouldReplicateByTopicFilter(anyString()); Set topicFilterGroupFound = connector.findConsumerGroups(); - assertEquals(Collections.emptySet(), topicFilterGroupFound); + assertEquals(Set.of(), topicFilterGroupFound); } @Test public void testFindConsumerGroupsInCommonScenarios() throws Exception { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); - MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Collections.emptySet(), config); + MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Set.of(), config); connector = spy(connector); - Collection groups = Arrays.asList( + List groups = List.of( new GroupListing("g1", Optional.of(GroupType.CLASSIC), "", Optional.empty()), new GroupListing("g2", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty()), new GroupListing("g3", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty()), @@ -203,7 +201,7 @@ public void testFindConsumerGroupsInCommonScenarios() throws Exception { groupToOffsets.put("g1", offsetsForGroup1); groupToOffsets.put("g2", offsetsForGroup2); groupToOffsets.put("g3", offsetsForGroup3); - doReturn(groupToOffsets).when(connector).listConsumerGroupOffsets(Arrays.asList("g1", "g2", "g3")); + doReturn(groupToOffsets).when(connector).listConsumerGroupOffsets(List.of("g1", "g2", "g3")); Set groupFound = connector.findConsumerGroups(); Set verifiedSet = new HashSet<>(); @@ -215,8 +213,8 @@ public void testFindConsumerGroupsInCommonScenarios() throws Exception { @Test public void 
testAlterOffsetsIncorrectPartitionKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( - Collections.singletonMap("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( + Map.of("unused_partition_key", "unused_partition_value"), SOURCE_OFFSET ))); @@ -231,7 +229,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( partition, SOURCE_OFFSET )); @@ -240,7 +238,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid assertTrue(alterOffsets.apply(validPartition)); - for (String key : Arrays.asList(CONSUMER_GROUP_ID_KEY, TOPIC_KEY, PARTITION_KEY)) { + for (String key : List.of(CONSUMER_GROUP_ID_KEY, TOPIC_KEY, PARTITION_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -252,7 +250,7 @@ public void testAlterOffsetsInvalidPartitionPartition() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); Map partition = sourcePartition("consumer-app-2", "t", 3); partition.put(PARTITION_KEY, "a string"); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( partition, SOURCE_OFFSET ))); @@ -276,9 +274,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("consumer-app-5", "t1", 2), - Collections.singletonMap("unused_offset_key", 0) + Map.of("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -287,7 +285,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( + Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( sourcePartition("consumer-app-6", "t", 5), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ -308,7 +306,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("consumer-app-7", "t2", 0), SOURCE_OFFSET ); @@ -317,7 +315,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Collections.emptyMap())); + assertTrue(connector.alterOffsets(null, Map.of())); } @Test @@ -337,8 +335,8 @@ public void testAlterOffsetsTombstones() { 
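
A recurring detail in this migration, visible in testAlterOffsetsOffsetValues above where Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) is intentionally left unchanged: Map.of rejects null keys and values, so only call sites whose values are known to be non-null are converted. A small sketch of the difference; the "offset" key literal here is just an example.

    import java.util.Collections;
    import java.util.Map;

    public class MapOfNullSketch {
        public static void main(String[] args) {
            // Collections.singletonMap happily stores a null value...
            Map<String, Object> legacy = Collections.singletonMap("offset", null);
            System.out.println(legacy); // {offset=null}

            // ...while Map.of throws NullPointerException for null keys or values.
            try {
                Map.of("offset", (Object) null);
            } catch (NullPointerException expected) {
                System.out.println("Map.of rejected the null value");
            }
        }
    }
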
assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); - assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Map.of())); + assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String consumerGroupId, String topic, int partition) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java index f4cc1e4ced6a4..7ce554d5f663d 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java @@ -22,12 +22,12 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalLong; +import java.util.Set; import java.util.concurrent.ExecutionException; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,8 +43,8 @@ public class MirrorCheckpointTaskTest { @Test public void testDownstreamTopicRenaming() { MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), null, Collections.emptySet(), Collections.emptyMap(), - new CheckpointStore(Collections.emptyMap())); + new DefaultReplicationPolicy(), null, Set.of(), Map.of(), + new CheckpointStore(Map.of())); assertEquals(new TopicPartition("source1.topic3", 4), mirrorCheckpointTask.renameTopicPartition(new TopicPartition("topic3", 4)), "Renaming source1.topic3 failed"); @@ -65,8 +65,8 @@ public void testCheckpoint() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), - Collections.emptyMap(), new CheckpointStore(Collections.emptyMap())); + new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), + Map.of(), new CheckpointStore(Map.of())); offsetSyncStore.sync(new TopicPartition("topic1", 2), t1UpstreamOffset, t1DownstreamOffset); offsetSyncStore.sync(new TopicPartition("target2.topic5", 6), t2UpstreamOffset, t2DownstreamOffset); Optional optionalCheckpoint1 = mirrorCheckpointTask.checkpoint("group9", new TopicPartition("topic1", 2), @@ -166,7 +166,7 @@ public void testSyncOffset() throws ExecutionException, InterruptedException { checkpointsPerConsumerGroup.put(consumer2, checkpointMapC2); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), null, Collections.emptySet(), idleConsumerGroupsOffset, + new DefaultReplicationPolicy(), null, Set.of(), idleConsumerGroupsOffset, new CheckpointStore(checkpointsPerConsumerGroup)); Map> output = mirrorCheckpointTask.syncGroupOffset(); @@ -197,7 +197,7 @@ public void testSyncOffsetForTargetGroupWithNullOffsetAndMetadata() throws Execu checkpointsPerConsumerGroup.put(consumer, checkpointMap); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source", "target", - new DefaultReplicationPolicy(), null, Collections.emptySet(), 
idleConsumerGroupsOffset, + new DefaultReplicationPolicy(), null, Set.of(), idleConsumerGroupsOffset, new CheckpointStore(checkpointsPerConsumerGroup)); Map> output = mirrorCheckpointTask.syncGroupOffset(); @@ -210,8 +210,8 @@ public void testNoCheckpointForTopicWithoutOffsetSyncs() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), - new CheckpointStore(Collections.emptyMap())); + new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), + new CheckpointStore(Map.of())); offsetSyncStore.sync(new TopicPartition("topic1", 0), 3L, 4L); Optional checkpoint1 = mirrorCheckpointTask.checkpoint("group9", new TopicPartition("topic1", 1), @@ -227,8 +227,8 @@ public void testNoCheckpointForTopicWithNullOffsetAndMetadata() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), - new CheckpointStore(Collections.emptyMap())); + new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), + new CheckpointStore(Map.of())); offsetSyncStore.sync(new TopicPartition("topic1", 0), 1L, 3L); Optional checkpoint = mirrorCheckpointTask.checkpoint("g1", new TopicPartition("topic1", 0), null); assertFalse(checkpoint.isPresent()); @@ -240,7 +240,7 @@ public void testCheckpointRecordsMonotonicIfStoreRewinds() { offsetSyncStore.start(true); Map> checkpointsPerConsumerGroup = new HashMap<>(); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), + new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), new CheckpointStore(checkpointsPerConsumerGroup)); TopicPartition tp = new TopicPartition("topic1", 0); TopicPartition targetTP = new TopicPartition("source1.topic1", 0); @@ -277,7 +277,7 @@ public void testCheckpointRecordsMonotonicIfStoreRewinds() { private Map assertCheckpointForTopic( MirrorCheckpointTask task, TopicPartition tp, TopicPartition remoteTp, long consumerGroupOffset, boolean truth ) { - Map consumerGroupOffsets = Collections.singletonMap(tp, new OffsetAndMetadata(consumerGroupOffset)); + Map consumerGroupOffsets = Map.of(tp, new OffsetAndMetadata(consumerGroupOffset)); Map checkpoints = task.checkpointsForGroup(consumerGroupOffsets, "g1"); assertEquals(truth, checkpoints.containsKey(remoteTp), "should" + (truth ? 
"" : " not") + " emit offset sync"); return checkpoints; @@ -299,8 +299,8 @@ void backingStoreStart() { offsetSyncStore.start(false); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), - new CheckpointStore(Collections.emptyMap())); + new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), + new CheckpointStore(Map.of())); // Generate a checkpoint for upstream offset 250, and assert it maps to downstream 201 // (as nearest mapping in OffsetSyncStore is 200->200) @@ -327,7 +327,7 @@ void backingStoreStart() { Map> checkpointsPerConsumerGroup = new HashMap<>(); checkpointsPerConsumerGroup.put("group1", checkpoints); MirrorCheckpointTask mirrorCheckpointTask2 = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore2, Collections.emptySet(), Collections.emptyMap(), + new DefaultReplicationPolicy(), offsetSyncStore2, Set.of(), Map.of(), new CheckpointStore(checkpointsPerConsumerGroup)); // Upstream offsets 250 and 370 now have the closest downstream value of 176, but this is @@ -354,14 +354,14 @@ public void testCheckpointStoreInitialized() throws InterruptedException { MirrorCheckpointTask task = new MirrorCheckpointTask("source1", "target2", new DefaultReplicationPolicy(), new OffsetSyncStoreTest.FakeOffsetSyncStore(), - Collections.singleton("group"), - Collections.emptyMap(), + Set.of("group"), + Map.of(), checkpointStore) { @Override List sourceRecordsForGroup(String group) { - SourceRecord sr = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "", 0, null, null); - return Collections.singletonList(sr); + SourceRecord sr = new SourceRecord(Map.of(), Map.of(), "", 0, null, null); + return List.of(sr); } }; diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java index 190f749d4e71b..8c5be805a936c 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java @@ -20,7 +20,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -67,8 +66,8 @@ public void testReplicationDisabled() { @Test public void testAlterOffsetsIncorrectPartitionKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( - Collections.singletonMap("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( + Map.of("unused_partition_key", "unused_partition_value"), SOURCE_OFFSET ))); @@ -83,7 +82,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( partition, SOURCE_OFFSET )); @@ -92,7 +91,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid 
assertTrue(alterOffsets.apply(validPartition)); - for (String key : Arrays.asList(SOURCE_CLUSTER_ALIAS_KEY, TARGET_CLUSTER_ALIAS_KEY)) { + for (String key : List.of(SOURCE_CLUSTER_ALIAS_KEY, TARGET_CLUSTER_ALIAS_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -117,9 +116,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("primary", "backup"), - Collections.singletonMap("unused_offset_key", 0) + Map.of("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -128,7 +127,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( + Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( sourcePartition("primary", "backup"), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ -149,7 +148,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("primary", "backup"), SOURCE_OFFSET ); @@ -158,7 +157,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Collections.emptyMap())); + assertTrue(connector.alterOffsets(null, Map.of())); } @Test @@ -178,8 +177,8 @@ public void testAlterOffsetsTombstones() { assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); - assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Map.of())); + assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String sourceClusterAlias, String targetClusterAlias) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java index 0ffe2635d1491..8faf52a12b6b1 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java @@ -35,9 +35,9 @@ public void testPollCreatesRecords() throws InterruptedException { List records = heartbeatTask.poll(); assertEquals(1, records.size()); Map sourcePartition = records.iterator().next().sourcePartition(); - assertEquals(sourcePartition.get(Heartbeat.SOURCE_CLUSTER_ALIAS_KEY), "testSource", + assertEquals("testSource", sourcePartition.get(Heartbeat.SOURCE_CLUSTER_ALIAS_KEY), "sourcePartition's " + Heartbeat.SOURCE_CLUSTER_ALIAS_KEY + " record was not created"); - 
assertEquals(sourcePartition.get(Heartbeat.TARGET_CLUSTER_ALIAS_KEY), "testTarget", + assertEquals("testTarget", sourcePartition.get(Heartbeat.TARGET_CLUSTER_ALIAS_KEY), "sourcePartition's " + Heartbeat.TARGET_CLUSTER_ALIAS_KEY + " record was not created"); } } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java index 638db3de370a3..ddd22b0b8ad8e 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -106,7 +105,7 @@ public void testClientConfigProperties() { "replication.policy.separator is picked up in MirrorClientConfig"); assertEquals("b__topic1", aClientConfig.replicationPolicy().formatRemoteTopic("b", "topic1"), "replication.policy.separator is honored"); - assertEquals(clusterABootstrap, aClientConfig.adminConfig().get("bootstrap.servers"), + assertEquals(Arrays.stream(clusterABootstrap.split(",")).map(String::trim).toList(), aClientConfig.adminConfig().get("bootstrap.servers"), "client configs include bootstrap.servers"); try (ForwardingAdmin forwardingAdmin = aClientConfig.forwardingAdmin(aClientConfig.adminConfig())) { assertEquals(ForwardingAdmin.class.getName(), forwardingAdmin.getClass().getName(), @@ -152,11 +151,11 @@ public void testIncludesConnectorConfigProperties() { MirrorSourceConfig sourceConfig = new MirrorSourceConfig(connectorProps); assertEquals(100, (int) sourceConfig.getInt("tasks.max"), "Connector properties like tasks.max should be passed through to underlying Connectors."); - assertEquals(Collections.singletonList("topic-1"), sourceConfig.getList("topics"), + assertEquals(List.of("topic-1"), sourceConfig.getList("topics"), "Topics include should be passed through to underlying Connectors."); - assertEquals(Collections.singletonList("property-3"), sourceConfig.getList("config.properties.exclude"), + assertEquals(List.of("property-3"), sourceConfig.getList("config.properties.exclude"), "Config properties exclude should be passed through to underlying Connectors."); - assertEquals(Collections.singletonList("FakeMetricsReporter"), sourceConfig.getList("metric.reporters"), + assertEquals(List.of("FakeMetricsReporter"), sourceConfig.getList("metric.reporters"), "Metrics reporters should be passed through to underlying Connectors."); assertEquals("DefaultTopicFilter", sourceConfig.getClass("topic.filter.class").getSimpleName(), "Filters should be passed through to underlying Connectors."); @@ -166,7 +165,7 @@ public void testIncludesConnectorConfigProperties() { "Unknown properties should not be passed through to Connectors."); MirrorCheckpointConfig checkpointConfig = new MirrorCheckpointConfig(connectorProps); - assertEquals(Collections.singletonList("group-2"), checkpointConfig.getList("groups"), + assertEquals(List.of("group-2"), checkpointConfig.getList("groups"), "Groups include should be passed through to underlying Connectors."); } @@ -180,11 +179,11 @@ public void testIncludesTopicFilterProperties() { SourceAndTarget sourceAndTarget = new SourceAndTarget("source", "target"); Map connectorProps = mirrorConfig.connectorBaseConfig(sourceAndTarget, MirrorSourceConnector.class); - 
DefaultTopicFilter.TopicFilterConfig filterConfig = + DefaultTopicFilter.TopicFilterConfig filterConfig = new DefaultTopicFilter.TopicFilterConfig(connectorProps); - assertEquals(Arrays.asList("topic1", "topic2"), filterConfig.getList("topics"), + assertEquals(List.of("topic1", "topic2"), filterConfig.getList("topics"), "source->target.topics should be passed through to TopicFilters."); - assertEquals(Collections.singletonList("topic3"), filterConfig.getList("topics.exclude"), + assertEquals(List.of("topic3"), filterConfig.getList("topics.exclude"), "source->target.topics.exclude should be passed through to TopicFilters."); } @@ -318,7 +317,10 @@ public void testInvalidSecurityProtocol() { @Test public void testClientInvalidSecurityProtocol() { ConfigException ce = assertThrows(ConfigException.class, - () -> new MirrorClientConfig(makeProps(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc"))); + () -> new MirrorClientConfig(makeProps( + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc", + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + ))); assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -326,7 +328,9 @@ public void testClientInvalidSecurityProtocol() { public void testCaseInsensitiveSecurityProtocol() { final String saslSslLowerCase = SecurityProtocol.SASL_SSL.name.toLowerCase(Locale.ROOT); final MirrorClientConfig config = new MirrorClientConfig(makeProps( - CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase)); + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase, + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + )); assertEquals(saslSslLowerCase, config.originalsStrings().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -367,7 +371,7 @@ public void testLazyConfigResolution() { public static class FakeConfigProvider implements ConfigProvider { - Map secrets = Collections.singletonMap("password", "secret2"); + Map secrets = Map.of("password", "secret2"); @Override public void configure(Map props) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java index ccdc7a878a505..c7f7f4e19a51c 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java @@ -22,7 +22,6 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -38,7 +37,7 @@ public class MirrorSourceConfigTest { @Test public void testTaskConfigTopicPartitions() { - List topicPartitions = Arrays.asList(new TopicPartition("topic-1", 2), + List topicPartitions = List.of(new TopicPartition("topic-1", 2), new TopicPartition("topic-3", 4), new TopicPartition("topic-5", 6)); MirrorSourceConfig config = new MirrorSourceConfig(makeProps()); Map props = config.taskConfigForTopicPartitions(topicPartitions, 1); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java index 21bcc7cbad5e9..1d106d6deaaa0 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java @@ -44,7 +44,6 @@ import 
org.junit.jupiter.api.Test; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -55,7 +54,6 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.function.Function; -import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.ISOLATION_LEVEL_CONFIG; import static org.apache.kafka.connect.mirror.MirrorConnectorConfig.CONSUMER_CLIENT_PREFIX; @@ -106,7 +104,7 @@ public void testReplicatesHeartbeatsDespiteFilter() { assertTrue(connector.shouldReplicateTopic("heartbeats"), "should replicate heartbeats"); assertTrue(connector.shouldReplicateTopic("us-west.heartbeats"), "should replicate upstream heartbeats"); - Map configs = Collections.singletonMap(DefaultReplicationPolicy.SEPARATOR_CONFIG, "_"); + Map configs = Map.of(DefaultReplicationPolicy.SEPARATOR_CONFIG, "_"); defaultReplicationPolicy.configure(configs); assertTrue(connector.shouldReplicateTopic("heartbeats"), "should replicate heartbeats"); assertFalse(connector.shouldReplicateTopic("us-west.heartbeats"), "should not consider this topic as a heartbeats topic"); @@ -184,15 +182,15 @@ public void testAclTransformation() { String expectedRemoteTopicName = "source" + DefaultReplicationPolicy.SEPARATOR_DEFAULT + allowAllAclBinding.pattern().name(); assertEquals(expectedRemoteTopicName, processedAllowAllAclBinding.pattern().name(), "should change topic name"); - assertEquals(processedAllowAllAclBinding.entry().operation(), AclOperation.READ, "should change ALL to READ"); - assertEquals(processedAllowAllAclBinding.entry().permissionType(), AclPermissionType.ALLOW, "should not change ALLOW"); + assertEquals(AclOperation.READ, processedAllowAllAclBinding.entry().operation(), "should change ALL to READ"); + assertEquals(AclPermissionType.ALLOW, processedAllowAllAclBinding.entry().permissionType(), "should not change ALLOW"); AclBinding denyAllAclBinding = new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.DENY)); AclBinding processedDenyAllAclBinding = connector.targetAclBinding(denyAllAclBinding); - assertEquals(processedDenyAllAclBinding.entry().operation(), AclOperation.ALL, "should not change ALL"); - assertEquals(processedDenyAllAclBinding.entry().permissionType(), AclPermissionType.DENY, "should not change DENY"); + assertEquals(AclOperation.ALL, processedDenyAllAclBinding.entry().operation(), "should not change ALL"); + assertEquals(AclPermissionType.DENY, processedDenyAllAclBinding.entry().permissionType(), "should not change DENY"); } @Test @@ -280,7 +278,7 @@ public void testConfigPropertyFiltering() { new DefaultReplicationPolicy(), x -> true, new DefaultConfigPropertyFilter()); ArrayList entries = new ArrayList<>(); entries.add(new ConfigEntry("name-1", "value-1")); - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, true); @@ -300,7 +298,7 @@ public void testConfigPropertyFilteringWithAlterConfigs() { List entries = new ArrayList<>(); entries.add(new 
ConfigEntry("name-1", "value-1")); // When "use.defaults.from" set to "target" by default, the config with default value should be excluded - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, false); @@ -315,7 +313,7 @@ public void testConfigPropertyFilteringWithAlterConfigs() { @Test @Deprecated public void testConfigPropertyFilteringWithAlterConfigsAndSourceDefault() { - Map filterConfig = Collections.singletonMap(DefaultConfigPropertyFilter.USE_DEFAULTS_FROM, "source"); + Map filterConfig = Map.of(DefaultConfigPropertyFilter.USE_DEFAULTS_FROM, "source"); DefaultConfigPropertyFilter filter = new DefaultConfigPropertyFilter(); filter.configure(filterConfig); @@ -324,7 +322,7 @@ public void testConfigPropertyFilteringWithAlterConfigsAndSourceDefault() { List entries = new ArrayList<>(); entries.add(new ConfigEntry("name-1", "value-1")); // When "use.defaults.from" explicitly set to "source", the config with default value should be replicated - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, false); @@ -358,7 +356,7 @@ public void testNewTopicConfigs() throws Exception { entries.add(new ConfigEntry("exclude_param.param1", "value-param1")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); - doReturn(Collections.singletonMap(topic, config)).when(connector).describeTopicConfigs(any()); + doReturn(Map.of(topic, config)).when(connector).describeTopicConfigs(any()); doAnswer(invocation -> { Map newTopics = invocation.getArgument(0); assertNotNull(newTopics.get("source." 
+ topic)); @@ -375,7 +373,7 @@ public void testNewTopicConfigs() throws Exception { assertNull(targetConfig.get(prop2), "should not replicate excluded properties " + prop2); return null; }).when(connector).createNewTopics(any()); - connector.createNewTopics(Collections.singleton(topic), Collections.singletonMap(topic, 1L)); + connector.createNewTopics(Set.of(topic), Map.of(topic, 1L)); verify(connector).createNewTopics(any(), any()); } @@ -433,15 +431,15 @@ public void testRefreshTopicPartitions() throws Exception { connector.initialize(mock(ConnectorContext.class)); connector = spy(connector); - Config topicConfig = new Config(Arrays.asList( + Config topicConfig = new Config(List.of( new ConfigEntry("cleanup.policy", "compact"), new ConfigEntry("segment.bytes", "100"))); - Map configs = Collections.singletonMap("topic", topicConfig); + Map configs = Map.of("topic", topicConfig); - List sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0)); + List sourceTopicPartitions = List.of(new TopicPartition("topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); - doReturn(Collections.emptyList()).when(connector).findTargetTopicPartitions(); - doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("topic")); + doReturn(List.of()).when(connector).findTargetTopicPartitions(); + doReturn(configs).when(connector).describeTopicConfigs(Set.of("topic")); doNothing().when(connector).createNewTopics(any()); connector.refreshTopicPartitions(); @@ -460,7 +458,7 @@ public void testRefreshTopicPartitions() throws Exception { verify(connector, times(2)).createNewTopics(eq(expectedNewTopics)); verify(connector, times(0)).createNewPartitions(any()); - List targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0)); + List targetTopicPartitions = List.of(new TopicPartition("source.topic", 0)); doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions(); connector.refreshTopicPartitions(); @@ -475,17 +473,17 @@ public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception { connector.initialize(mock(ConnectorContext.class)); connector = spy(connector); - Config topicConfig = new Config(Arrays.asList( + Config topicConfig = new Config(List.of( new ConfigEntry("cleanup.policy", "compact"), new ConfigEntry("segment.bytes", "100"))); - Map configs = Collections.singletonMap("source.topic", topicConfig); + Map configs = Map.of("source.topic", topicConfig); - List sourceTopicPartitions = Collections.emptyList(); - List targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0)); + List sourceTopicPartitions = List.of(); + List targetTopicPartitions = List.of(new TopicPartition("source.topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions(); - doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("source.topic")); - doReturn(Collections.emptyMap()).when(connector).describeTopicConfigs(Collections.emptySet()); + doReturn(configs).when(connector).describeTopicConfigs(Set.of("source.topic")); + doReturn(Map.of()).when(connector).describeTopicConfigs(Set.of()); doNothing().when(connector).createNewTopics(any()); doNothing().when(connector).createNewPartitions(any()); @@ -494,7 +492,7 @@ public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception { connector.refreshTopicPartitions(); verify(connector, 
times(0)).computeAndCreateTopicPartitions(); - sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0)); + sourceTopicPartitions = List.of(new TopicPartition("topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); // when partitions are added to the source cluster, reconfiguration is triggered @@ -620,7 +618,7 @@ private Optional validateProperty(String name, Map List results = new MirrorSourceConnector().validate(props) .configValues().stream() .filter(cv -> name.equals(cv.name())) - .collect(Collectors.toList()); + .toList(); assertTrue(results.size() <= 1, "Connector produced multiple config values for '" + name + "' property"); @@ -635,8 +633,8 @@ private Optional validateProperty(String name, Map @Test public void testAlterOffsetsIncorrectPartitionKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( - Collections.singletonMap("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( + Map.of("unused_partition_key", "unused_partition_value"), MirrorUtils.wrapOffset(10) ))); @@ -651,7 +649,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( partition, MirrorUtils.wrapOffset(64) )); @@ -660,7 +658,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid assertTrue(alterOffsets.apply(validPartition)); - for (String key : Arrays.asList(SOURCE_CLUSTER_KEY, TOPIC_KEY, PARTITION_KEY)) { + for (String key : List.of(SOURCE_CLUSTER_KEY, TOPIC_KEY, PARTITION_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -672,7 +670,7 @@ public void testAlterOffsetsInvalidPartitionPartition() { MirrorSourceConnector connector = new MirrorSourceConnector(); Map partition = sourcePartition("t", 3, "us-west-2"); partition.put(PARTITION_KEY, "a string"); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( partition, MirrorUtils.wrapOffset(49) ))); @@ -696,9 +694,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("t1", 2, "backup"), - Collections.singletonMap("unused_offset_key", 0) + Map.of("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -707,7 +705,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( + Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( sourcePartition("t", 5, "backup"), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ 
-728,7 +726,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Map, Map> offsets = Collections.singletonMap( + Map, Map> offsets = Map.of( sourcePartition("t2", 0, "backup"), MirrorUtils.wrapOffset(5) ); @@ -737,7 +735,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Collections.emptyMap())); + assertTrue(connector.alterOffsets(null, Map.of())); } @Test @@ -757,8 +755,8 @@ public void testAlterOffsetsTombstones() { assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); - assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Map.of())); + assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String topic, int partition, String sourceClusterAlias) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java index d8322fe224045..4a67685537824 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java @@ -35,9 +35,7 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -224,14 +222,14 @@ public void testSeekBehaviorDuringStart() { OffsetStorageReader mockOffsetStorageReader = mock(OffsetStorageReader.class); when(mockSourceTaskContext.offsetStorageReader()).thenReturn(mockOffsetStorageReader); - Set topicPartitions = new HashSet<>(Arrays.asList( + Set topicPartitions = Set.of( new TopicPartition("previouslyReplicatedTopic", 8), new TopicPartition("previouslyReplicatedTopic1", 0), new TopicPartition("previouslyReplicatedTopic", 1), new TopicPartition("newTopicToReplicate1", 1), new TopicPartition("newTopicToReplicate1", 4), new TopicPartition("newTopicToReplicate2", 0) - )); + ); long arbitraryCommittedOffset = 4L; long offsetToSeek = arbitraryCommittedOffset + 1L; diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java index e6de8a58f7b26..daa818e293e79 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -51,7 +50,7 @@ public class MirrorUtilsTest { @Test public void testCreateCompactedTopic() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenReturn(null); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -64,7 +63,7 @@ public void 
testCreateCompactedTopic() throws Exception { @Test public void testCreateCompactedTopicAlreadyExists() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TopicExistsException("topic exists"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -77,7 +76,7 @@ public void testCreateCompactedTopicAlreadyExists() throws Exception { @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithUnsupportedVersionException() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new UnsupportedVersionException("unsupported"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -90,7 +89,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithUnsupportedVersi @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthorizationException() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new ClusterAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -103,7 +102,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthoriza @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithTopicAuthorizationException() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TopicAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -116,7 +115,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithTopicAuthorizati @Test public void testCreateCompactedTopicFailsWithInvalidConfigurationException() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new InvalidConfigurationException("wrong config"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -130,7 +129,7 @@ public void testCreateCompactedTopicFailsWithInvalidConfigurationException() thr @Test public void testCreateCompactedTopicFailsWithTimeoutException() throws Exception { - Map> values = Collections.singletonMap(TOPIC, future); + Map> values = Map.of(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TimeoutException("Timeout"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java index 9a6b10920a058..19c8d9d39524d 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java @@ -55,11 +55,11 @@ public void testMaybeQueueOffsetSyncs() { offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 0, 1); assertFalse(offsetSyncWriter.getDelayedOffsetSyncs().containsKey(topicPartition)); 
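The OffsetSyncWriterTest changes just below flip assertEquals arguments into JUnit's (expected, actual) order. A short sketch, independent of the patch (test and method names are invented), of why the order only affects the failure report:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class AssertOrderDemo {
    // Stand-in for the value the real tests read from partitionStates().
    long lastSyncDownstreamOffset() {
        return 1L;
    }

    @Test
    void usesExpectedThenActual() {
        // JUnit 5 signature: assertEquals(expected, actual, message).
        // Keeping the literal first means a mismatch is reported as
        // "expected: <1> but was: <...>"; with the arguments swapped the test
        // still fails, but the report blames the literal instead of the code.
        assertEquals(1L, lastSyncDownstreamOffset(), "downstream offset should be 1");
    }
}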
assertTrue(offsetSyncWriter.getPendingOffsetSyncs().containsKey(topicPartition)); - assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 1); + assertEquals(1, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 1, 2); assertTrue(offsetSyncWriter.getDelayedOffsetSyncs().containsKey(topicPartition)); - assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 1); + assertEquals(1, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); } @Test @@ -71,7 +71,7 @@ public void testFirePendingOffsetSyncs() { OffsetSyncWriter offsetSyncWriter = new OffsetSyncWriter(producer, topicName, outstandingOffsetSyncs, maxOffsetLag); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 0, 100); - assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 100); + assertEquals(100, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); offsetSyncWriter.firePendingOffsetSyncs(); @@ -85,7 +85,7 @@ public void testFirePendingOffsetSyncs() { verify(producer, times(1)).send(any(), any()); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 2, 102); - assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 102); + assertEquals(102, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); offsetSyncWriter.firePendingOffsetSyncs(); // in-flight offset syncs; will not try to send remaining offset syncs immediately diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java index 1c08cbaf72ef9..b55673810a427 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java @@ -66,7 +66,7 @@ public static void updatePartitionCount(String topic, int newPartitionCount) { */ public static void updateTopicConfig(String topic, Config newConfig) { ConcurrentHashMap topicConfigs = FakeLocalMetadataStore.ALL_TOPICS.getOrDefault(topic, new ConcurrentHashMap<>()); - newConfig.entries().stream().forEach(configEntry -> { + newConfig.entries().forEach(configEntry -> { if (configEntry.name() != null) { if (configEntry.value() != null) { log.debug("Topic '{}' update config '{}' to '{}'", topic, configEntry.name(), configEntry.value()); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java index 2ba4438bdba9b..1d1dd0feea341 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java @@ -45,8 +45,6 @@ import org.slf4j.LoggerFactory; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,7 +56,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; -import 
java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; import static org.apache.kafka.connect.mirror.MirrorMaker.CONNECTOR_CLASSES; @@ -145,7 +142,7 @@ public void testSingleNodeCluster() throws Exception { final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap() {{ + Map mmProps = new HashMap<>() {{ put("dedicated.mode.enable.internal.rest", "false"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -207,7 +204,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { final String ab = a + "->" + b; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap() {{ + Map mmProps = new HashMap<>() {{ put("dedicated.mode.enable.internal.rest", "false"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -230,7 +227,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { // Bring up a single-node cluster final MirrorMaker mm = startMirrorMaker("no-offset-syncing", mmProps); final SourceAndTarget sourceAndTarget = new SourceAndTarget(a, b); - awaitMirrorMakerStart(mm, sourceAndTarget, Arrays.asList(MirrorSourceConnector.class, MirrorHeartbeatConnector.class)); + awaitMirrorMakerStart(mm, sourceAndTarget, List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class)); // wait for mirror source and heartbeat connectors to start a task awaitConnectorTasksStart(mm, MirrorHeartbeatConnector.class, sourceAndTarget); @@ -256,7 +253,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { .stream() .filter(Optional::isPresent) .map(Optional::get) - .collect(Collectors.toList()); + .toList(); assertTrue(offsetSyncTopic.isEmpty()); } @@ -291,7 +288,7 @@ public void testMultiNodeCluster() throws Exception { final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap() {{ + Map mmProps = new HashMap<>() {{ put("dedicated.mode.enable.internal.rest", "true"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -451,8 +448,8 @@ private void awaitTaskConfigurations(MirrorMaker mm, } private void awaitTopicContent(EmbeddedKafkaCluster cluster, String clusterName, String topic, int numMessages) throws Exception { - try (Consumer consumer = cluster.createConsumer(Collections.singletonMap(AUTO_OFFSET_RESET_CONFIG, "earliest"))) { - consumer.subscribe(Collections.singleton(topic)); + try (Consumer consumer = cluster.createConsumer(Map.of(AUTO_OFFSET_RESET_CONFIG, "earliest"))) { + consumer.subscribe(Set.of(topic)); AtomicInteger messagesRead = new AtomicInteger(0); waitForCondition( () -> { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java index 0a6ab4bab158c..1d4339f397796 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java @@ -21,7 +21,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; -import java.util.HashMap; +import java.util.Map; /** * Tests MM2 replication and 
failover logic for {@link IdentityReplicationPolicy}. @@ -36,10 +36,10 @@ public class IdentityReplicationIntegrationTest extends MirrorConnectorsIntegrat @BeforeEach public void startClusters() throws Exception { replicateBackupToPrimary = false; - super.startClusters(new HashMap() {{ - put("replication.policy.class", IdentityReplicationPolicy.class.getName()); - put("topics", "test-topic-.*"); - }}); + super.startClusters(Map.of( + "replication.policy.class", IdentityReplicationPolicy.class.getName(), + "topics", "test-topic-.*" + )); } /* diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java index b278285e60651..6d1d50f558bab 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java @@ -67,9 +67,7 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -127,7 +125,7 @@ public class MirrorConnectorsIntegrationBaseTest { protected static final Duration CONSUMER_POLL_TIMEOUT = Duration.ofMillis(500L); protected static final String PRIMARY_CLUSTER_ALIAS = "primary"; protected static final String BACKUP_CLUSTER_ALIAS = "backup"; - protected static final List> CONNECTOR_LIST = Arrays.asList( + protected static final List> CONNECTOR_LIST = List.of( MirrorSourceConnector.class, MirrorCheckpointConnector.class, MirrorHeartbeatConnector.class); @@ -154,7 +152,7 @@ public class MirrorConnectorsIntegrationBaseTest { @BeforeEach public void startClusters() throws Exception { - startClusters(new HashMap() {{ + startClusters(new HashMap<>() {{ put("topics", "test-topic-.*, primary.test-topic-.*, backup.test-topic-.*"); }}); } @@ -243,7 +241,7 @@ public void startClusters(Map additionalMM2Config) throws Except waitForTopicCreated(backup, "mm2-configs.primary.internal"); waitForTopicCreated(backup, "test-topic-1"); waitForTopicCreated(primary, "test-topic-1"); - warmUpConsumer(Collections.singletonMap("group.id", "consumer-group-dummy")); + warmUpConsumer(Map.of("group.id", "consumer-group-dummy")); log.info(PRIMARY_CLUSTER_ALIAS + " REST service: {}", primary.endpointForResource("connectors")); log.info(BACKUP_CLUSTER_ALIAS + " REST service: {}", backup.endpointForResource("connectors")); @@ -290,14 +288,14 @@ public void testReplication() throws Exception { } String reverseTopic1 = remoteTopicName("test-topic-1", BACKUP_CLUSTER_ALIAS); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // warm up consumers before starting the connectors, so we don't need to wait for discovery warmUpConsumer(consumerProps); mm2Config = new MirrorMakerConfig(mm2Props); waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); - List> primaryConnectors = replicateBackupToPrimary ? CONNECTOR_LIST : Collections.singletonList(MirrorHeartbeatConnector.class); + List> primaryConnectors = replicateBackupToPrimary ? 
CONNECTOR_LIST : List.of(MirrorHeartbeatConnector.class); waitUntilMirrorMakerIsRunning(primary, primaryConnectors, mm2Config, BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS); MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS)); @@ -370,7 +368,7 @@ public void testReplication() throws Exception { backupClient, consumerGroupName, PRIMARY_CLUSTER_ALIAS, backupTopic1); // Failover consumer group to backup cluster. - try (Consumer primaryConsumer = backup.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) { + try (Consumer primaryConsumer = backup.kafka().createConsumer(Map.of("group.id", consumerGroupName))) { primaryConsumer.assign(backupOffsets.keySet()); backupOffsets.forEach(primaryConsumer::seek); primaryConsumer.poll(CONSUMER_POLL_TIMEOUT); @@ -391,7 +389,7 @@ public void testReplication() throws Exception { primaryClient, consumerGroupName, BACKUP_CLUSTER_ALIAS, reverseTopic1); // Failback consumer group to primary cluster - try (Consumer primaryConsumer = primary.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) { + try (Consumer primaryConsumer = primary.kafka().createConsumer(Map.of("group.id", consumerGroupName))) { primaryConsumer.assign(primaryOffsets.keySet()); primaryOffsets.forEach(primaryConsumer::seek); primaryConsumer.poll(CONSUMER_POLL_TIMEOUT); @@ -435,7 +433,7 @@ public void testReplication() throws Exception { @Test public void testReplicationWithEmptyPartition() throws Exception { String consumerGroupName = "consumer-group-testReplicationWithEmptyPartition"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // create topic String topic = "test-topic-with-empty-partition"; @@ -493,7 +491,7 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter produceMessages(primaryProducer, "test-topic-1"); String backupTopic1 = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); String consumerGroupName = "consumer-group-testOneWayReplicationWithAutoOffsetSync"; - Map consumerProps = new HashMap() {{ + Map consumerProps = new HashMap<>() {{ put("group.id", consumerGroupName); put("auto.offset.reset", "earliest"); }}; @@ -526,7 +524,7 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo( consumerProps, backupTopic1)) { - waitForConsumerGroupFullSync(backup, Collections.singletonList(backupTopic1), + waitForConsumerGroupFullSync(backup, List.of(backupTopic1), consumerGroupName, NUM_RECORDS_PRODUCED, offsetLagMax); assertDownstreamRedeliveriesBoundedByMaxLag(backupConsumer, offsetLagMax); } @@ -541,17 +539,17 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter produceMessages(primaryProducer, "test-topic-2"); // create a consumer at primary cluster to consume the new topic - try (Consumer consumer1 = primary.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( + try (Consumer consumer1 = primary.kafka().createConsumerAndSubscribeTo(Map.of( "group.id", consumerGroupName), "test-topic-2")) { // we need to wait for consuming all the records for MM2 replicating the expected offsets waitForConsumingAllRecords(consumer1, NUM_RECORDS_PRODUCED); } // create a consumer at backup cluster with same consumer group ID to consume old and new topic - try (Consumer backupConsumer = 
backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( + try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Map.of( "group.id", consumerGroupName), backupTopic1, remoteTopic2)) { - waitForConsumerGroupFullSync(backup, Arrays.asList(backupTopic1, remoteTopic2), + waitForConsumerGroupFullSync(backup, List.of(backupTopic1, remoteTopic2), consumerGroupName, NUM_RECORDS_PRODUCED, offsetLagMax); assertDownstreamRedeliveriesBoundedByMaxLag(backupConsumer, offsetLagMax); } @@ -567,7 +565,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro produceMessages(backupProducer, "test-topic-1"); } String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // warm up consumers before starting the connectors, so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -576,7 +574,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro mm2Config = new MirrorMakerConfig(mm2Props); - waitUntilMirrorMakerIsRunning(backup, Arrays.asList(MirrorSourceConnector.class, MirrorHeartbeatConnector.class), mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); + waitUntilMirrorMakerIsRunning(backup, List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class), mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS)); MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS)); @@ -595,7 +593,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro .stream() .filter(Optional::isPresent) .map(Optional::get) - .collect(Collectors.toList()); + .toList(); assertTrue(offsetSyncTopic.isEmpty()); primaryClient.close(); @@ -617,7 +615,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { waitForTopicCreated(backup, "mm2-offset-syncs." 
+ PRIMARY_CLUSTER_ALIAS + ".internal"); String consumerGroupName = "consumer-group-syncs-on-target"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); produceMessages(primaryProducer, "test-topic-1"); @@ -626,7 +624,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); // Check offsets are pushed to the checkpoint topic - try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( + try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Map.of( "auto.offset.reset", "earliest"), PRIMARY_CLUSTER_ALIAS + ".checkpoints.internal")) { waitForCondition(() -> { ConsumerRecords records = backupConsumer.poll(Duration.ofSeconds(1L)); @@ -655,7 +653,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { @Test public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedException { String consumerGroupName = "consumer-group-no-checkpoints"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // ensure there are some records in the topic on the source cluster produceMessages(primaryProducer, "test-topic-1"); @@ -676,7 +674,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio TopicPartition tp1 = new TopicPartition("test-topic-1", 0); TopicPartition tp2 = new TopicPartition("test-topic-no-checkpoints", 0); try (Consumer consumer = primary.kafka().createConsumer(consumerProps)) { - Collection tps = Arrays.asList(tp1, tp2); + Collection tps = List.of(tp1, tp2); Map endOffsets = consumer.endOffsets(tps); Map offsetsToCommit = endOffsets.entrySet().stream() .collect(Collectors.toMap( @@ -699,7 +697,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio produceMessages(primaryProducer, "test-topic-no-checkpoints"); try (Consumer consumer = primary.kafka().createConsumer(consumerProps)) { - Collection tps = Arrays.asList(tp1, tp2); + Collection tps = List.of(tp1, tp2); Map endOffsets = consumer.endOffsets(tps); Map offsetsToCommit = endOffsets.entrySet().stream() .collect(Collectors.toMap( @@ -722,7 +720,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio @Test public void testRestartReplication() throws InterruptedException { String consumerGroupName = "consumer-group-restart"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); warmUpConsumer(consumerProps); mm2Props.put("sync.group.offsets.enabled", "true"); @@ -734,7 +732,7 @@ public void testRestartReplication() throws InterruptedException { try (Consumer primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) { waitForConsumingAllRecords(primaryConsumer, NUM_RECORDS_PRODUCED); } - waitForConsumerGroupFullSync(backup, Collections.singletonList(remoteTopic), consumerGroupName, NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); + waitForConsumerGroupFullSync(backup, List.of(remoteTopic), consumerGroupName, NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); restartMirrorMakerConnectors(backup, CONNECTOR_LIST); assertMonotonicCheckpoints(backup, "primary.checkpoints.internal"); Thread.sleep(5000); @@ -742,14 +740,14 @@ public void 
testRestartReplication() throws InterruptedException { try (Consumer primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) { waitForConsumingAllRecords(primaryConsumer, NUM_RECORDS_PRODUCED); } - waitForConsumerGroupFullSync(backup, Collections.singletonList(remoteTopic), consumerGroupName, 2 * NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); + waitForConsumerGroupFullSync(backup, List.of(remoteTopic), consumerGroupName, 2 * NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); assertMonotonicCheckpoints(backup, "primary.checkpoints.internal"); } @Test public void testOffsetTranslationBehindReplicationFlow() throws InterruptedException { String consumerGroupName = "consumer-group-lagging-behind"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); warmUpConsumer(consumerProps); mm2Props.put("sync.group.offsets.enabled", "true"); @@ -839,7 +837,7 @@ public void testSyncTopicConfigs() throws InterruptedException { Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("delete.retention.ms", "2000"), AlterConfigOp.OpType.SET)); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "2000"), AlterConfigOp.OpType.SET)); - Map> configOps = Collections.singletonMap(configResource, ops); + Map> configOps = Map.of(configResource, ops); // alter configs on target cluster backup.kafka().incrementalAlterConfigs(configOps); @@ -879,7 +877,7 @@ public void testReplicateSourceDefault() throws Exception { Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("delete.retention.ms", "2000"), AlterConfigOp.OpType.SET)); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "2000"), AlterConfigOp.OpType.SET)); - Map> configOps = Collections.singletonMap(configResource, ops); + Map> configOps = Map.of(configResource, ops); backup.kafka().incrementalAlterConfigs(configOps); waitForCondition(() -> { @@ -933,7 +931,7 @@ public void testReplicateTargetDefault() throws Exception { ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic); Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "1000"), AlterConfigOp.OpType.DELETE)); - Map> configOps = Collections.singletonMap(configResource, ops); + Map> configOps = Map.of(configResource, ops); primary.kafka().incrementalAlterConfigs(configOps); waitForCondition(() -> { @@ -1101,7 +1099,7 @@ protected static void stopMirrorMakerConnectors(EmbeddedConnectCluster connectCl } protected static void alterMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, LongUnaryOperator alterOffset, String... topics) { - Set topicsSet = new HashSet<>(Arrays.asList(topics)); + Set topicsSet = Set.of(topics); String connectorName = MirrorSourceConnector.class.getSimpleName(); ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName); @@ -1131,7 +1129,7 @@ protected static void alterMirrorMakerSourceConnectorOffsets(EmbeddedConnectClus } protected static void resetSomeMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, String... 
topics) { - Set topicsSet = new HashSet<>(Arrays.asList(topics)); + Set topicsSet = Set.of(topics); String connectorName = MirrorSourceConnector.class.getSimpleName(); ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName); @@ -1155,7 +1153,7 @@ protected static void resetAllMirrorMakerConnectorOffsets(EmbeddedConnectCluster String connectorName = connectorClass.getSimpleName(); connectCluster.resetConnectorOffsets(connectorName); assertEquals( - Collections.emptyList(), + List.of(), connectCluster.connectorOffsets(connectorName).offsets(), "Offsets for connector should be completely empty after full reset" ); @@ -1181,7 +1179,7 @@ protected static void waitForTopicCreated(EmbeddedConnectCluster cluster, String */ protected static String getTopicConfig(EmbeddedKafkaCluster cluster, String topic, String configName) throws Exception { try (Admin client = cluster.createAdminClient()) { - Collection cr = Collections.singleton( + Collection cr = Set.of( new ConfigResource(ConfigResource.Type.TOPIC, topic)); DescribeConfigsResult configsResult = client.describeConfigs(cr); @@ -1200,7 +1198,7 @@ protected void produceMessages(Producer producer, String topicNa protected Producer initializeProducer(EmbeddedConnectCluster cluster) { - return cluster.kafka().createProducer(Collections.emptyMap()); + return cluster.kafka().createProducer(Map.of()); } /** @@ -1224,7 +1222,7 @@ protected void produceMessages(Producer producer, List waitForCheckpointOnAllPartitions( MirrorClient client, String consumerGroupName, String remoteClusterAlias, String topicName ) throws InterruptedException { - return waitForNewCheckpointOnAllPartitions(client, consumerGroupName, remoteClusterAlias, topicName, Collections.emptyMap()); + return waitForNewCheckpointOnAllPartitions(client, consumerGroupName, remoteClusterAlias, topicName, Map.of()); } protected static Map waitForNewCheckpointOnAllPartitions( @@ -1318,7 +1316,7 @@ private static void waitForConsumerGroupFullSync( private static void assertMonotonicCheckpoints(EmbeddedConnectCluster cluster, String checkpointTopic) { TopicPartition checkpointTopicPartition = new TopicPartition(checkpointTopic, 0); - try (Consumer backupConsumer = cluster.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( + try (Consumer backupConsumer = cluster.kafka().createConsumerAndSubscribeTo(Map.of( "auto.offset.reset", "earliest"), checkpointTopic)) { Map> checkpointsByGroup = new HashMap<>(); long deadline = System.currentTimeMillis() + CHECKPOINT_DURATION_MS; @@ -1390,11 +1388,11 @@ private static Map basicMM2Config() { private void createTopics() { // to verify topic config will be sync-ed across clusters - Map topicConfig = Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); - Map emptyMap = Collections.emptyMap(); + Map topicConfig = Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); + Map emptyMap = Map.of(); // increase admin client request timeout value to make the tests reliable. 
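The hunks above convert new HashSet<>(Arrays.asList(topics)) into Set.of(topics). A small sketch of the trade-off when that factory is fed varargs, not taken from the patch (names are placeholders):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SetFactoryDemo {
    public static void main(String[] args) {
        String[] topics = {"test-topic-1", "test-topic-2"};

        // Legacy form: mutable, silently drops duplicates, accepts nulls.
        Set<String> viaHashSet = new HashSet<>(Arrays.asList(topics));

        // Set.of form: unmodifiable, throws IllegalArgumentException on duplicate
        // elements and NullPointerException on nulls, so it assumes callers pass
        // distinct, non-null topic names.
        Set<String> viaFactory = Set.of(topics);

        System.out.println(viaHashSet.equals(viaFactory)); // true for this input
    }
}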
- Map adminClientConfig = Collections.singletonMap( + Map adminClientConfig = Map.of( AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_DURATION_MS); // create these topics before starting the connectors, so we don't need to wait for discovery @@ -1465,7 +1463,7 @@ public void onPartitionsAssigned(Collection partitions) { topicPartitionsPendingPosition.removeAll(topicPartitionsWithPosition); } assertEquals( - Collections.emptySet(), + Set.of(), topicPartitionsPendingPosition, "Failed to calculate consumer position for one or more partitions on cluster " + clusterName + " in time" ); @@ -1494,7 +1492,7 @@ private static void topicShouldNotBeCreated(EmbeddedConnectCluster cluster, Stri */ protected static void waitForTopicPartitionCreated(EmbeddedConnectCluster cluster, String topicName, int totalNumPartitions) throws InterruptedException { try (final Admin adminClient = cluster.kafka().createAdminClient()) { - waitForCondition(() -> adminClient.describeTopics(Collections.singleton(topicName)).allTopicNames().get() + waitForCondition(() -> adminClient.describeTopics(Set.of(topicName)).allTopicNames().get() .get(topicName).partitions().size() == totalNumPartitions, TOPIC_SYNC_DURATION_MS, "Topic: " + topicName + "'s partitions didn't get created on cluster: " + cluster.getName() ); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java index e02cc4c02b332..5578c2b28774c 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Arrays; +import java.util.List; import java.util.Properties; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,7 +45,7 @@ public void startClusters() throws Exception { BACKUP_CLUSTER_ALIAS + "." 
+ DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, DistributedConfig.ExactlyOnceSourceSupport.ENABLED.toString() ); - for (Properties brokerProps : Arrays.asList(primaryBrokerProps, backupBrokerProps)) { + for (Properties brokerProps : List.of(primaryBrokerProps, backupBrokerProps)) { brokerProps.put("transaction.state.log.replication.factor", "1"); brokerProps.put("transaction.state.log.min.isr", "1"); } @@ -81,7 +81,7 @@ public void testReplication() throws Exception { assertEquals(expectedRecordsTopic2, backup.kafka().consume(expectedRecordsTopic2, RECORD_TRANSFER_DURATION_MS, backupTopic2).count(), "New topic was not re-replicated to backup cluster after altering offsets."); - @SuppressWarnings({"unchecked", "rawtypes"}) + @SuppressWarnings("unchecked") Class[] connectorsToReset = CONNECTOR_LIST.toArray(new Class[0]); stopMirrorMakerConnectors(backup, connectorsToReset); // Resetting the offsets for the heartbeat and checkpoint connectors doesn't have any effect diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java index 853cd02f13401..814a03d278b60 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java @@ -43,9 +43,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -153,7 +151,7 @@ public void startClusters() throws Exception { additionalBackupClusterClientsConfigs.putAll(superUserConfig()); backupWorkerProps.putAll(superUserConfig()); - HashMap additionalConfig = new HashMap(superUserConfig()) {{ + Map additionalConfig = new HashMap<>(superUserConfig()) {{ put(FORWARDING_ADMIN_CLASS, FakeForwardingAdminWithLocalMetadata.class.getName()); }}; @@ -172,7 +170,7 @@ public void startClusters() throws Exception { startClusters(additionalConfig); try (Admin adminClient = primary.kafka().createAdminClient()) { - adminClient.createAcls(Collections.singletonList( + adminClient.createAcls(List.of( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -180,7 +178,7 @@ public void startClusters() throws Exception { )).all().get(); } try (Admin adminClient = backup.kafka().createAdminClient()) { - adminClient.createAcls(Collections.singletonList( + adminClient.createAcls(List.of( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -202,7 +200,7 @@ public void testReplicationIsCreatingTopicsUsingProvidedForwardingAdmin() throws produceMessages(primaryProducer, "test-topic-1"); produceMessages(backupProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery 
warmUpConsumer(consumerProps); @@ -239,7 +237,7 @@ public void testCreatePartitionsUseProvidedForwardingAdmin() throws Exception { produceMessages(backupProducer, "test-topic-1"); produceMessages(primaryProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -255,7 +253,7 @@ public void testCreatePartitionsUseProvidedForwardingAdmin() throws Exception { waitForTopicToPersistInFakeLocalMetadataStore("primary.test-topic-1"); // increase number of partitions - Map newPartitions = Collections.singletonMap("test-topic-1", NewPartitions.increaseTo(NUM_PARTITIONS + 1)); + Map newPartitions = Map.of("test-topic-1", NewPartitions.increaseTo(NUM_PARTITIONS + 1)); try (Admin adminClient = primary.kafka().createAdminClient()) { adminClient.createPartitions(newPartitions).all().get(); } @@ -274,7 +272,7 @@ public void testSyncTopicConfigUseProvidedForwardingAdmin() throws Exception { produceMessages(backupProducer, "test-topic-1"); produceMessages(primaryProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); + Map consumerProps = Map.of("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -302,7 +300,7 @@ public void testSyncTopicACLsUseProvidedForwardingAdmin() throws Exception { mm2Props.put("sync.topic.acls.enabled", "true"); mm2Props.put("sync.topic.acls.interval.seconds", "1"); mm2Config = new MirrorMakerConfig(mm2Props); - List aclBindings = Collections.singletonList( + List aclBindings = List.of( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test-topic-1", PatternType.LITERAL), new AccessControlEntry("User:dummy", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW) @@ -344,7 +342,7 @@ public void testSyncTopicACLsUseProvidedForwardingAdmin() throws Exception { ); // expect to use FakeForwardingAdminWithLocalMetadata to update topic ACLs in FakeLocalMetadataStore.allAcls - assertTrue(FakeLocalMetadataStore.aclBindings("dummy").containsAll(Arrays.asList(expectedACLOnBackupCluster, expectedACLOnPrimaryCluster))); + assertTrue(FakeLocalMetadataStore.aclBindings("dummy").containsAll(List.of(expectedACLOnBackupCluster, expectedACLOnPrimaryCluster))); } void waitForTopicToPersistInFakeLocalMetadataStore(String topicName) throws InterruptedException { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java index 5cfa300bafc2b..5a8bc5e08ae4f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java @@ -34,7 +34,7 @@ import java.net.URI; import java.util.Arrays; -import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -82,7 +82,7 @@ protected abstract H createHerder(T config, String workerId, Plugins plugins, * Validate {@link #args}, process worker properties from the first CLI argument, and start {@link Connect} */ public void run() { - if (args.length < 1 || Arrays.asList(args).contains("--help")) { + if 
(args.length < 1 || List.of(args).contains("--help")) { log.info("Usage: {}", usage()); Exit.exit(1); } @@ -90,7 +90,7 @@ public void run() { try { String workerPropsFile = args[0]; Map workerProps = !workerPropsFile.isEmpty() ? - Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap(); + Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Map.of(); String[] extraArgs = Arrays.copyOfRange(args, 1, args.length); Connect connect = startConnect(workerProps); processExtraArgs(connect, extraArgs); @@ -114,14 +114,15 @@ public Connect startConnect(Map workerProps) { log.info("Kafka Connect worker initializing ..."); long initStart = time.hiResClockMs(); + T config = createConfig(workerProps); + log.debug("Kafka cluster ID: {}", config.kafkaClusterId()); + WorkerInfo initInfo = new WorkerInfo(); initInfo.logAll(); log.info("Scanning for plugin classes. This might take a moment ..."); Plugins plugins = new Plugins(workerProps); plugins.compareAndSwapWithDelegatingLoader(); - T config = createConfig(workerProps); - log.debug("Kafka cluster ID: {}", config.kafkaClusterId()); RestClient restClient = new RestClient(config); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java index 8763dd908a179..59b943ae91913 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java @@ -36,8 +36,8 @@ import org.apache.kafka.connect.util.ConnectUtils; import org.apache.kafka.connect.util.SharedTopicAdmin; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG; @@ -77,7 +77,7 @@ protected DistributedHerder createHerder(DistributedConfig config, String worker KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin, () -> clientIdBase, plugins.newInternalConverter(true, JsonConverter.class.getName(), - Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(config); Worker worker = new Worker(workerId, Time.SYSTEM, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy); @@ -99,7 +99,7 @@ protected DistributedHerder createHerder(DistributedConfig config, String worker return new DistributedHerder(config, Time.SYSTEM, worker, kafkaClusterId, statusBackingStore, configBackingStore, restServer.advertisedUrl().toString(), restClient, connectorClientConfigOverridePolicy, - Collections.emptyList(), sharedAdmin); + List.of(), sharedAdmin); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java index 43af6b274b6ac..ded4103c69c50 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java @@ -49,7 +49,6 @@ import java.io.File; import java.io.IOException; import java.nio.file.Paths; -import java.util.Collections; import java.util.Map; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -163,7 +162,7 @@ protected StandaloneHerder createHerder(StandaloneConfig config, String workerId 
RestServer restServer, RestClient restClient) { OffsetBackingStore offsetBackingStore = new FileOffsetBackingStore(plugins.newInternalConverter( - true, JsonConverter.class.getName(), Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + true, JsonConverter.class.getName(), Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(config); Worker worker = new Worker(workerId, Time.SYSTEM, plugins, config, offsetBackingStore, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index 5a787909925da..2e1e1d7318ed4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -203,83 +203,91 @@ public boolean isReady() { @Override public void onStartup(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RUNNING, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } @Override public void onStop(String connector) { statusBackingStore.put(new ConnectorStatus(connector, AbstractStatus.State.STOPPED, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } @Override public void onPause(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.PAUSED, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } @Override public void onResume(String connector) { statusBackingStore.put(new ConnectorStatus(connector, TaskStatus.State.RUNNING, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } @Override public void onShutdown(String connector) { statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.UNASSIGNED, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } @Override public void onFailure(String connector, Throwable cause) { statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.FAILED, - trace(cause), workerId, generation())); + trace(cause), workerId, generation(), worker.connectorVersion(connector))); } @Override public void onStartup(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation())); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation(), null, + worker.taskVersion(id))); } @Override public void onFailure(ConnectorTaskId id, Throwable cause) { - statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause))); + statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause), + worker.taskVersion(id))); } @Override public void onShutdown(ConnectorTaskId id) { - statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation())); + statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation(), null, + worker.taskVersion(id))); } @Override public void onResume(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation())); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation(), null, + worker.taskVersion(id))); } 
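The listener methods above now thread the plugin version (from `worker.connectorVersion(...)` / `worker.taskVersion(...)`) into every status record. A small sketch of the resulting data flow using the constructors introduced in this patch; the connector name, worker URL, generation, and version string are placeholders:

```java
import org.apache.kafka.connect.runtime.TaskStatus;
import org.apache.kafka.connect.util.ConnectorTaskId;

public class StatusVersionSketch {
    public static void main(String[] args) {
        // In the herder these values come from workerId, generation() and worker.taskVersion(id).
        ConnectorTaskId id = new ConnectorTaskId("my-connector", 0);
        TaskStatus status = new TaskStatus(id, TaskStatus.State.RUNNING, "worker-1:8083", 5, null, "3.9.0");

        // The version travels with the status record, is exposed by AbstractStatus#version(),
        // and is later copied into ConnectorStateInfo.TaskState for the REST API.
        System.out.println(status.version()); // 3.9.0
    }
}
```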
@Override public void onPause(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation())); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation(), null, + worker.taskVersion(id))); } @Override public void onDeletion(String connector) { for (TaskStatus status : statusBackingStore.getAll(connector)) onDeletion(status.id()); - statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation())); + statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation(), + worker.connectorVersion(connector))); } @Override public void onDeletion(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation())); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation(), null, + worker.taskVersion(id))); } public void onRestart(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RESTARTING, - workerId, generation())); + workerId, generation(), worker.connectorVersion(connector))); } public void onRestart(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RESTARTING, workerId, generation())); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RESTARTING, workerId, generation(), null, + worker.taskVersion(id))); } @Override @@ -347,12 +355,12 @@ public ConnectorStateInfo connectorStatus(String connName) { Collection tasks = statusBackingStore.getAll(connName); ConnectorStateInfo.ConnectorState connectorState = new ConnectorStateInfo.ConnectorState( - connector.state().toString(), connector.workerId(), connector.trace()); + connector.state().toString(), connector.workerId(), connector.trace(), connector.version()); List taskStates = new ArrayList<>(); for (TaskStatus status : tasks) { taskStates.add(new ConnectorStateInfo.TaskState(status.id().task(), - status.state().toString(), status.workerId(), status.trace())); + status.state().toString(), status.workerId(), status.trace(), status.version())); } Collections.sort(taskStates); @@ -388,7 +396,7 @@ public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) { throw new NotFoundException("No status found for task " + id); return new ConnectorStateInfo.TaskState(id.task(), status.state().toString(), - status.workerId(), status.trace()); + status.workerId(), status.trace(), status.version()); } @Override @@ -547,7 +555,7 @@ private ConfigInfos validateAllConverterConfigs( "header converter", HEADER_CONVERTER_CLASS_CONFIG, HEADER_CONVERTER_VERSION_CONFIG, - Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), + Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), connectorLoader, reportStage ); @@ -560,7 +568,7 @@ private ConfigInfos validateAllConverterConfigs( "key converter", KEY_CONVERTER_CLASS_CONFIG, KEY_CONVERTER_VERSION_CONFIG, - Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), + Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), connectorLoader, reportStage ); @@ -574,7 +582,7 @@ private ConfigInfos validateAllConverterConfigs( "value converter", VALUE_CONVERTER_CLASS_CONFIG, VALUE_CONVERTER_VERSION_CONFIG, - Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), + Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), connectorLoader, reportStage ); @@ 
-626,7 +634,8 @@ public Optional buildRestartPlan(RestartRequest request) { ConnectorStateInfo.ConnectorState connectorInfoState = new ConnectorStateInfo.ConnectorState( connectorState.toString(), connectorStatus.workerId(), - connectorStatus.trace() + connectorStatus.trace(), + connectorStatus.version() ); // Collect the task states, If requested, mark the task as restarting @@ -638,7 +647,8 @@ public Optional buildRestartPlan(RestartRequest request) { taskStatus.id().task(), taskState.toString(), taskStatus.workerId(), - taskStatus.trace() + taskStatus.trace(), + taskStatus.version() ); }) .collect(Collectors.toList()); @@ -887,7 +897,7 @@ private static ConfigInfos mergeConfigInfos(String connType, ConfigInfos... conf for (ConfigInfos configInfos : configInfosList) { if (configInfos != null) { errorCount += configInfos.errorCount(); - configInfoList.addAll(configInfos.values()); + configInfoList.addAll(configInfos.configs()); groups.addAll(configInfos.groups()); } } @@ -1063,7 +1073,7 @@ protected final boolean maybeAddConfigErrors( StringBuilder messages = new StringBuilder(); messages.append("Connector configuration is invalid and contains the following ") .append(errors).append(" error(s):"); - for (ConfigInfo configInfo : configInfos.values()) { + for (ConfigInfo configInfo : configInfos.configs()) { for (String msg : configInfo.configValue().errors()) { messages.append('\n').append(msg); } @@ -1268,7 +1278,7 @@ public List setWorkerLoggerLevel(String namespace, String desiredLevelSt if (!loggers.isValidLevel(normalizedLevel)) { log.warn("Ignoring request to set invalid level '{}' for namespace {}", desiredLevelStr, namespace); - return Collections.emptyList(); + return List.of(); } return loggers.setLevel(namespace, normalizedLevel); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java index 76036d610d738..fc8bc7ca05061 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java @@ -34,18 +34,29 @@ public enum State { private final State state; private final String trace; private final String workerId; + private final String version; private final int generation; public AbstractStatus(T id, State state, String workerId, int generation, - String trace) { + String trace, + String version) { this.id = id; this.state = state; this.workerId = workerId; this.generation = generation; this.trace = trace; + this.version = version; + } + + public AbstractStatus(T id, + State state, + String workerId, + int generation, + String trace) { + this(id, state, workerId, generation, trace, null); } public T id() { @@ -68,12 +79,17 @@ public int generation() { return generation; } + public String version() { + return version; + } + @Override public String toString() { return "Status{" + "id=" + id + ", state=" + state + ", workerId='" + workerId + '\'' + + ", version='" + version + '\'' + ", generation=" + generation + '}'; } @@ -89,7 +105,8 @@ public boolean equals(Object o) { && Objects.equals(id, that.id) && state == that.state && Objects.equals(trace, that.trace) - && Objects.equals(workerId, that.workerId); + && Objects.equals(workerId, that.workerId) + && Objects.equals(version, that.version); } @Override @@ -98,6 +115,7 @@ public int hashCode() { result = 31 * result + (state != null ? 
state.hashCode() : 0); result = 31 * result + (trace != null ? trace.hashCode() : 0); result = 31 * result + (workerId != null ? workerId.hashCode() : 0); + result = 31 * result + (version != null ? version.hashCode() : 0); result = 31 * result + generation; return result; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java index 683eb3abed0f2..9a74d81770fa7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java @@ -203,6 +203,7 @@ protected abstract void producerSendFailed( private final boolean topicTrackingEnabled; private final TopicCreation topicCreation; private final Executor closeExecutor; + private final String version; // Visible for testing List toSend; @@ -236,11 +237,12 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, StatusBackingStore statusBackingStore, Executor closeExecutor, Supplier>> errorReportersSupplier, + TaskPluginsMetadata pluginsMetadata, Function pluginLoaderSwapper) { super(id, statusListener, initialState, loader, connectMetrics, errorMetrics, retryWithToleranceOperator, transformationChain, errorReportersSupplier, - time, statusBackingStore, pluginLoaderSwapper); + time, statusBackingStore, pluginsMetadata, pluginLoaderSwapper); this.workerConfig = workerConfig; this.task = task; @@ -258,6 +260,7 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, this.sourceTaskMetricsGroup = new SourceTaskMetricsGroup(id, connectMetrics); this.topicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.topicCreation = TopicCreation.newTopicCreation(workerConfig, topicGroups); + this.version = task.version(); } @Override @@ -391,6 +394,11 @@ public void execute() { finalOffsetCommit(false); } + @Override + public String taskVersion() { + return version; + } + /** * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException. diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java index 90e6650df371f..834086490002f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java @@ -253,7 +253,7 @@ public MetricGroupId(String groupName, Map tags) { Objects.requireNonNull(groupName); Objects.requireNonNull(tags); this.groupName = groupName; - this.tags = Collections.unmodifiableMap(new LinkedHashMap<>(tags)); + this.tags = Collections.unmodifiableMap(new LinkedHashMap<>(tags)); // Keep the tags' insertion order; the immutable Map factories do not preserve it.
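The ordering note above matters because MetricGroupId builds the group name by iterating the tag entries, so the tag map must keep insertion order. A standalone sketch of the difference, with placeholder tag values:

```java
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class TagOrderSketch {
    public static void main(String[] args) {
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("connector", "my-connector");
        tags.put("task", "0");

        // Wrapping a LinkedHashMap keeps insertion order, so iterating the entries always
        // yields "connector" before "task".
        Map<String, String> ordered = Collections.unmodifiableMap(new LinkedHashMap<>(tags));

        // Map.copyOf (and Map.of) give no iteration-order guarantee, which is why this
        // constructor keeps the Collections wrapper instead of the new factories.
        Map<String, String> unordered = Map.copyOf(tags);

        System.out.println(ordered);   // {connector=my-connector, task=0}
        System.out.println(unordered); // iteration order unspecified
    }
}
```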
this.hc = Objects.hash(this.groupName, this.tags); StringBuilder sb = new StringBuilder(this.groupName); for (Map.Entry entry : this.tags.entrySet()) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java index 1d144440f2c20..7f879ea8f2aa8 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java @@ -19,8 +19,6 @@ import org.apache.kafka.common.MetricNameTemplate; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -37,6 +35,10 @@ public class ConnectMetricsRegistry { public static final String WORKER_GROUP_NAME = "connect-worker-metrics"; public static final String WORKER_REBALANCE_GROUP_NAME = "connect-worker-rebalance-metrics"; public static final String TASK_ERROR_HANDLING_GROUP_NAME = "task-error-metrics"; + public static final String TRANSFORMS_GROUP = "connector-transform-metrics"; + public static final String PREDICATES_GROUP = "connector-predicate-metrics"; + public static final String TRANSFORM_TAG_NAME = "transform"; + public static final String PREDICATE_TAG_NAME = "predicate"; private final List allTemplates = new ArrayList<>(); public final MetricNameTemplate connectorStatus; @@ -59,6 +61,17 @@ public class ConnectMetricsRegistry { public final MetricNameTemplate taskBatchSizeAvg; public final MetricNameTemplate taskCommitFailurePercentage; public final MetricNameTemplate taskCommitSuccessPercentage; + public final MetricNameTemplate taskConnectorClass; + public final MetricNameTemplate taskConnectorClassVersion; + public final MetricNameTemplate taskConnectorType; + public final MetricNameTemplate taskClass; + public final MetricNameTemplate taskVersion; + public final MetricNameTemplate taskKeyConverterClass; + public final MetricNameTemplate taskValueConverterClass; + public final MetricNameTemplate taskKeyConverterVersion; + public final MetricNameTemplate taskValueConverterVersion; + public final MetricNameTemplate taskHeaderConverterClass; + public final MetricNameTemplate taskHeaderConverterVersion; public final MetricNameTemplate sourceRecordPollRate; public final MetricNameTemplate sourceRecordPollTotal; public final MetricNameTemplate sourceRecordWriteRate; @@ -115,8 +128,12 @@ public class ConnectMetricsRegistry { public final MetricNameTemplate transactionSizeMin; public final MetricNameTemplate transactionSizeMax; public final MetricNameTemplate transactionSizeAvg; + public final MetricNameTemplate transformClass; + public final MetricNameTemplate transformVersion; + public final MetricNameTemplate predicateClass; + public final MetricNameTemplate predicateVersion; - public Map connectorStatusMetrics; + public final Map connectorStatusMetrics; public ConnectMetricsRegistry() { this(new LinkedHashSet<>()); @@ -164,6 +181,43 @@ public ConnectMetricsRegistry(Set tags) { taskCommitSuccessPercentage = createTemplate("offset-commit-success-percentage", TASK_GROUP_NAME, "The average percentage of this task's offset commit attempts that succeeded.", workerTaskTags); + taskConnectorClass = createTemplate("connector-class", TASK_GROUP_NAME, "The name of the connector class.", workerTaskTags); + taskConnectorClassVersion = createTemplate("connector-version", TASK_GROUP_NAME, + 
"The version of the connector class, as reported by the connector.", workerTaskTags); + taskConnectorType = createTemplate("connector-type", TASK_GROUP_NAME, "The type of the connector. One of 'source' or 'sink'.", + workerTaskTags); + taskClass = createTemplate("task-class", TASK_GROUP_NAME, "The class name of the task.", workerTaskTags); + taskVersion = createTemplate("task-version", TASK_GROUP_NAME, "The version of the task.", workerTaskTags); + taskKeyConverterClass = createTemplate("key-converter-class", TASK_GROUP_NAME, + "The fully qualified class name from key.converter", workerTaskTags); + taskValueConverterClass = createTemplate("value-converter-class", TASK_GROUP_NAME, + "The fully qualified class name from value.converter", workerTaskTags); + taskKeyConverterVersion = createTemplate("key-converter-version", TASK_GROUP_NAME, + "The version instantiated for key.converter. May be undefined", workerTaskTags); + taskValueConverterVersion = createTemplate("value-converter-version", TASK_GROUP_NAME, + "The version instantiated for value.converter. May be undefined", workerTaskTags); + taskHeaderConverterClass = createTemplate("header-converter-class", TASK_GROUP_NAME, + "The fully qualified class name from header.converter", workerTaskTags); + taskHeaderConverterVersion = createTemplate("header-converter-version", TASK_GROUP_NAME, + "The version instantiated for header.converter. May be undefined", workerTaskTags); + + /* Transformation Metrics */ + Set transformTags = new LinkedHashSet<>(tags); + transformTags.addAll(workerTaskTags); + transformTags.add(TRANSFORM_TAG_NAME); + transformClass = createTemplate("transform-class", TRANSFORMS_GROUP, + "The class name of the transformation class", transformTags); + transformVersion = createTemplate("transform-version", TRANSFORMS_GROUP, + "The version of the transformation class", transformTags); + + /* Predicate Metrics */ + Set predicateTags = new LinkedHashSet<>(tags); + predicateTags.addAll(workerTaskTags); + predicateTags.add(PREDICATE_TAG_NAME); + predicateClass = createTemplate("predicate-class", PREDICATES_GROUP, + "The class name of the predicate class", predicateTags); + predicateVersion = createTemplate("predicate-version", PREDICATES_GROUP, + "The version of the predicate class", predicateTags); /* Source worker task level */ Set sourceTaskTags = new LinkedHashSet<>(tags); @@ -332,14 +386,14 @@ public ConnectMetricsRegistry(Set tags) { WORKER_GROUP_NAME, "The number of restarting tasks of the connector on the worker.", workerConnectorTags); - connectorStatusMetrics = new HashMap<>(); - connectorStatusMetrics.put(connectorRunningTaskCount, TaskStatus.State.RUNNING); - connectorStatusMetrics.put(connectorPausedTaskCount, TaskStatus.State.PAUSED); - connectorStatusMetrics.put(connectorFailedTaskCount, TaskStatus.State.FAILED); - connectorStatusMetrics.put(connectorUnassignedTaskCount, TaskStatus.State.UNASSIGNED); - connectorStatusMetrics.put(connectorDestroyedTaskCount, TaskStatus.State.DESTROYED); - connectorStatusMetrics.put(connectorRestartingTaskCount, TaskStatus.State.RESTARTING); - connectorStatusMetrics = Collections.unmodifiableMap(connectorStatusMetrics); + connectorStatusMetrics = Map.of( + connectorRunningTaskCount, TaskStatus.State.RUNNING, + connectorPausedTaskCount, TaskStatus.State.PAUSED, + connectorFailedTaskCount, TaskStatus.State.FAILED, + connectorUnassignedTaskCount, TaskStatus.State.UNASSIGNED, + connectorDestroyedTaskCount, TaskStatus.State.DESTROYED, + connectorRestartingTaskCount, 
TaskStatus.State.RESTARTING + ); /* Worker rebalance level */ Set rebalanceTags = new LinkedHashSet<>(tags); @@ -388,7 +442,7 @@ private MetricNameTemplate createTemplate(String name, String group, String doc, } public List getAllTemplates() { - return Collections.unmodifiableList(allTemplates); + return List.copyOf(allTemplates); } public String connectorTagName() { @@ -426,4 +480,20 @@ public String workerRebalanceGroupName() { public String taskErrorHandlingGroupName() { return TASK_ERROR_HANDLING_GROUP_NAME; } + + public String transformsGroupName() { + return TRANSFORMS_GROUP; + } + + public String transformsTagName() { + return TRANSFORM_TAG_NAME; + } + + public String predicatesGroupName() { + return PREDICATES_GROUP; + } + + public String predicateTagName() { + return PREDICATE_TAG_NAME; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index 3a301335502ff..ff4d399db1a6f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -47,7 +47,6 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -250,8 +249,8 @@ protected static ConfigDef configDef( .define(VALUE_CONVERTER_VERSION_CONFIG, Type.STRING, valueConverterDefaults.version, VALUE_CONVERTER_VERSION_VALIDATOR, Importance.LOW, VALUE_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_VERSION_DISPLAY, recommender.valueConverterPluginVersionRecommender()) .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, headerConverterDefaults.type, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY, recommender.headerConverterPluginRecommender()) .define(HEADER_CONVERTER_VERSION_CONFIG, Type.STRING, headerConverterDefaults.version, HEADER_CONVERTER_VERSION_VALIDATOR, Importance.LOW, HEADER_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_VERSION_DISPLAY, recommender.headerConverterPluginVersionRecommender()) - .define(TRANSFORMS_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) - .define(PREDICATES_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) + .define(TRANSFORMS_CONFIG, Type.LIST, List.of(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) + .define(PREDICATES_CONFIG, Type.LIST, List.of(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) .define(CONFIG_RELOAD_ACTION_CONFIG, Type.STRING, CONFIG_RELOAD_ACTION_RESTART, in(CONFIG_RELOAD_ACTION_NONE, CONFIG_RELOAD_ACTION_RESTART), Importance.LOW, CONFIG_RELOAD_ACTION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONFIG_RELOAD_ACTION_DISPLAY) @@ -303,7 +302,7 @@ private static ConfigDef.CompositeValidator aliasValidator(String kind) { } public ConnectorConfig(Plugins plugins) { - this(plugins, Collections.emptyMap()); + this(plugins, 
Map.of()); } public ConnectorConfig(Plugins plugins, Map props) { @@ -374,7 +373,7 @@ public > List> transformationS final String versionConfig = prefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; final Transformation transformation = getTransformationOrPredicate(plugins, typeConfig, versionConfig); Map configs = originalsWithPrefix(prefix); - Object predicateAlias = configs.remove(TransformationStage.PREDICATE_CONFIG); + String predicateAlias = (String) configs.remove(TransformationStage.PREDICATE_CONFIG); Object negate = configs.remove(TransformationStage.NEGATE_CONFIG); transformation.configure(configs); Plugin> transformationPlugin = metrics.wrap(transformation, connectorTaskId, alias); @@ -384,10 +383,24 @@ public > List> transformationS final String predicateVersionConfig = predicatePrefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; Predicate predicate = getTransformationOrPredicate(plugins, predicateTypeConfig, predicateVersionConfig); predicate.configure(originalsWithPrefix(predicatePrefix)); - Plugin> predicatePlugin = metrics.wrap(predicate, connectorTaskId, (String) predicateAlias); - transformations.add(new TransformationStage<>(predicatePlugin, negate != null && Boolean.parseBoolean(negate.toString()), transformationPlugin, plugins.safeLoaderSwapper())); + Plugin> predicatePlugin = metrics.wrap(predicate, connectorTaskId, predicateAlias); + transformations.add(new TransformationStage<>( + predicatePlugin, + predicateAlias, + plugins.pluginVersion(predicate.getClass().getName(), predicate.getClass().getClassLoader(), PluginType.PREDICATE), + negate != null && Boolean.parseBoolean(negate.toString()), + transformationPlugin, + alias, + plugins.pluginVersion(transformation.getClass().getName(), transformation.getClass().getClassLoader(), PluginType.TRANSFORMATION), + plugins.safeLoaderSwapper()) + ); } else { - transformations.add(new TransformationStage<>(transformationPlugin, plugins.safeLoaderSwapper())); + transformations.add(new TransformationStage<>( + transformationPlugin, + alias, + plugins.pluginVersion(transformation.getClass().getName(), transformation.getClass().getClassLoader(), PluginType.TRANSFORMATION), + plugins.safeLoaderSwapper()) + ); } } catch (Exception e) { throw new ConnectException(e); @@ -632,7 +645,7 @@ void enrich(ConfigDef newDef, Plugins plugins) { newDef.define(typeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE, typeValidator, Importance.HIGH, "Class for the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, baseClass.getSimpleName() + " type for " + alias, - Collections.emptyList(), new ClassRecommender()); + List.of(), new ClassRecommender()); // Add the version configuration final ConfigDef.Validator versionValidator = (name, value) -> { @@ -650,7 +663,7 @@ void enrich(ConfigDef newDef, Plugins plugins) { newDef.define(versionConfig, Type.STRING, defaultVersion, versionValidator, Importance.HIGH, "Version of the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, baseClass.getSimpleName() + " version for " + alias, - Collections.emptyList(), versionRecommender(typeConfig)); + List.of(), versionRecommender(typeConfig)); final ConfigDef configDef = populateConfigDef(typeConfig, versionConfig, plugins); if (configDef == null) continue; @@ -766,11 +779,7 @@ final class ClassRecommender implements ConfigDef.Recommender { @Override public List validValues(String name, Map parsedConfig) { - List result = new ArrayList<>(); - for (PluginDesc plugin : plugins()) { - 
result.add(plugin.pluginClass()); - } - return Collections.unmodifiableList(result); + return plugins().stream().map(p -> (Object) p.pluginClass()).toList(); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java index 10ed188cdf883..d704a3374e296 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java @@ -19,8 +19,12 @@ public class ConnectorStatus extends AbstractStatus { - public ConnectorStatus(String connector, State state, String msg, String workerUrl, int generation) { - super(connector, state, workerUrl, generation, msg); + public ConnectorStatus(String connector, State state, String msg, String workerUrl, int generation, String version) { + super(connector, state, workerUrl, generation, msg, version); + } + + public ConnectorStatus(String connector, State state, String workerUrl, int generation, String version) { + super(connector, state, workerUrl, generation, null, version); } public ConnectorStatus(String connector, State state, String workerUrl, int generation) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java index 42e43babe557d..d6f4ffbd4b9be 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java @@ -104,11 +104,12 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, Runnable preProducerCheck, Runnable postProducerCheck, Supplier>> errorReportersSupplier, + TaskPluginsMetadata pluginsMetadata, Function pluginLoaderSwapper) { super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, buildTransactionContext(sourceConfig), producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, - loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginLoaderSwapper); + loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginsMetadata, pluginLoaderSwapper); this.transactionOpen = false; this.committableRecords = new LinkedHashMap<>(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java index 3767e31ac7ce7..1a79698ae9f56 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java @@ -255,7 +255,7 @@ Map currentLoggers() { LoggerContext context = (LoggerContext) LogManager.getContext(false); var results = new HashMap(); context.getConfiguration().getLoggers().forEach((name, logger) -> results.put(name, loggerContext.getLogger(name))); - context.getLoggerRegistry().getLoggers().forEach(logger -> results.put(logger.getName(), logger)); + context.getLoggers().forEach(logger -> results.put(logger.getName(), logger)); return results; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java index 4584255e23132..43da717a78cf4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java @@ -27,7 +27,6 @@ import org.apache.kafka.connect.transforms.util.RegexValidator; import java.util.ArrayList; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -42,7 +41,7 @@ public class SinkConnectorConfig extends ConnectorConfig { public static final String TOPICS_CONFIG = SinkTask.TOPICS_CONFIG; private static final String TOPICS_DOC = "List of topics to consume, separated by commas"; - public static final String TOPICS_DEFAULT = ""; + public static final List TOPICS_DEFAULT = List.of(); private static final String TOPICS_DISPLAY = "Topics"; public static final String TOPICS_REGEX_CONFIG = SinkTask.TOPICS_REGEX_CONFIG; @@ -75,7 +74,7 @@ public class SinkConnectorConfig extends ConnectorConfig { private static ConfigDef configDef(ConfigDef baseConfigs) { return baseConfigs - .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) + .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) @@ -169,7 +168,7 @@ public static void validate(Map props, Map private static void addErrorMessage(Map validatedConfig, String name, String value, String errorMessage) { validatedConfig.computeIfAbsent( name, - p -> new ConfigValue(name, value, Collections.emptyList(), new ArrayList<>()) + p -> new ConfigValue(name, value, List.of(), new ArrayList<>()) ).addErrorMessage( errorMessage ); @@ -189,7 +188,7 @@ public static boolean hasDlqTopicConfig(Map props) { public static List parseTopicsList(Map props) { List topics = (List) ConfigDef.parseType(TOPICS_CONFIG, props.get(TOPICS_CONFIG), Type.LIST); if (topics == null) { - return Collections.emptyList(); + return List.of(); } return topics .stream() diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java index 336468f491aa3..e9913e81f4c72 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java @@ -26,12 +26,12 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import 
java.util.Set; import java.util.stream.Collectors; import static org.apache.kafka.common.utils.Utils.enumOptions; @@ -132,7 +132,7 @@ private static ConfigDef configDef(ConfigDef baseConfigDef) { .define( TOPIC_CREATION_GROUPS_CONFIG, ConfigDef.Type.LIST, - Collections.emptyList(), + List.of(), ConfigDef.CompositeValidator.of( new ConfigDef.NonNullValidator(), ConfigDef.LambdaValidator.with( @@ -240,7 +240,7 @@ public static ConfigDef enrich(ConfigDef baseConfigDef, Map prop if (topicCreationGroups.contains(DEFAULT_TOPIC_CREATION_GROUP)) { log.warn("'{}' topic creation group always exists and does not need to be listed explicitly", DEFAULT_TOPIC_CREATION_GROUP); - topicCreationGroups.removeAll(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP)); + topicCreationGroups.removeAll(Set.of(DEFAULT_TOPIC_CREATION_GROUP)); } ConfigDef newDef = new ConfigDef(baseConfigDef); @@ -332,7 +332,7 @@ public Integer topicCreationPartitions(String group) { public Map topicCreationOtherConfigs(String group) { if (enrichedSourceConfig == null) { - return Collections.emptyMap(); + return Map.of(); } return enrichedSourceConfig.originalsWithPrefix(TOPIC_CREATION_PREFIX + group + '.').entrySet().stream() .filter(e -> { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java index accbb0196d7a2..86cec8080db3b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java @@ -250,7 +250,7 @@ record CommittableOffsets(Map, Map> offsets, /** * An "empty" snapshot that contains no offsets to commit and whose metadata contains no committable or uncommitable messages. */ - public static final CommittableOffsets EMPTY = new CommittableOffsets(Collections.emptyMap(), 0, 0, 0, 0, null); + public static final CommittableOffsets EMPTY = new CommittableOffsets(Map.of(), 0, 0, 0, 0, null); CommittableOffsets { offsets = Collections.unmodifiableMap(offsets); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskPluginsMetadata.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskPluginsMetadata.java new file mode 100644 index 0000000000000..14e6cb9b7a717 --- /dev/null +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskPluginsMetadata.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.runtime; + +import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.runtime.isolation.PluginType; +import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.rest.entities.ConnectorType; +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.connect.storage.HeaderConverter; + +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +public class TaskPluginsMetadata { + + private final String connectorClass; + private final String connectorVersion; + private final ConnectorType connectorType; + private final String taskClass; + private final String taskVersion; + private final String keyConverterClass; + private final String keyConverterVersion; + private final String valueConverterClass; + private final String valueConverterVersion; + private final String headerConverterClass; + private final String headerConverterVersion; + private final Set transformations; + private final Set predicates; + + public TaskPluginsMetadata( + Class connectorClass, + Task task, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, + List transformationStageInfo, + Plugins plugins + ) { + + assert connectorClass != null; + assert task != null; + assert keyConverter != null; + assert valueConverter != null; + assert headerConverter != null; + assert transformationStageInfo != null; + + this.connectorClass = connectorClass.getName(); + this.connectorVersion = plugins.pluginVersion(connectorClass.getName(), connectorClass.getClassLoader(), PluginType.SINK, PluginType.SOURCE); + this.connectorType = ConnectorType.from(connectorClass); + this.taskClass = task.getClass().getName(); + this.taskVersion = task.version(); + this.keyConverterClass = keyConverter.getClass().getName(); + this.keyConverterVersion = plugins.pluginVersion(keyConverter.getClass().getName(), keyConverter.getClass().getClassLoader(), PluginType.CONVERTER); + this.valueConverterClass = valueConverter.getClass().getName(); + this.valueConverterVersion = plugins.pluginVersion(valueConverter.getClass().getName(), valueConverter.getClass().getClassLoader(), PluginType.CONVERTER); + this.headerConverterClass = headerConverter.getClass().getName(); + this.headerConverterVersion = plugins.pluginVersion(headerConverter.getClass().getName(), headerConverter.getClass().getClassLoader(), PluginType.HEADER_CONVERTER); + this.transformations = transformationStageInfo.stream().map(TransformationStage.StageInfo::transform).collect(Collectors.toSet()); + this.predicates = transformationStageInfo.stream().map(TransformationStage.StageInfo::predicate).filter(Objects::nonNull).collect(Collectors.toSet()); + } + + public String connectorClass() { + return connectorClass; + } + + public String connectorVersion() { + return connectorVersion; + } + + public ConnectorType connectorType() { + return connectorType; + } + + public String taskClass() { + return taskClass; + } + + public String taskVersion() { + return taskVersion; + } + + public String keyConverterClass() { + return keyConverterClass; + } + + public String keyConverterVersion() { + return keyConverterVersion; + } + + public String valueConverterClass() { + return valueConverterClass; + } + + public String valueConverterVersion() { + return valueConverterVersion; + } + + public String headerConverterClass() { + return headerConverterClass; + 
} + + public String headerConverterVersion() { + return headerConverterVersion; + } + + public Set transformations() { + return transformations; + } + + public Set predicates() { + return predicates; + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java index e35efcafe2e91..45150ef7ef5a3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java @@ -20,8 +20,8 @@ public class TaskStatus extends AbstractStatus { - public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation, String trace) { - super(id, state, workerUrl, generation, trace); + public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation, String trace, String version) { + super(id, state, workerUrl, generation, trace, version); } public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java index 11c2ba9d37425..4339fd6f2364a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.connect.util.TopicAdmin; -import java.util.Collections; import java.util.List; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; @@ -102,11 +101,11 @@ public static ConfigDef configDef(String group, short defaultReplicationFactor, int orderInGroup = 0; ConfigDef configDef = new ConfigDef(); configDef - .define(INCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), + .define(INCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), REGEX_VALIDATOR, ConfigDef.Importance.LOW, INCLUDE_REGEX_DOC, group, ++orderInGroup, ConfigDef.Width.LONG, "Inclusion Topic Pattern for " + group) - .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), + .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), REGEX_VALIDATOR, ConfigDef.Importance.LOW, EXCLUDE_REGEX_DOC, group, ++orderInGroup, ConfigDef.Width.LONG, "Exclusion Topic Pattern for " + group) @@ -129,7 +128,7 @@ public static ConfigDef defaultGroupConfigDef() { new ConfigDef.NonNullValidator(), ConfigDef.Importance.LOW, INCLUDE_REGEX_DOC, DEFAULT_TOPIC_CREATION_GROUP, ++orderInGroup, ConfigDef.Width.LONG, "Inclusion Topic Pattern for " + DEFAULT_TOPIC_CREATION_GROUP) - .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), + .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), new ConfigDef.NonNullValidator(), ConfigDef.Importance.LOW, EXCLUDE_REGEX_DOC, DEFAULT_TOPIC_CREATION_GROUP, ++orderInGroup, ConfigDef.Width.LONG, "Exclusion Topic Pattern for " + DEFAULT_TOPIC_CREATION_GROUP) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java index f6b92697c443b..68d52f2c1ca3d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java +++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Objects; import java.util.StringJoiner; +import java.util.stream.Collectors; /** * Represents a chain of {@link Transformation}s to be applied to a {@link ConnectRecord} serially. @@ -89,4 +90,8 @@ public String toString() { } return chain.toString(); } + + public List transformationChainInfo() { + return transformationStages.stream().map(TransformationStage::transformationStageInfo).collect(Collectors.toList()); + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java index a86c4878ab37e..56293e0363206 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java @@ -24,6 +24,7 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; +import java.util.Objects; import java.util.function.Function; /** @@ -39,18 +40,40 @@ public class TransformationStage> implements AutoClos private final Plugin> predicatePlugin; private final Plugin> transformationPlugin; private final boolean negate; + private final String transformAlias; + private final String predicateAlias; + private final String transformVersion; + private final String predicateVersion; private final Function pluginLoaderSwapper; - TransformationStage(Plugin> transformationPlugin, Function pluginLoaderSwapper) { - this(null, false, transformationPlugin, pluginLoaderSwapper); + TransformationStage( + Plugin> transformationPlugin, + String transformAlias, + String transformVersion, + Function pluginLoaderSwapper + ) { + this(null, null, null, false, transformationPlugin, transformAlias, transformVersion, pluginLoaderSwapper); } - TransformationStage(Plugin> predicatePlugin, boolean negate, Plugin> transformationPlugin, Function pluginLoaderSwapper) { + TransformationStage( + Plugin> predicatePlugin, + String predicateAlias, + String predicateVersion, + boolean negate, + Plugin> transformationPlugin, + String transformAlias, + String transformVersion, + Function pluginLoaderSwapper + ) { this.predicatePlugin = predicatePlugin; this.negate = negate; this.transformationPlugin = transformationPlugin; this.pluginLoaderSwapper = pluginLoaderSwapper; + this.transformAlias = transformAlias; + this.predicateAlias = predicateAlias; + this.transformVersion = transformVersion; + this.predicateVersion = predicateVersion; } public Class> transformClass() { @@ -89,4 +112,32 @@ public String toString() { ", negate=" + negate + '}'; } + + public record AliasedPluginInfo(String alias, String className, String version) { + public AliasedPluginInfo { + Objects.requireNonNull(alias, "alias cannot be null"); + Objects.requireNonNull(className, "className cannot be null"); + } + } + + + public record StageInfo(AliasedPluginInfo transform, AliasedPluginInfo predicate) { + public StageInfo { + Objects.requireNonNull(transform, "transform cannot be null"); + } + } + + + public StageInfo transformationStageInfo() { + AliasedPluginInfo transformInfo = new AliasedPluginInfo( + transformAlias, + transformationPlugin.get().getClass().getName(), + transformVersion + ); + AliasedPluginInfo predicateInfo = predicatePlugin != null ? 
new AliasedPluginInfo( + predicateAlias, + predicatePlugin.get().getClass().getName(), predicateVersion + ) : null; + return new StageInfo(transformInfo, predicateInfo); + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java index 1c1acc5647e8e..53cc40d7fd8b7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java @@ -108,7 +108,6 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -197,7 +196,7 @@ public Worker( this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy; this.workerMetricsGroup = new WorkerMetricsGroup(this.connectors, this.tasks, metrics); - Map internalConverterConfig = Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"); + Map internalConverterConfig = Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"); this.internalKeyConverter = plugins.newInternalConverter(true, JsonConverter.class.getName(), internalConverterConfig); this.internalValueConverter = plugins.newInternalConverter(false, JsonConverter.class.getName(), internalConverterConfig); @@ -346,7 +345,7 @@ public void startConnector( } workerConnector = new WorkerConnector( connName, connector, connConfig, ctx, metrics, connectorStatusListener, offsetReader, offsetStore, connectorLoader); - log.info("Instantiated connector {} with version {} of type {}", connName, connector.version(), connector.getClass()); + log.info("Instantiated connector {} with version {} of type {}", connName, workerConnector.connectorVersion(), connector.getClass()); workerConnector.transitionTo(initialState, onConnectorStateChange); } } catch (Throwable t) { @@ -539,7 +538,7 @@ public void stopAndAwaitConnectors(Collection ids) { */ public void stopAndAwaitConnector(String connName) { stopConnector(connName); - awaitStopConnectors(Collections.singletonList(connName)); + awaitStopConnectors(List.of(connName)); } /** @@ -562,6 +561,22 @@ public boolean isRunning(String connName) { return workerConnector != null && workerConnector.isRunning(); } + public String connectorVersion(String connName) { + WorkerConnector conn = connectors.get(connName); + if (conn == null) { + return null; + } + return conn.connectorVersion(); + } + + public String taskVersion(ConnectorTaskId taskId) { + WorkerTask task = tasks.get(taskId); + if (task == null) { + return null; + } + return task.taskVersion(); + } + /** * Start a sink task managed by this worker. 
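For context on the records introduced in TransformationStage above: AliasedPluginInfo requires an alias and class name but tolerates a null version, and StageInfo requires a transform while leaving the predicate optional, which is what lets transformationChainInfo() describe stages both with and without predicates. Below is a minimal standalone sketch of that shape; the demo class, aliases, and plugin class names are placeholder values and are not taken from this patch.

import java.util.List;
import java.util.Objects;

public class StageInfoDemo {

    // Same shape as TransformationStage.AliasedPluginInfo in the patch: alias and
    // className are mandatory, version may be null when a plugin does not report one.
    record AliasedPluginInfo(String alias, String className, String version) {
        AliasedPluginInfo {
            Objects.requireNonNull(alias, "alias cannot be null");
            Objects.requireNonNull(className, "className cannot be null");
        }
    }

    // Same shape as TransformationStage.StageInfo: the transform is required and the
    // predicate stays null for stages that are not guarded by a predicate.
    record StageInfo(AliasedPluginInfo transform, AliasedPluginInfo predicate) {
        StageInfo {
            Objects.requireNonNull(transform, "transform cannot be null");
        }
    }

    public static void main(String[] args) {
        StageInfo guarded = new StageInfo(
                new AliasedPluginInfo("mask", "org.example.MaskField", "1.0.0"),
                new AliasedPluginInfo("onTombstone", "org.example.IsTombstone", "1.0.0"));
        StageInfo unguarded = new StageInfo(
                new AliasedPluginInfo("insertHeader", "org.example.InsertHeader", null),
                null);

        // Roughly the kind of chain-level summary a transformationChainInfo() call returns.
        List<StageInfo> chain = List.of(guarded, unguarded);
        chain.forEach(System.out::println);
    }
}
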
* @@ -714,7 +729,7 @@ private boolean startTask( .withKeyConverterPlugin(metrics.wrap(keyConverter, id, true)) .withValueConverterPlugin(metrics.wrap(valueConverter, id, false)) .withHeaderConverterPlugin(metrics.wrap(headerConverter, id)) - .withClassloader(connectorLoader) + .withClassLoader(connectorLoader) .build(); workerTask.initialize(taskConfig); @@ -1133,7 +1148,7 @@ public void stopAndAwaitTasks(Collection ids) { */ public void stopAndAwaitTask(ConnectorTaskId taskId) { stopTask(taskId); - awaitStopTasks(Collections.singletonList(taskId)); + awaitStopTasks(List.of(taskId)); } /** @@ -1546,7 +1561,7 @@ private void alterSinkConnectorOffsets(String connName, String groupId, Admin ad private void resetSinkConnectorOffsets(String connName, String groupId, Admin admin, Callback cb, boolean alterOffsetsResult, Timer timer) { DeleteConsumerGroupsOptions deleteConsumerGroupsOptions = new DeleteConsumerGroupsOptions().timeoutMs((int) timer.remainingMs()); - admin.deleteConsumerGroups(Collections.singleton(groupId), deleteConsumerGroupsOptions) + admin.deleteConsumerGroups(Set.of(groupId), deleteConsumerGroupsOptions) .all() .whenComplete((ignored, error) -> { // We treat GroupIdNotFoundException as a non-error here because resetting a connector's offsets is expected to be an idempotent operation @@ -1814,11 +1829,12 @@ public TaskBuilder withHeaderConverterPlugin(Plugin heade return this; } - public TaskBuilder withClassloader(ClassLoader classLoader) { + public TaskBuilder withClassLoader(ClassLoader classLoader) { this.classLoader = classLoader; return this; } + public WorkerTask build() { Objects.requireNonNull(task, "Task cannot be null"); Objects.requireNonNull(connectorConfig, "Connector config used by task cannot be null"); @@ -1836,10 +1852,13 @@ public WorkerTask build() { TransformationChain transformationChain = new TransformationChain<>(connectorConfig.transformationStages(plugins, id, metrics), retryWithToleranceOperator); log.info("Initializing: {}", transformationChain); + TaskPluginsMetadata taskPluginsMetadata = new TaskPluginsMetadata( + connectorClass, task, keyConverterPlugin.get(), valueConverterPlugin.get(), headerConverterPlugin.get(), transformationChain.transformationChainInfo(), plugins); + return doBuild(task, id, configState, statusListener, initialState, - connectorConfig, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, classLoader, - retryWithToleranceOperator, transformationChain, - errorHandlingMetrics, connectorClass); + connectorConfig, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, classLoader, + retryWithToleranceOperator, transformationChain, + errorHandlingMetrics, connectorClass, taskPluginsMetadata); } abstract WorkerTask doBuild( @@ -1856,7 +1875,8 @@ abstract WorkerTask doBuild( RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass + Class connectorClass, + TaskPluginsMetadata pluginsMetadata ); } @@ -1884,7 +1904,8 @@ public WorkerTask, SinkRecord> doBuild( RetryWithToleranceOperator> retryWithToleranceOperator, TransformationChain, SinkRecord> transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass + Class connectorClass, + TaskPluginsMetadata taskPluginsMetadata ) { SinkConnectorConfig sinkConfig = new SinkConnectorConfig(plugins, connectorConfig.originalsStrings()); WorkerErrantRecordReporter workerErrantRecordReporter = createWorkerErrantRecordReporter(sinkConfig, 
retryWithToleranceOperator, @@ -1898,7 +1919,7 @@ public WorkerTask, SinkRecord> doBuild( return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, consumer, classLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, herder.statusBackingStore(), - () -> sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass), plugins.safeLoaderSwapper()); + () -> sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass), taskPluginsMetadata, plugins.safeLoaderSwapper()); } } @@ -1925,7 +1946,8 @@ public WorkerTask doBuild( RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass + Class connectorClass, + TaskPluginsMetadata pluginsMetadata ) { SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connectorConfig.originalsStrings(), config.topicCreationEnable()); @@ -1958,7 +1980,7 @@ public WorkerTask doBuild( return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, classLoader, time, - retryWithToleranceOperator, herder.statusBackingStore(), executor, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), plugins.safeLoaderSwapper()); + retryWithToleranceOperator, herder.statusBackingStore(), executor, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), pluginsMetadata, plugins.safeLoaderSwapper()); } } @@ -1992,7 +2014,8 @@ public WorkerTask doBuild( RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass + Class connectorClass, + TaskPluginsMetadata pluginsMetadata ) { SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connectorConfig.originalsStrings(), config.topicCreationEnable()); @@ -2023,7 +2046,7 @@ public WorkerTask doBuild( headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, errorHandlingMetrics, classLoader, time, retryWithToleranceOperator, herder.statusBackingStore(), sourceConfig, executor, preProducerCheck, postProducerCheck, - () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), plugins.safeLoaderSwapper()); + () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), pluginsMetadata, plugins.safeLoaderSwapper()); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java index a68cdb4ea03d0..8d953d7ded35b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java @@ -37,7 +37,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -59,13 +58,12 @@ public class WorkerConfig extends AbstractConfig { private static final Logger log = LoggerFactory.getLogger(WorkerConfig.class); public static final String 
BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"; - public static final String BOOTSTRAP_SERVERS_DOC = + public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs used to establish the initial connection to the Kafka cluster. " + "Clients use this list to bootstrap and discover the full set of Kafka brokers. " + "While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. " + "This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. " + "This list must be in the form host1:port1,host2:port2,...."; - public static final String BOOTSTRAP_SERVERS_DEFAULT = "localhost:9092"; public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; public static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC; @@ -137,7 +135,7 @@ public class WorkerConfig extends AbstractConfig { + "plugins and their dependencies\n" + "Note: symlinks will be followed to discover dependencies or plugins.\n" + "Examples: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins," - + "/opt/connectors\n" + + "/opt/connectors\n" + "Do not use config provider variables in this property, since the raw path is used " + "by the worker's scanner before config providers are initialized and used to " + "replace variables."; @@ -154,19 +152,15 @@ public class WorkerConfig extends AbstractConfig { + "* " + SERVICE_LOAD + ": Discover plugins only by ServiceLoader. Faster startup than other modes. " + "Plugins which are not discoverable by ServiceLoader may not be usable."; - public static final String CONFIG_PROVIDERS_CONFIG = "config.providers"; - protected static final String CONFIG_PROVIDERS_DOC = - "Comma-separated names of ConfigProvider classes, loaded and used " - + "in the order specified. Implementing the interface " - + "ConfigProvider allows you to replace variable references in connector configurations, " - + "such as for externalized secrets. "; + public static final String CONFIG_PROVIDERS_CONFIG = AbstractConfig.CONFIG_PROVIDERS_CONFIG; + protected static final String CONFIG_PROVIDERS_DOC = AbstractConfig.CONFIG_PROVIDERS_DOC; public static final String CONNECTOR_CLIENT_POLICY_CLASS_CONFIG = "connector.client.config.override.policy"; public static final String CONNECTOR_CLIENT_POLICY_CLASS_DOC = "Class name or alias of implementation of ConnectorClientConfigOverridePolicy. Defines what client configurations can be " - + "overridden by the connector. The default implementation is `All`, meaning connector configurations can override all client properties. " - + "The other possible policies in the framework include `None` to disallow connectors from overriding client properties, " - + "and `Principal` to allow connectors to override only client principals."; + + "overridden by the connector. The default implementation is All, meaning connector configurations can override all client properties. 
" + + "The other possible policies in the framework include None to disallow connectors from overriding client properties, " + + "and Principal to allow connectors to override only client principals."; public static final String CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT = "All"; @@ -191,7 +185,7 @@ public class WorkerConfig extends AbstractConfig { public static final String TOPIC_CREATION_ENABLE_CONFIG = "topic.creation.enable"; protected static final String TOPIC_CREATION_ENABLE_DOC = "Whether to allow " + "automatic creation of topics used by source connectors, when source connectors " - + "are configured with `" + TOPIC_CREATION_PREFIX + "` properties. Each task will use an " + + "are configured with " + TOPIC_CREATION_PREFIX + " properties. Each task will use an " + "admin client to create its topics and will not depend on the Kafka brokers " + "to create topics automatically."; protected static final boolean TOPIC_CREATION_ENABLE_DEFAULT = true; @@ -203,7 +197,8 @@ public class WorkerConfig extends AbstractConfig { */ protected static ConfigDef baseConfigDef() { ConfigDef result = new ConfigDef() - .define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, BOOTSTRAP_SERVERS_DEFAULT, + .define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, ConfigDef.NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), Importance.HIGH, BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, @@ -230,6 +225,7 @@ protected static ConfigDef baseConfigDef() { .define(PLUGIN_PATH_CONFIG, Type.LIST, null, + ConfigDef.ValidList.anyNonDuplicateValues(false, true), Importance.LOW, PLUGIN_PATH_DOC) .define(PLUGIN_DISCOVERY_CONFIG, @@ -250,15 +246,19 @@ protected static ConfigDef baseConfigDef() { Importance.LOW, CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, - JmxReporter.class.getName(), Importance.LOW, + JmxReporter.class.getName(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, Importance.LOW, HEADER_CONVERTER_CLASS_DOC) .define(HEADER_CONVERTER_VERSION, Type.STRING, HEADER_CONVERTER_VERSION_DEFAULT, Importance.LOW, HEADER_CONVERTER_VERSION_DOC) - .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, - Collections.emptyList(), + .define(CONFIG_PROVIDERS_CONFIG, + Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, CONFIG_PROVIDERS_DOC) .define(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, Type.STRING, CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT, Importance.MEDIUM, CONNECTOR_CLIENT_POLICY_CLASS_DOC) @@ -320,7 +320,7 @@ static String lookupKafkaClusterId(Admin adminClient) { private void logInternalConverterRemovalWarnings(Map props) { List removedProperties = new ArrayList<>(); - for (String property : Arrays.asList("internal.key.converter", "internal.value.converter")) { + for (String property : List.of("internal.key.converter", "internal.value.converter")) { if (props.containsKey(property)) { removedProperties.add(property); } @@ -329,12 +329,7 @@ private void logInternalConverterRemovalWarnings(Map props) { if (!removedProperties.isEmpty()) { log.warn( "The worker has been configured with one or more internal converter properties ({}). " - + "Support for these properties was deprecated in version 2.0 and removed in version 3.0, " - + "and specifying them will have no effect. 
" - + "Instead, an instance of the JsonConverter with schemas.enable " - + "set to false will be used. For more information, please visit " - + "https://kafka.apache.org/documentation/#upgrade and consult the upgrade notes" - + "for the 3.0 release.", + + "These properties have been removed since version 3.0 and an instance of the JsonConverter with schemas.enable set to false will be used instead.", removedProperties); } } @@ -347,8 +342,8 @@ private void logPluginPathConfigProviderWarning(Map rawOriginals if (!Objects.equals(rawPluginPath, transformedPluginPath)) { log.warn( "Variables cannot be used in the 'plugin.path' property, since the property is " - + "used by plugin scanning before the config providers that replace the " - + "variables are initialized. The raw value '{}' was used for plugin scanning, as " + + "used by plugin scanning before the config providers that replace the " + + "variables are initialized. The raw value '{}' was used for plugin scanning, as " + "opposed to the transformed value '{}', and this may cause unexpected results.", rawPluginPath, transformedPluginPath diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java index e2473dbbf71e3..3faf70f898c7c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java @@ -78,6 +78,7 @@ private enum State { private volatile Throwable externalFailure; private volatile boolean stopping; // indicates whether the Worker has asked the connector to stop private volatile boolean cancelled; // indicates whether the Worker has cancelled the connector (e.g. 
because of slow shutdown) + private final String version; private State state; private final CloseableOffsetStorageReader offsetStorageReader; @@ -97,8 +98,9 @@ public WorkerConnector(String connName, this.loader = loader; this.ctx = ctx; this.connector = connector; + this.version = connector.version(); this.state = State.INIT; - this.metrics = new ConnectorMetricsGroup(connectMetrics, AbstractStatus.State.UNASSIGNED, statusListener); + this.metrics = new ConnectorMetricsGroup(connectMetrics, AbstractStatus.State.UNASSIGNED, this.version, statusListener); this.statusListener = this.metrics; this.offsetStorageReader = offsetStorageReader; this.offsetStore = offsetStore; @@ -418,6 +420,10 @@ public final boolean isSourceConnector() { return ConnectUtils.isSourceConnector(connector); } + public String connectorVersion() { + return version; + } + protected final String connectorType() { if (isSinkConnector()) return "sink"; @@ -450,7 +456,12 @@ class ConnectorMetricsGroup implements ConnectorStatus.Listener, AutoCloseable { private final MetricGroup metricGroup; private final ConnectorStatus.Listener delegate; - public ConnectorMetricsGroup(ConnectMetrics connectMetrics, AbstractStatus.State initialState, ConnectorStatus.Listener delegate) { + public ConnectorMetricsGroup( + ConnectMetrics connectMetrics, + AbstractStatus.State initialState, + String connectorVersion, + ConnectorStatus.Listener delegate + ) { Objects.requireNonNull(connectMetrics); Objects.requireNonNull(connector); Objects.requireNonNull(initialState); @@ -465,7 +476,7 @@ public ConnectorMetricsGroup(ConnectMetrics connectMetrics, AbstractStatus.State metricGroup.addImmutableValueMetric(registry.connectorType, connectorType()); metricGroup.addImmutableValueMetric(registry.connectorClass, connector.getClass().getName()); - metricGroup.addImmutableValueMetric(registry.connectorVersion, connector.version()); + metricGroup.addImmutableValueMetric(registry.connectorVersion, connectorVersion); metricGroup.addValueMetric(registry.connectorStatus, now -> state.toString().toLowerCase(Locale.getDefault())); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java index 4b8256115ed5d..1de9ff2d9a56e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java @@ -63,16 +63,15 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; import java.util.regex.Pattern; import java.util.stream.Collectors; -import static java.util.Collections.singleton; import static org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG; /** @@ -105,6 +104,7 @@ class WorkerSinkTask extends WorkerTask, SinkReco private boolean committing; private boolean taskStopped; private final WorkerErrantRecordReporter workerErrantRecordReporter; + private final String version; public WorkerSinkTask(ConnectorTaskId id, SinkTask task, @@ -125,9 +125,10 @@ public WorkerSinkTask(ConnectorTaskId id, WorkerErrantRecordReporter workerErrantRecordReporter, StatusBackingStore statusBackingStore, Supplier>>> errorReportersSupplier, + TaskPluginsMetadata pluginsMetadata, Function 
pluginLoaderSwapper) { super(id, statusListener, initialState, loader, connectMetrics, errorMetrics, - retryWithToleranceOperator, transformationChain, errorReportersSupplier, time, statusBackingStore, pluginLoaderSwapper); + retryWithToleranceOperator, transformationChain, errorReportersSupplier, time, statusBackingStore, pluginsMetadata, pluginLoaderSwapper); this.workerConfig = workerConfig; this.task = task; @@ -153,6 +154,7 @@ public WorkerSinkTask(ConnectorTaskId id, this.isTopicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.taskStopped = false; this.workerErrantRecordReporter = workerErrantRecordReporter; + this.version = task.version(); } @Override @@ -227,6 +229,11 @@ public void execute() { } } + @Override + public String taskVersion() { + return version; + } + protected void iteration() { final long offsetCommitIntervalMs = workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG); @@ -359,12 +366,12 @@ boolean isCommitting() { //VisibleForTesting Map lastCommittedOffsets() { - return Collections.unmodifiableMap(lastCommittedOffsets); + return Map.copyOf(lastCommittedOffsets); } //VisibleForTesting Map currentOffsets() { - return Collections.unmodifiableMap(currentOffsets); + return Map.copyOf(currentOffsets); } private void doCommitSync(Map offsets, int seqno) { @@ -605,7 +612,7 @@ protected WorkerErrantRecordReporter workerErrantRecordReporter() { private void resumeAll() { for (TopicPartition tp : consumer.assignment()) if (!context.pausedPartitions().contains(tp)) - consumer.resume(singleton(tp)); + consumer.resume(Set.of(tp)); } private void pauseAll() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java index 11b8446b3d818..9b31b6e35d95c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java @@ -27,10 +27,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -117,7 +117,7 @@ public void pause(TopicPartition... partitions) { if (sinkTask.shouldPause()) { log.debug("{} Connector is paused, so not pausing consumer's partitions {}", this, partitions); } else { - consumer.pause(Arrays.asList(partitions)); + consumer.pause(List.of(partitions)); log.debug("{} Pausing partitions {}. Connector is not paused.", this, partitions); } } catch (IllegalStateException e) { @@ -131,12 +131,13 @@ public void resume(TopicPartition... 
partitions) { throw new IllegalWorkerStateException("SinkTaskContext may not be used to resume consumption until the task is initialized"); } try { - pausedPartitions.removeAll(Arrays.asList(partitions)); + List partitionList = List.of(partitions); + partitionList.forEach(pausedPartitions::remove); if (sinkTask.shouldPause()) { - log.debug("{} Connector is paused, so not resuming consumer's partitions {}", this, partitions); + log.debug("{} Connector is paused, so not resuming consumer's partitions {}", this, partitionList); } else { - consumer.resume(Arrays.asList(partitions)); - log.debug("{} Resuming partitions: {}", this, partitions); + consumer.resume(partitionList); + log.debug("{} Resuming partitions: {}", this, partitionList); } } catch (IllegalStateException e) { throw new IllegalWorkerStateException("SinkTasks may not resume partitions that are not currently assigned to them.", e); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java index 0806e8877355b..3ccd530be3900 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java @@ -94,12 +94,13 @@ public WorkerSourceTask(ConnectorTaskId id, StatusBackingStore statusBackingStore, Executor closeExecutor, Supplier>> errorReportersSupplier, + TaskPluginsMetadata pluginsMetadata, Function pluginLoaderSwapper) { super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, null, producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, loader, - time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginLoaderSwapper); + time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginsMetadata, pluginLoaderSwapper); this.committableOffsets = CommittableOffsets.EMPTY; this.submittedRecords = new SubmittedRecords(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java index fa28a4e7b0ea9..1661d710a8659 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java @@ -41,6 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; @@ -94,9 +95,10 @@ public WorkerTask(ConnectorTaskId id, Supplier>> errorReportersSupplier, Time time, StatusBackingStore statusBackingStore, + TaskPluginsMetadata pluginsMetadata, Function pluginLoaderSwapper) { this.id = id; - this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener); + this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener, pluginsMetadata); this.errorMetrics = errorMetrics; this.statusListener = taskMetricsGroup; this.loader = loader; @@ -196,6 +198,8 @@ void doStart() { protected abstract void close(); + protected abstract String taskVersion(); + protected boolean isFailed() { return failed; } @@ -397,14 +401,25 @@ TaskMetricsGroup taskMetricsGroup() { static class TaskMetricsGroup implements TaskStatus.Listener 
{ private final TaskStatus.Listener delegateListener; private final MetricGroup metricGroup; + private final List transformationGroups = new ArrayList<>(); + private final List predicateGroups = new ArrayList<>(); private final Time time; private final StateTracker taskStateTimer; private final Sensor commitTime; private final Sensor batchSize; private final Sensor commitAttempts; + private final ConnectMetrics connectMetrics; + private final ConnectorTaskId id; public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskStatus.Listener statusListener) { + this(id, connectMetrics, statusListener, null); + } + + public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskStatus.Listener statusListener, TaskPluginsMetadata pluginsMetadata) { delegateListener = statusListener; + this.connectMetrics = connectMetrics; + this.id = id; + time = connectMetrics.time(); taskStateTimer = new StateTracker(); ConnectMetricsRegistry registry = connectMetrics.registry(); @@ -434,6 +449,7 @@ public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskS Frequencies commitFrequencies = Frequencies.forBooleanValues(offsetCommitFailures, offsetCommitSucceeds); commitAttempts = metricGroup.sensor("offset-commit-completion"); commitAttempts.add(commitFrequencies); + addPluginInfoMetric(pluginsMetadata); } private void addRatioMetric(final State matchingState, MetricNameTemplate template) { @@ -442,8 +458,52 @@ private void addRatioMetric(final State matchingState, MetricNameTemplate templa taskStateTimer.durationRatio(matchingState, now)); } + private void addPluginInfoMetric(TaskPluginsMetadata pluginsMetadata) { + if (pluginsMetadata == null) { + return; + } + ConnectMetricsRegistry registry = connectMetrics.registry(); + metricGroup.addValueMetric(registry.taskConnectorClass, now -> pluginsMetadata.connectorClass()); + metricGroup.addValueMetric(registry.taskConnectorClassVersion, now -> pluginsMetadata.connectorVersion()); + metricGroup.addValueMetric(registry.taskConnectorType, now -> pluginsMetadata.connectorType()); + metricGroup.addValueMetric(registry.taskClass, now -> pluginsMetadata.taskClass()); + metricGroup.addValueMetric(registry.taskVersion, now -> pluginsMetadata.taskVersion()); + metricGroup.addValueMetric(registry.taskKeyConverterClass, now -> pluginsMetadata.keyConverterClass()); + metricGroup.addValueMetric(registry.taskKeyConverterVersion, now -> pluginsMetadata.keyConverterVersion()); + metricGroup.addValueMetric(registry.taskValueConverterClass, now -> pluginsMetadata.valueConverterClass()); + metricGroup.addValueMetric(registry.taskValueConverterVersion, now -> pluginsMetadata.valueConverterVersion()); + metricGroup.addValueMetric(registry.taskHeaderConverterClass, now -> pluginsMetadata.headerConverterClass()); + metricGroup.addValueMetric(registry.taskHeaderConverterVersion, now -> pluginsMetadata.headerConverterVersion()); + + if (!pluginsMetadata.transformations().isEmpty()) { + for (TransformationStage.AliasedPluginInfo entry : pluginsMetadata.transformations()) { + MetricGroup transformationGroup = connectMetrics.group(registry.transformsGroupName(), + registry.connectorTagName(), id.connector(), + registry.taskTagName(), Integer.toString(id.task()), + registry.transformsTagName(), entry.alias()); + transformationGroup.addValueMetric(registry.transformClass, now -> entry.className()); + transformationGroup.addValueMetric(registry.transformVersion, now -> entry.version()); + this.transformationGroups.add(transformationGroup); 
+ } + } + + if (!pluginsMetadata.predicates().isEmpty()) { + for (TransformationStage.AliasedPluginInfo entry : pluginsMetadata.predicates()) { + MetricGroup predicateGroup = connectMetrics.group(registry.predicatesGroupName(), + registry.connectorTagName(), id.connector(), + registry.taskTagName(), Integer.toString(id.task()), + registry.predicateTagName(), entry.alias()); + predicateGroup.addValueMetric(registry.predicateClass, now -> entry.className()); + predicateGroup.addValueMetric(registry.predicateVersion, now -> entry.version()); + this.predicateGroups.add(predicateGroup); + } + } + } + void close() { metricGroup.close(); + transformationGroups.forEach(MetricGroup::close); + predicateGroups.forEach(MetricGroup::close); } void recordCommit(long duration, boolean success) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java index 1436460d1a913..d91e1fec85f46 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java @@ -32,12 +32,12 @@ public interface ConnectAssignor { * method computes an assignment of connectors and tasks among the members of the worker group. * * @param leaderId the leader of the group - * @param protocol the protocol type; for Connect assignors this is "eager", "compatible", or "sessioned" + * @param protocol the protocol type * @param allMemberMetadata the metadata of all the active workers of the group * @param coordinator the worker coordinator that runs this assignor * @return the assignment of connectors and tasks to workers */ - Map performAssignment(String leaderId, String protocol, + Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, List allMemberMetadata, WorkerCoordinator coordinator); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java index 2644e105d4dcd..6b29598ab1014 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java @@ -27,12 +27,12 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol; import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection; @@ -142,7 +142,7 @@ public static ByteBuffer serializeMetadata(WorkerState workerState) { * @return the collection of Connect protocol metadata */ public static JoinGroupRequestProtocolCollection metadataRequest(WorkerState workerState) { - return new JoinGroupRequestProtocolCollection(Collections.singleton( + return new JoinGroupRequestProtocolCollection(Set.of( new JoinGroupRequestProtocol() .setName(EAGER.protocol()) .setMetadata(ConnectProtocol.serializeMetadata(workerState).array())) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java index 16ab0d47a3c72..ccf33926bf93f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java @@ -35,7 +35,6 @@ import java.security.Security; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -120,7 +119,7 @@ public final class DistributedConfig extends WorkerConfig { */ public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = "worker.unsync.backoff.ms"; private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the worker is out of sync with other workers and " + - " fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining."; + " fails to catch up within the worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining."; public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000; public static final String CONFIG_STORAGE_PREFIX = "config.storage."; @@ -207,20 +206,20 @@ public final class DistributedConfig extends WorkerConfig { public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null; public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms"; - public static final String INTER_WORKER_KEY_TTL_MS_MS_DOC = "The TTL of generated session keys used for " + public static final String INTER_WORKER_KEY_TTL_MS_DOC = "The TTL of generated session keys used for " + "internal request validation (in milliseconds)"; - public static final int INTER_WORKER_KEY_TTL_MS_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1)); + public static final int INTER_WORKER_KEY_TTL_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1)); public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm"; public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256"; - public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests" - + "The algorithm '" + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + "' will be used as a default on JVMs that support it; " + public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests. " + + "The algorithm '" + INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT + "' will be used as a default on JVMs that support it; " + "on other JVMs, no default is used and a value for this property must be manually specified in the worker config."; public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms"; - public static final List INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = Collections.singletonList(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT); + public static final List INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = List.of(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT); public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests, " - + "which must include the algorithm used for the " + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + " property. " + + "which must include the algorithm used for the " + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + " property. 
" + "The algorithm(s) '" + INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT + "' will be used as a default on JVMs that provide them; " + "on other JVMs, no default is used and a value for this property must be manually specified in the worker config."; private final Crypto crypto; @@ -491,10 +490,10 @@ private static ConfigDef config(Crypto crypto) { SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC) .define(INTER_WORKER_KEY_TTL_MS_CONFIG, ConfigDef.Type.INT, - INTER_WORKER_KEY_TTL_MS_MS_DEFAULT, + INTER_WORKER_KEY_TTL_MS_DEFAULT, between(0, Integer.MAX_VALUE), ConfigDef.Importance.LOW, - INTER_WORKER_KEY_TTL_MS_MS_DOC) + INTER_WORKER_KEY_TTL_MS_DOC) .define(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, defaultKeyGenerationAlgorithm(crypto), diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java index 7ad7874406249..6c4bed311d36e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java @@ -82,9 +82,7 @@ import org.slf4j.Logger; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -305,7 +303,7 @@ public DistributedHerder(DistributedConfig config, this.restClient = restClient; this.isTopicTrackingEnabled = config.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.restNamespace = Objects.requireNonNull(restNamespace); - this.uponShutdown = Arrays.asList(uponShutdown); + this.uponShutdown = List.of(uponShutdown); String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); String clientId = clientIdConfig.isEmpty() ? "connect-" + workerId : clientIdConfig; @@ -713,7 +711,7 @@ private synchronized boolean updateConfigsWithIncrementalCooperative(AtomicRefer private void processConnectorConfigUpdates(Set connectorConfigUpdates) { // If we only have connector config updates, we can just bounce the updated connectors that are // currently assigned to this worker. - Set localConnectors = assignment == null ? Collections.emptySet() : new HashSet<>(assignment.connectors()); + Set localConnectors = assignment == null ? Set.of() : new HashSet<>(assignment.connectors()); Collection> connectorsToStart = new ArrayList<>(); log.trace("Processing connector config updates; " + "currently-owned connectors are {}, and to-be-updated connectors are {}", @@ -769,7 +767,7 @@ private void processTargetStateChanges(Set connectorTargetStateChanges) private void processTaskConfigUpdatesWithIncrementalCooperative(Set taskConfigUpdates) { Set localTasks = assignment == null - ? Collections.emptySet() + ? Set.of() : new HashSet<>(assignment.tasks()); log.trace("Processing task config updates with incremental cooperative rebalance protocol; " + "currently-owned tasks are {}, and to-be-updated tasks are {}", @@ -781,7 +779,7 @@ private void processTaskConfigUpdatesWithIncrementalCooperative(Set connectors) { Set localTasks = assignment == null - ? Collections.emptySet() + ? 
Set.of() : new HashSet<>(assignment.tasks()); List tasksToStop = localTasks.stream() @@ -966,7 +964,7 @@ private void validateSinkConnectorGroupId(Map config, Map new ConfigValue(overriddenConsumerGroupIdConfig, consumerGroupId, Collections.emptyList(), new ArrayList<>()) + p -> new ConfigValue(overriddenConsumerGroupIdConfig, consumerGroupId, List.of(), new ArrayList<>()) ); if (workerGroupId.equals(consumerGroupId)) { validatedGroupId.addErrorMessage("Consumer group " + consumerGroupId + @@ -1195,7 +1193,7 @@ public void stopConnector(final String connName, final Callback callback) // if the connector is reassigned during the ensuing rebalance, it is likely that it will immediately generate // a non-empty set of task configs). A STOPPED connector with a non-empty set of tasks is less acceptable // and likely to confuse users. - writeTaskConfigs(connName, Collections.emptyList()); + writeTaskConfigs(connName, List.of()); String stageDescription = "writing the STOPPED target stage for connector " + connName + " to the config topic"; try (TickThreadStage stage = new TickThreadStage(stageDescription)) { configBackingStore.putTargetState(connName, TargetState.STOPPED); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java index 0663d9e571052..1004382e1028d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java @@ -52,7 +52,7 @@ public EagerAssignor(LogContext logContext) { } @Override - public Map performAssignment(String leaderId, String protocol, + public Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, List allMemberMetadata, WorkerCoordinator coordinator) { log.debug("Performing task assignment"); @@ -132,13 +132,13 @@ private Map fillAssignmentsAndSerialize(Collection m Map groupAssignment = new HashMap<>(); for (String member : members) { - Collection connectors = connectorAssignments.get(member); + Collection connectors = connectorAssignments.getOrDefault(member, List.of()); if (connectors == null) { - connectors = Collections.emptyList(); + connectors = List.of(); } - Collection tasks = taskAssignments.get(member); + Collection tasks = taskAssignments.getOrDefault(member, List.of()); if (tasks == null) { - tasks = Collections.emptyList(); + tasks = List.of(); } Assignment assignment = new Assignment(error, leaderId, leaderUrl, maxOffset, connectors, tasks); log.debug("Assignment: {} -> {}", member, assignment); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java index d99b38349a88a..3c1f483b1f108 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; @@ -56,7 +55,7 @@ public class ExtendedAssignment extends ConnectProtocol.Assignment { private static final ExtendedAssignment EMPTY = new ExtendedAssignment( CONNECT_PROTOCOL_V1, 
ConnectProtocol.Assignment.NO_ERROR, null, null, -1, - Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), 0); + new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), 0); /** * Create an assignment indicating responsibility for the given connector instances and task Ids. @@ -167,7 +166,7 @@ private Map> revokedAsMap() { // Using LinkedHashMap preserves the ordering, which is helpful for tests and debugging Map> taskMap = new LinkedHashMap<>(); Optional.ofNullable(revokedConnectorIds) - .orElseGet(Collections::emptyList) + .orElseGet(List::of) .stream() .distinct() .forEachOrdered(connectorId -> { @@ -177,7 +176,7 @@ private Map> revokedAsMap() { }); Optional.ofNullable(revokedTaskIds) - .orElseGet(Collections::emptyList) + .orElseGet(List::of) .forEach(taskId -> { String connectorId = taskId.connector(); Collection connectorTasks = @@ -244,7 +243,7 @@ private static Collection extractConnectors(Struct struct, String key) { Object[] connectors = struct.getArray(key); if (connectors == null) { - return Collections.emptyList(); + return List.of(); } List connectorIds = new ArrayList<>(); for (Object structObj : connectors) { @@ -265,7 +264,7 @@ private static Collection extractTasks(Struct struct, String ke Object[] tasks = struct.getArray(key); if (tasks == null) { - return Collections.emptyList(); + return List.of(); } List tasksIds = new ArrayList<>(); for (Object structObj : tasks) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java index 6be247cfc8223..2b8f87c81c71f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java @@ -29,9 +29,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -89,7 +87,7 @@ public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxD this.candidateWorkersForReassignment = new LinkedHashSet<>(); this.delay = 0; this.previousGenerationId = -1; - this.previousMembers = Collections.emptySet(); + this.previousMembers = Set.of(); this.numSuccessiveRevokingRebalances = 0; // By default, initial interval is 1. 
The only corner case is when the user has set maxDelay to 0 // in which case, the exponential backoff delay should be 0 which would return the backoff delay to be 0 always @@ -97,7 +95,7 @@ public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxD } @Override - public Map performAssignment(String leaderId, String protocol, + public Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, List allMemberMetadata, WorkerCoordinator coordinator) { log.debug("Performing task assignment"); @@ -117,7 +115,7 @@ public Map performAssignment(String leaderId, String protoco log.debug("Max config offset root: {}, local snapshot config offsets root: {}", maxOffset, coordinator.configSnapshot().offset()); - short protocolVersion = ConnectProtocolCompatibility.fromProtocol(protocol).protocolVersion(); + short protocolVersion = protocol.protocolVersion(); Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator); if (leaderOffset == null) { @@ -472,7 +470,7 @@ protected void handleLostAssignments(ConnectorsAndTasks lostAssignments, if (scheduledRebalance > 0 && now >= scheduledRebalance) { // delayed rebalance expired and it's time to assign resources log.debug("Delayed rebalance expired. Reassigning lost tasks"); - List candidateWorkerLoad = Collections.emptyList(); + List candidateWorkerLoad = List.of(); if (!candidateWorkersForReassignment.isEmpty()) { candidateWorkerLoad = pickCandidateWorkerForReassignment(completeWorkerAssignment); } @@ -606,7 +604,7 @@ private static Map> diff(Map> ba Map> incremental = new HashMap<>(); for (Map.Entry> entry : base.entrySet()) { List values = new ArrayList<>(entry.getValue()); - values.removeAll(toSubtract.getOrDefault(entry.getKey(), Collections.emptySet())); + values.removeAll(toSubtract.getOrDefault(entry.getKey(), Set.of())); incremental.put(entry.getKey(), values); } return incremental; @@ -643,11 +641,11 @@ private Map performLoadBalancingRevocations( log.trace("No load-balancing revocations required; all workers are either new " + "or will have all currently-assigned connectors and tasks revoked during this round" ); - return Collections.emptyMap(); + return Map.of(); } if (configured.isEmpty()) { log.trace("No load-balancing revocations required; no connectors are currently configured on this cluster"); - return Collections.emptyMap(); + return Map.of(); } Map result = new HashMap<>(); @@ -705,7 +703,7 @@ private Map> loadBalancingRevocations( allocatedResourceName, allocatedResourceName ); - return Collections.emptyMap(); + return Map.of(); } Map> result = new HashMap<>(); @@ -887,12 +885,12 @@ static class ClusterAssignment { private final Set allWorkers; public static final ClusterAssignment EMPTY = new ClusterAssignment( - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap() + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of() ); public ClusterAssignment( @@ -910,7 +908,7 @@ public ClusterAssignment( this.allAssignedConnectors = allAssignedConnectors; this.allAssignedTasks = allAssignedTasks; this.allWorkers = combineCollections( - Arrays.asList(newlyAssignedConnectors, newlyAssignedTasks, newlyRevokedConnectors, newlyRevokedTasks, allAssignedConnectors, allAssignedTasks), + List.of(newlyAssignedConnectors, newlyAssignedTasks, newlyRevokedConnectors, newlyRevokedTasks, allAssignedConnectors, allAssignedTasks), Map::keySet, Collectors.toSet() ); @@ -921,7 +919,7 @@ public Map> 
newlyAssignedConnectors() { } public Collection newlyAssignedConnectors(String worker) { - return newlyAssignedConnectors.getOrDefault(worker, Collections.emptySet()); + return newlyAssignedConnectors.getOrDefault(worker, Set.of()); } public Map> newlyAssignedTasks() { @@ -929,7 +927,7 @@ public Map> newlyAssignedTasks() { } public Collection newlyAssignedTasks(String worker) { - return newlyAssignedTasks.getOrDefault(worker, Collections.emptySet()); + return newlyAssignedTasks.getOrDefault(worker, Set.of()); } public Map> newlyRevokedConnectors() { @@ -937,7 +935,7 @@ public Map> newlyRevokedConnectors() { } public Collection newlyRevokedConnectors(String worker) { - return newlyRevokedConnectors.getOrDefault(worker, Collections.emptySet()); + return newlyRevokedConnectors.getOrDefault(worker, Set.of()); } public Map> newlyRevokedTasks() { @@ -945,7 +943,7 @@ public Map> newlyRevokedTasks() { } public Collection newlyRevokedTasks(String worker) { - return newlyRevokedTasks.getOrDefault(worker, Collections.emptySet()); + return newlyRevokedTasks.getOrDefault(worker, Set.of()); } public Map> allAssignedConnectors() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java index 99daf19d1d90c..edb174ab52bed 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java @@ -36,7 +36,6 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashSet; @@ -229,9 +228,10 @@ protected Map onLeaderElected(String leaderId, if (skipAssignment) throw new IllegalStateException("Can't skip assignment because Connect does not support static membership."); - return ConnectProtocolCompatibility.fromProtocol(protocol) == EAGER - ? eagerAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this) - : incrementalAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this); + ConnectProtocolCompatibility protocolCompatibility = ConnectProtocolCompatibility.fromProtocol(protocol); + return protocolCompatibility == EAGER + ? 
eagerAssignor.performAssignment(leaderId, protocolCompatibility, allMemberMetadata, this) + : incrementalAssignor.performAssignment(leaderId, protocolCompatibility, allMemberMetadata, this); } @Override @@ -459,7 +459,7 @@ public String toString() { public static class ConnectorsAndTasks { public static final ConnectorsAndTasks EMPTY = - new ConnectorsAndTasks(Collections.emptyList(), Collections.emptyList()); + new ConnectorsAndTasks(List.of(), List.of()); private final Collection connectors; private final Collection tasks; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java index 3b9ec5993c66c..bb240af82d79c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java @@ -38,12 +38,11 @@ import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import static java.util.Collections.singleton; - /** * Write the original consumed record into a dead letter queue. The dead letter queue is a Kafka topic located * on the same cluster used by the worker to maintain internal topics. Each connector is typically configured @@ -85,7 +84,7 @@ public static DeadLetterQueueReporter createAndSetup(Map adminPr if (!admin.listTopics().names().get().contains(topic)) { log.error("Topic {} doesn't exist. Will attempt to create topic.", topic); NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor()); - admin.createTopics(singleton(schemaTopicRequest)).all().get(); + admin.createTopics(Set.of(schemaTopicRequest)).all().get(); } } catch (InterruptedException e) { throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java index 51f4e66d6362d..b8b74f12e5f1f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java @@ -26,7 +26,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -101,7 +100,7 @@ public RetryWithToleranceOperator(long errorRetryTimeout, long errorMaxDelayInMi this.errorHandlingMetrics = errorHandlingMetrics; this.stopRequestedLatch = stopRequestedLatch; this.stopping = false; - this.reporters = Collections.emptyList(); + this.reporters = List.of(); } /** @@ -137,7 +136,7 @@ public Future executeFailed(ProcessingContext context, Stage stage, Cla // Visible for testing synchronized Future report(ProcessingContext context) { if (reporters.size() == 1) { - return new WorkerErrantRecordReporter.ErrantRecordFuture(Collections.singletonList(reporters.iterator().next().report(context))); + return new WorkerErrantRecordReporter.ErrantRecordFuture(List.of(reporters.get(0).report(context))); } List> futures = reporters.stream() 
.map(r -> r.report(context)) @@ -357,7 +356,7 @@ public synchronized void close() { e.addSuppressed(t); } } - reporters = Collections.emptyList(); + reporters = List.of(); if (e != null) { throw e; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java index 9f3cd02175069..f89b1f03a75fd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java @@ -23,7 +23,6 @@ import java.net.URL; import java.net.URLClassLoader; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -259,7 +258,7 @@ private void verifyClasspathVersionedPlugin(String fullName, Class plugin, Ve fullName, pluginVersion, range - ), Collections.singletonList(pluginVersion)); + ), List.of(pluginVersion)); } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java index 693972c1989c5..d1829b731dc7d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java @@ -22,9 +22,11 @@ import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.Collections; import java.util.Enumeration; +import java.util.List; import java.util.Objects; -import java.util.Vector; /** * A custom classloader dedicated to loading Connect plugin classes in classloading isolation. 
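The hunks above and below replace Collections.emptyList()/emptySet()/singletonList() and Arrays.asList(...) with the List.of()/Set.of() factories added in JDK 9. Both styles return unmodifiable collections, but the of() factories additionally reject null elements, and Set.of() rejects duplicates, so the swap is only behavior-preserving where nulls and duplicates cannot occur. A minimal, standalone sketch of the differences (illustrative only, not part of the patch):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CollectionFactoryDemo {
    public static void main(String[] args) {
        // Both are unmodifiable: add() throws UnsupportedOperationException.
        List<String> legacy = Collections.emptyList();
        List<String> modern = List.of();

        // Arrays.asList allows nulls and is a fixed-size view over the array;
        // List.of copies the elements and throws NullPointerException on null.
        List<String> withNull = Arrays.asList("a", null);    // ok
        // List.of("a", null);                                // NullPointerException

        // Set.of also rejects duplicates, unlike new HashSet<>(Arrays.asList(...)),
        // which silently de-duplicates.
        Set<String> dedup = new HashSet<>(Arrays.asList("a", "a")); // size 1
        // Set.of("a", "a");                                  // IllegalArgumentException

        System.out.println(legacy + " " + modern + " " + withNull + " " + dedup);
    }
}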
@@ -87,7 +89,7 @@ public URL getResource(String name) { @Override public Enumeration getResources(String name) throws IOException { Objects.requireNonNull(name); - Vector resources = new Vector<>(); + List resources = new ArrayList<>(); for (Enumeration foundLocally = findResources(name); foundLocally.hasMoreElements();) { URL url = foundLocally.nextElement(); if (url != null) @@ -99,7 +101,7 @@ public Enumeration getResources(String name) throws IOException { if (url != null) resources.add(url); } - return resources.elements(); + return Collections.enumeration(resources); } // This method needs to be thread-safe because it is supposed to be called by multiple diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java index 7d5105012b1a0..04be35f2d2603 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java @@ -26,7 +26,6 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; -import java.util.Arrays; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; @@ -67,7 +66,7 @@ public PluginScanResult( this.restExtensions = restExtensions; this.connectorClientConfigPolicies = connectorClientConfigPolicies; this.allPlugins = - Arrays.asList(sinkConnectors, sourceConnectors, converters, headerConverters, transformations, predicates, + List.of(sinkConnectors, sourceConnectors, converters, headerConverters, transformations, predicates, configProviders, restExtensions, connectorClientConfigPolicies); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java index 433c586b55e7f..729074d508e75 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java @@ -34,9 +34,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -204,7 +202,7 @@ public static boolean isClassFile(Path path) { public static Set pluginLocations(String pluginPath, boolean failFast) { if (pluginPath == null) { - return Collections.emptySet(); + return Set.of(); } String[] pluginPathElements = COMMA_WITH_WHITESPACE.split(pluginPath.trim(), -1); Set pluginLocations = new LinkedHashSet<>(); @@ -266,7 +264,7 @@ public static List pluginUrls(Path topPath) throws IOException { Set visited = new HashSet<>(); if (isArchive(topPath)) { - return Collections.singletonList(topPath); + return List.of(topPath); } DirectoryStream topListing = Files.newDirectoryStream( @@ -335,12 +333,12 @@ public static List pluginUrls(Path topPath) throws IOException { if (containsClassFiles) { if (archives.isEmpty()) { - return Collections.singletonList(topPath); + return List.of(topPath); } log.warn("Plugin path contains both java archives and class files. 
Returning only the" + " archives"); } - return Arrays.asList(archives.toArray(new Path[0])); + return List.copyOf(archives); } public static Set pluginSources(Set pluginLocations, ClassLoader classLoader, PluginClassLoaderFactory factory) { @@ -475,7 +473,7 @@ private static Collection forClassLoader(ClassLoader classLoader) { if (classLoader instanceof URLClassLoader) { URL[] urls = ((URLClassLoader) classLoader).getURLs(); if (urls != null) { - result.addAll(new HashSet<>(Arrays.asList(urls))); + result.addAll(new HashSet<>(List.of(urls))); } } classLoader = classLoader.getParent(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java index 97094bc89c835..daf9f2199922b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java @@ -42,10 +42,7 @@ import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -81,7 +78,7 @@ public Plugins(Map props) { } public PluginScanResult initLoaders(Set pluginSources, PluginDiscoveryMode discoveryMode) { - PluginScanResult empty = new PluginScanResult(Collections.emptyList()); + PluginScanResult empty = new PluginScanResult(List.of()); PluginScanResult serviceLoadingScanResult; try { serviceLoadingScanResult = discoveryMode.serviceLoad() ? @@ -94,7 +91,7 @@ public PluginScanResult initLoaders(Set pluginSources, PluginDisco } PluginScanResult reflectiveScanResult = discoveryMode.reflectivelyScan() ? new ReflectionScanner().discoverPlugins(pluginSources) : empty; - PluginScanResult scanResult = new PluginScanResult(Arrays.asList(reflectiveScanResult, serviceLoadingScanResult)); + PluginScanResult scanResult = new PluginScanResult(List.of(reflectiveScanResult, serviceLoadingScanResult)); maybeReportHybridDiscoveryIssue(discoveryMode, serviceLoadingScanResult, scanResult); delegatingLoader.installDiscoveredPlugins(scanResult); return scanResult; @@ -271,7 +268,7 @@ public String latestVersion(String classOrAlias, PluginType... allowedTypes) { public String pluginVersion(String classOrAlias, ClassLoader sourceLoader, PluginType... allowedTypes) { String location = (sourceLoader instanceof PluginClassLoader) ? 
((PluginClassLoader) sourceLoader).location() : null; - PluginDesc desc = delegatingLoader.pluginDesc(classOrAlias, location, new HashSet<>(Arrays.asList(allowedTypes))); + PluginDesc desc = delegatingLoader.pluginDesc(classOrAlias, location, Set.of(allowedTypes)); if (desc != null) { return desc.version(); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java index a1ccf209e695a..da9dd0d2cabf3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java @@ -22,7 +22,6 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -77,12 +76,12 @@ public HeaderConverterPluginVersionRecommender headerConverterPluginVersionRecom return headerConverterPluginVersionRecommender; } - public TransformationPluginRecommender transformationPluginRecommender(String classOrAlias) { - return new TransformationPluginRecommender(classOrAlias); + public TransformationPluginRecommender transformationPluginRecommender(String classOrAliasConfig) { + return new TransformationPluginRecommender(classOrAliasConfig); } - public PredicatePluginRecommender predicatePluginRecommender(String classOrAlias) { - return new PredicatePluginRecommender(classOrAlias); + public PredicatePluginRecommender predicatePluginRecommender(String classOrAliasConfig) { + return new PredicatePluginRecommender(classOrAliasConfig); } public class ConnectorPluginVersionRecommender implements ConfigDef.Recommender { @@ -90,12 +89,12 @@ public class ConnectorPluginVersionRecommender implements ConfigDef.Recommender @Override public List validValues(String name, Map parsedConfig) { if (plugins == null) { - return Collections.emptyList(); + return List.of(); } String connectorClassOrAlias = (String) parsedConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); if (connectorClassOrAlias == null) { //should never happen - return Collections.emptyList(); + return List.of(); } List sourceConnectors = plugins.sourceConnectors(connectorClassOrAlias).stream() .map(PluginDesc::version).distinct().collect(Collectors.toList()); @@ -118,7 +117,7 @@ public class ConverterPluginRecommender implements ConfigDef.Recommender { @Override public List validValues(String name, Map parsedConfig) { if (plugins == null) { - return Collections.emptyList(); + return List.of(); } return plugins.converters().stream() .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); @@ -135,7 +134,7 @@ public class HeaderConverterPluginRecommender implements ConfigDef.Recommender { @Override public List validValues(String name, Map parsedConfig) { if (plugins == null) { - return Collections.emptyList(); + return List.of(); } return plugins.headerConverters().stream() .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); @@ -160,10 +159,10 @@ protected Function> recommendations() { @Override public List validValues(String name, Map parsedConfig) { if (plugins == null) { - return Collections.emptyList(); + return List.of(); } if (parsedConfig.get(converterConfig()) == null) { - return Collections.emptyList(); + return List.of(); } Class converterClass = (Class) 
parsedConfig.get(converterConfig()); return recommendations().apply(converterClass.getName()); @@ -221,10 +220,10 @@ public SMTPluginRecommender(String classOrAliasConfig) { @SuppressWarnings({"rawtypes"}) public List validValues(String name, Map parsedConfig) { if (plugins == null) { - return Collections.emptyList(); + return List.of(); } if (parsedConfig.get(classOrAliasConfig) == null) { - return Collections.emptyList(); + return List.of(); } Class classOrAlias = (Class) parsedConfig.get(classOrAliasConfig); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java index f5de82dab7385..db660f1651f74 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java @@ -26,9 +26,8 @@ import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.server.ResourceConfig; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.Map; public class ConnectRestServer extends RestServer { @@ -48,7 +47,7 @@ public void initializeResources(Herder herder) { @Override protected Collection> regularResources() { - return Arrays.asList( + return List.of( RootResource.class, ConnectorsResource.class, InternalConnectResource.class, @@ -58,9 +57,7 @@ protected Collection> regularResources() { @Override protected Collection> adminResources() { - return Collections.singletonList( - LoggingResource.class - ); + return List.of(LoggingResource.class); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java index 59b0254336312..5bbc3312aa791 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java @@ -57,7 +57,6 @@ import java.net.URI; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.Locale; @@ -97,7 +96,7 @@ public abstract class RestServer { private final Server jettyServer; private final RequestTimeout requestTimeout; - private List> connectRestExtensionPlugins = Collections.emptyList(); + private List> connectRestExtensionPlugins = List.of(); /** * Create a REST server for this herder using the specified configs. 
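In the hunks below, the REST entity classes under rest/entities (ConfigInfo, ConfigInfos, ConfigKeyInfo, ConnectorInfo, and friends) are rewritten as Java records, dropping the hand-written constructors, accessors, equals/hashCode and, in most cases, the explicit @JsonCreator. Jackson's jackson-databind (2.12 and later) can bind records through their canonical constructor and serializes the component accessors, so @JsonProperty names on the components are all that is needed. A minimal round-trip sketch with a hypothetical record, not taken from the patch:

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RecordJsonDemo {
    // Hypothetical DTO, mirroring the shape of the converted entities.
    public record SampleInfo(
            @JsonProperty("name") String name,
            @JsonProperty("error_count") int errorCount
    ) { }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(new SampleInfo("demo", 2));
        SampleInfo parsed = mapper.readValue(json, SampleInfo.class);
        // Records supply equals/hashCode/toString, so the round trip compares equal.
        System.out.println(json + " -> " + parsed.equals(new SampleInfo("demo", 2)));
    }
}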
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java index 96993c37c5ce6..1a08a7eb123d4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java @@ -28,7 +28,6 @@ import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -54,7 +53,7 @@ public abstract class RestServerConfig extends AbstractConfig { " Leave hostname empty to bind to default interface.\n" + " Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084"; // Visible for testing - static final List LISTENERS_DEFAULT = Collections.singletonList("http://:8083"); + static final List LISTENERS_DEFAULT = List.of("http://:8083"); public static final String REST_ADVERTISED_HOST_NAME_CONFIG = "rest.advertised.host.name"; private static final String REST_ADVERTISED_HOST_NAME_DOC @@ -86,7 +85,8 @@ public abstract class RestServerConfig extends AbstractConfig { private static final String ADMIN_LISTENERS_DOC = "List of comma-separated URIs the Admin REST API will listen on." + " The supported protocols are HTTP and HTTPS." + " An empty or blank string will disable this feature." + - " The default behavior is to use the regular listener (specified by the 'listeners' property)."; + " The default behavior is to use the regular listener (specified by the 'listeners' property)." + + " A comma-separated list of valid URLs, e.g., http://localhost:8080,https://localhost:8443."; public static final String ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX = "admin.listeners.https."; public static final String REST_EXTENSION_CLASSES_CONFIG = "rest.extension.classes"; @@ -140,15 +140,15 @@ public abstract class RestServerConfig extends AbstractConfig { public static void addPublicConfig(ConfigDef configDef) { addInternalConfig(configDef); configDef - .define( - REST_EXTENSION_CLASSES_CONFIG, + .define(REST_EXTENSION_CLASSES_CONFIG, ConfigDef.Type.LIST, - "", - ConfigDef.Importance.LOW, REST_EXTENSION_CLASSES_DOC - ).define(ADMIN_LISTENERS_CONFIG, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.LOW, REST_EXTENSION_CLASSES_DOC) + .define(ADMIN_LISTENERS_CONFIG, ConfigDef.Type.LIST, null, - new AdminListenersValidator(), + ConfigDef.ValidList.anyNonDuplicateValues(true, true), ConfigDef.Importance.LOW, ADMIN_LISTENERS_DOC); } @@ -329,37 +329,6 @@ public String toString() { } } - private static class AdminListenersValidator implements ConfigDef.Validator { - @Override - public void ensureValid(String name, Object value) { - if (value == null) { - return; - } - - if (!(value instanceof List items)) { - throw new ConfigException("Invalid value type for admin.listeners (expected list)."); - } - - if (items.isEmpty()) { - return; - } - - for (Object item : items) { - if (!(item instanceof String)) { - throw new ConfigException("Invalid type for admin.listeners (expected String)."); - } - if (Utils.isBlank((String) item)) { - throw new ConfigException("Empty URL found when parsing admin.listeners list."); - } - } - } - - @Override - public String toString() { - return "List of comma-separated URLs, ex: http://localhost:8080,https://localhost:8443."; - } - } - private static class ResponseHttpHeadersValidator implements ConfigDef.Validator { @Override public void 
ensureValid(String name, Object value) { @@ -391,7 +360,7 @@ private static ConfigDef config() { @Override public List adminListeners() { // Disable admin resources (such as the logging resource) - return Collections.emptyList(); + return List.of(); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java index b011fba993403..71d0d18cb861a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java @@ -16,50 +16,10 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class ConfigInfo { - - private final ConfigKeyInfo configKey; - private final ConfigValueInfo configValue; - - @JsonCreator - public ConfigInfo( - @JsonProperty("definition") ConfigKeyInfo configKey, - @JsonProperty("value") ConfigValueInfo configValue) { - this.configKey = configKey; - this.configValue = configValue; - } - - @JsonProperty("definition") - public ConfigKeyInfo configKey() { - return configKey; - } - - @JsonProperty("value") - public ConfigValueInfo configValue() { - return configValue; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConfigInfo that = (ConfigInfo) o; - return Objects.equals(configKey, that.configKey) && - Objects.equals(configValue, that.configValue); - } - - @Override - public int hashCode() { - return Objects.hash(configKey, configValue); - } - - @Override - public String toString() { - return "[" + configKey + "," + configValue + "]"; - } +public record ConfigInfo( + @JsonProperty("definition") ConfigKeyInfo configKey, + @JsonProperty("value") ConfigValueInfo configValue +) { } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java index dd075b5f90df8..28171d8404278 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java @@ -16,84 +16,14 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; -import java.util.Objects; -public class ConfigInfos { - - @JsonProperty("name") - private final String name; - - @JsonProperty("error_count") - private final int errorCount; - - @JsonProperty("groups") - private final List groups; - - @JsonProperty("configs") - private final List configs; - - @JsonCreator - public ConfigInfos(@JsonProperty("name") String name, - @JsonProperty("error_count") int errorCount, - @JsonProperty("groups") List groups, - @JsonProperty("configs") List configs) { - this.name = name; - this.groups = groups; - this.errorCount = errorCount; - this.configs = configs; - } - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public List groups() { - return groups; - } - - @JsonProperty("error_count") - public int errorCount() { - return errorCount; - } - - @JsonProperty("configs") - 
public List values() { - return configs; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConfigInfos that = (ConfigInfos) o; - return Objects.equals(name, that.name) && - Objects.equals(errorCount, that.errorCount) && - Objects.equals(groups, that.groups) && - Objects.equals(configs, that.configs); - } - - @Override - public int hashCode() { - return Objects.hash(name, errorCount, groups, configs); - } - - @Override - public String toString() { - return "[" + - name + - "," + - errorCount + - "," + - groups + - "," + - configs + - "]"; - } - -} +public record ConfigInfos( + @JsonProperty("name") String name, + @JsonProperty("error_count") int errorCount, + @JsonProperty("groups") List groups, + @JsonProperty("configs") List configs +) { +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java index 0b1a41c212ed9..2d3a3f93be151 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java @@ -16,153 +16,21 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; -import java.util.Objects; -public class ConfigKeyInfo { - - private final String name; - private final String type; - private final boolean required; - private final String defaultValue; - private final String importance; - private final String documentation; - private final String group; - private final int orderInGroup; - private final String width; - private final String displayName; - private final List dependents; - - @JsonCreator - public ConfigKeyInfo(@JsonProperty("name") String name, - @JsonProperty("type") String type, - @JsonProperty("required") boolean required, - @JsonProperty("default_value") String defaultValue, - @JsonProperty("importance") String importance, - @JsonProperty("documentation") String documentation, - @JsonProperty("group") String group, - @JsonProperty("order_in_group") int orderInGroup, - @JsonProperty("width") String width, - @JsonProperty("display_name") String displayName, - @JsonProperty("dependents") List dependents) { - this.name = name; - this.type = type; - this.required = required; - this.defaultValue = defaultValue; - this.importance = importance; - this.documentation = documentation; - this.group = group; - this.orderInGroup = orderInGroup; - this.width = width; - this.displayName = displayName; - this.dependents = dependents; - } - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public String type() { - return type; - } - - @JsonProperty - public boolean required() { - return required; - } - - @JsonProperty("default_value") - public String defaultValue() { - return defaultValue; - } - - @JsonProperty - public String documentation() { - return documentation; - } - - @JsonProperty - public String group() { - return group; - } - - @JsonProperty("order") - public int orderInGroup() { - return orderInGroup; - } - - @JsonProperty - public String width() { - return width; - } - - @JsonProperty - public String importance() { - return importance; - } - - @JsonProperty("display_name") - public String 
displayName() { - return displayName; - } - - @JsonProperty - public List dependents() { - return dependents; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConfigKeyInfo that = (ConfigKeyInfo) o; - return Objects.equals(name, that.name) && - Objects.equals(type, that.type) && - Objects.equals(required, that.required) && - Objects.equals(defaultValue, that.defaultValue) && - Objects.equals(importance, that.importance) && - Objects.equals(documentation, that.documentation) && - Objects.equals(group, that.group) && - Objects.equals(orderInGroup, that.orderInGroup) && - Objects.equals(width, that.width) && - Objects.equals(displayName, that.displayName) && - Objects.equals(dependents, that.dependents); - } - - @Override - public int hashCode() { - return Objects.hash(name, type, required, defaultValue, importance, documentation, group, orderInGroup, width, displayName, dependents); - } - - @Override - public String toString() { - return "[" + - name + - "," + - type + - "," + - required + - "," + - defaultValue + - "," + - importance + - "," + - documentation + - "," + - group + - "," + - orderInGroup + - "," + - width + - "," + - displayName + - "," + - dependents + - "]"; - } +public record ConfigKeyInfo( + @JsonProperty("name") String name, + @JsonProperty("type") String type, + @JsonProperty("required") boolean required, + @JsonProperty("default_value") String defaultValue, + @JsonProperty("importance") String importance, + @JsonProperty("documentation") String documentation, + @JsonProperty("group") String group, + @JsonProperty("order_in_group") int orderInGroup, + @JsonProperty("width") String width, + @JsonProperty("display_name") String displayName, + @JsonProperty("dependents") List dependents +) { } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java index a5528730e22fc..7eb5c71d3b3a8 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java @@ -16,88 +16,15 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; -import java.util.Objects; -public class ConfigValueInfo { - private final String name; - private final String value; - private final List recommendedValues; - private final List errors; - private final boolean visible; - - @JsonCreator - public ConfigValueInfo( - @JsonProperty("name") String name, - @JsonProperty("value") String value, - @JsonProperty("recommended_values") List recommendedValues, - @JsonProperty("errors") List errors, - @JsonProperty("visible") boolean visible) { - this.name = name; - this.value = value; - this.recommendedValues = recommendedValues; - this.errors = errors; - this.visible = visible; - } - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public String value() { - return value; - } - - @JsonProperty("recommended_values") - public List recommendedValues() { - return recommendedValues; - } - - @JsonProperty - public List errors() { - return errors; - } - - @JsonProperty - public boolean visible() { - return visible; - } - - @Override - public boolean equals(Object o) { - 
if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConfigValueInfo that = (ConfigValueInfo) o; - return Objects.equals(name, that.name) && - Objects.equals(value, that.value) && - Objects.equals(recommendedValues, that.recommendedValues) && - Objects.equals(errors, that.errors) && - Objects.equals(visible, that.visible); - } - - @Override - public int hashCode() { - return Objects.hash(name, value, recommendedValues, errors, visible); - } - - @Override - public String toString() { - return "[" + - name + - "," + - value + - "," + - recommendedValues + - "," + - errors + - "," + - visible + - "]"; - } - -} +public record ConfigValueInfo( + @JsonProperty("name") String name, + @JsonProperty("value") String value, + @JsonProperty("recommended_values") List recommendedValues, + @JsonProperty("errors") List errors, + @JsonProperty("visible") boolean visible +) { +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java index cb9b26ff040e9..775268677e540 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java @@ -18,66 +18,15 @@ import org.apache.kafka.connect.util.ConnectorTaskId; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; import java.util.Map; -import java.util.Objects; -public class ConnectorInfo { - - private final String name; - private final Map config; - private final List tasks; - private final ConnectorType type; - - @JsonCreator - public ConnectorInfo(@JsonProperty("name") String name, - @JsonProperty("config") Map config, - @JsonProperty("tasks") List tasks, - @JsonProperty("type") ConnectorType type) { - this.name = name; - this.config = config; - this.tasks = tasks; - this.type = type; - } - - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public ConnectorType type() { - return type; - } - - @JsonProperty - public Map config() { - return config; - } - - @JsonProperty - public List tasks() { - return tasks; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ConnectorInfo that = (ConnectorInfo) o; - return Objects.equals(name, that.name) && - Objects.equals(config, that.config) && - Objects.equals(tasks, that.tasks) && - Objects.equals(type, that.type); - } - - @Override - public int hashCode() { - return Objects.hash(name, config, tasks, type); - } - -} +public record ConnectorInfo( + @JsonProperty("name") String name, + @JsonProperty("config") Map config, + @JsonProperty("tasks") List tasks, + @JsonProperty("type") ConnectorType type +) { +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java index bf15772f7fb51..2420c99177987 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java @@ -16,11 +16,9 @@ */ package org.apache.kafka.connect.runtime.rest.entities; 
-import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Map; -import java.util.Objects; /** * Represents a single {partition, offset} pair for either a sink connector or a source connector. For source connectors, @@ -38,49 +36,15 @@ * } * */ -public class ConnectorOffset { - - private final Map partition; - private final Map offset; - - @JsonCreator - public ConnectorOffset(@JsonProperty("partition") Map partition, @JsonProperty("offset") Map offset) { - this.partition = partition; - this.offset = offset; - } - - @JsonProperty - public Map partition() { - return partition; - } - - @JsonProperty - public Map offset() { - return offset; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof ConnectorOffset that)) { - return false; - } - return Objects.equals(this.partition, that.partition) && - Objects.equals(this.offset, that.offset); - } - - @Override - public int hashCode() { - return Objects.hash(partition, offset); - } - +public record ConnectorOffset( + @JsonProperty("partition") Map partition, + @JsonProperty("offset") Map offset +) { @Override public String toString() { return "{" + - "partition=" + partition + - ", offset=" + offset + - '}'; + "partition=" + partition + + ", offset=" + offset + + '}'; } -} +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java index cae87914941dc..c0e6b33e0dc37 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java @@ -18,7 +18,6 @@ import org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.HashMap; @@ -51,19 +50,9 @@ * @see ConnectorsResource#getOffsets * @see ConnectorsResource#alterConnectorOffsets */ -public class ConnectorOffsets { - private final List offsets; - - @JsonCreator - public ConnectorOffsets(@JsonProperty("offsets") List offsets) { - this.offsets = offsets; - } - - @JsonProperty - public List offsets() { - return offsets; - } - +public record ConnectorOffsets( + @JsonProperty("offsets") List offsets +) { public Map, Map> toMap() { Map, Map> partitionOffsetMap = new HashMap<>(); for (ConnectorOffset offset : offsets) { @@ -72,24 +61,8 @@ public List offsets() { return partitionOffsetMap; } - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof ConnectorOffsets that)) { - return false; - } - return Objects.equals(this.offsets, that.offsets); - } - - @Override - public int hashCode() { - return Objects.hashCode(offsets); - } - @Override public String toString() { return Objects.toString(offsets); } -} +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java index 82d9957b40db1..fe53e65e37953 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java +++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java @@ -23,53 +23,24 @@ import java.util.List; import java.util.Objects; -public class ConnectorStateInfo { - - private final String name; - private final ConnectorState connector; - private final List tasks; - private final ConnectorType type; - - @JsonCreator - public ConnectorStateInfo(@JsonProperty("name") String name, - @JsonProperty("connector") ConnectorState connector, - @JsonProperty("tasks") List tasks, - @JsonProperty("type") ConnectorType type) { - this.name = name; - this.connector = connector; - this.tasks = tasks; - this.type = type; - } - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public ConnectorState connector() { - return connector; - } - - @JsonProperty - public List tasks() { - return tasks; - } - - @JsonProperty - public ConnectorType type() { - return type; - } +public record ConnectorStateInfo( + @JsonProperty String name, + @JsonProperty ConnectorState connector, + @JsonProperty List tasks, + @JsonProperty ConnectorType type +) { public abstract static class AbstractState { private final String state; private final String trace; private final String workerId; + private final String version; - public AbstractState(String state, String workerId, String trace) { + public AbstractState(String state, String workerId, String trace, String version) { this.state = state; this.workerId = workerId; this.trace = trace; + this.version = version; } @JsonProperty @@ -87,14 +58,21 @@ public String workerId() { public String trace() { return trace; } + + @JsonProperty + @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = PluginInfo.NoVersionFilter.class) + public String version() { + return version; + } } public static class ConnectorState extends AbstractState { @JsonCreator public ConnectorState(@JsonProperty("state") String state, @JsonProperty("worker_id") String worker, - @JsonProperty("msg") String msg) { - super(state, worker, msg); + @JsonProperty("msg") String msg, + @JsonProperty("version") String version) { + super(state, worker, msg, version); } } @@ -105,8 +83,9 @@ public static class TaskState extends AbstractState implements Comparable config; - private final InitialState initialState; - - @JsonCreator - public CreateConnectorRequest(@JsonProperty("name") String name, @JsonProperty("config") Map config, - @JsonProperty("initial_state") InitialState initialState) { - this.name = name; - this.config = config; - this.initialState = initialState; - } - - @JsonProperty - public String name() { - return name; - } - - @JsonProperty - public Map config() { - return config; - } - - @JsonProperty("initial_state") - public InitialState initialState() { - return initialState; - } +public record CreateConnectorRequest( + @JsonProperty("name") String name, + @JsonProperty("config") Map config, + @JsonProperty("initial_state") InitialState initialState +) { public TargetState initialTargetState() { - if (initialState != null) { - return initialState.toTargetState(); - } else { - return null; - } - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CreateConnectorRequest that = (CreateConnectorRequest) o; - return Objects.equals(name, that.name) && - Objects.equals(config, that.config) && - Objects.equals(initialState, that.initialState); - } - - @Override - public int hashCode() { - return Objects.hash(name, config, initialState); + return 
initialState != null ? initialState.toTargetState() : null; } public enum InitialState { @@ -87,16 +44,11 @@ public static InitialState forValue(String value) { } public TargetState toTargetState() { - switch (this) { - case RUNNING: - return TargetState.STARTED; - case PAUSED: - return TargetState.PAUSED; - case STOPPED: - return TargetState.STOPPED; - default: - throw new IllegalArgumentException("Unknown initial state: " + this); - } + return switch (this) { + case RUNNING -> TargetState.STARTED; + case PAUSED -> TargetState.PAUSED; + case STOPPED -> TargetState.STOPPED; + }; } } -} +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java index ecc4de56cd4bb..67012ebece7ed 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java @@ -16,47 +16,15 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - /** * Standard error format for all REST API failures. These are generated automatically by * {@link org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper} in response to uncaught * {@link org.apache.kafka.connect.errors.ConnectException}s. */ -public class ErrorMessage { - private final int errorCode; - private final String message; - - @JsonCreator - public ErrorMessage(@JsonProperty("error_code") int errorCode, @JsonProperty("message") String message) { - this.errorCode = errorCode; - this.message = message; - } - - @JsonProperty("error_code") - public int errorCode() { - return errorCode; - } - - @JsonProperty - public String message() { - return message; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ErrorMessage that = (ErrorMessage) o; - return Objects.equals(errorCode, that.errorCode) && - Objects.equals(message, that.message); - } - - @Override - public int hashCode() { - return Objects.hash(errorCode, message); - } -} +public record ErrorMessage( + @JsonProperty("error_code") int errorCode, + @JsonProperty String message +) { +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java index 4a9a6be32e539..0f3ce9a7c38e6 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java @@ -20,49 +20,11 @@ import java.util.Objects; -public class LoggerLevel { - - private final String level; - private final Long lastModified; - - public LoggerLevel( - @JsonProperty("level") String level, - @JsonProperty("last_modified") Long lastModified - ) { - this.level = Objects.requireNonNull(level, "level may not be null"); - this.lastModified = lastModified; - } - - @JsonProperty - public String level() { - return level; - } - - @JsonProperty("last_modified") - public Long lastModified() { - return lastModified; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == 
null || getClass() != o.getClass()) - return false; - LoggerLevel that = (LoggerLevel) o; - return level.equals(that.level) && Objects.equals(lastModified, that.lastModified); - } - - @Override - public int hashCode() { - return Objects.hash(level, lastModified); - } - - @Override - public String toString() { - return "LoggerLevel{" - + "level='" + level + '\'' - + ", lastModified=" + lastModified - + '}'; +public record LoggerLevel( + @JsonProperty String level, + @JsonProperty("last_modified") Long lastModified +) { + public LoggerLevel { + Objects.requireNonNull(level, "level may not be null"); } -} +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java index f1f47e53c4445..3c84b44b1d57a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java @@ -16,11 +16,8 @@ */ package org.apache.kafka.connect.runtime.rest.entities; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - /** * Standard format for regular successful REST API responses that look like: *
    @@ -29,32 +26,5 @@
      *     }
      * </pre>
    */ -public class Message { - private final String message; - - @JsonCreator - public Message(@JsonProperty("message") String message) { - this.message = message; - } - - @JsonProperty - public String message() { - return message; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof Message that)) { - return false; - } - return Objects.equals(this.message, that.message); - } - - @Override - public int hashCode() { - return message.hashCode(); - } +public record Message(@JsonProperty String message) { } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java index cd8f3c614a74e..2f21bf9abd4d1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java @@ -19,77 +19,23 @@ import org.apache.kafka.connect.runtime.isolation.PluginDesc; import org.apache.kafka.connect.runtime.isolation.PluginType; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class PluginInfo { - private final String className; - private final PluginType type; - private final String version; - - @JsonCreator - public PluginInfo( - @JsonProperty("class") String className, - @JsonProperty("type") PluginType type, - @JsonProperty("version") String version - ) { - this.className = className; - this.type = type; - this.version = version; - } - - public PluginInfo(PluginDesc plugin) { - this(plugin.className(), plugin.type(), plugin.version()); - } - - @JsonProperty("class") - public String className() { - return className; - } - - @JsonProperty("type") - public String type() { - return type.toString(); - } - +public record PluginInfo( + @JsonProperty("class") String className, + @JsonProperty("type") PluginType type, @JsonProperty("version") @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = NoVersionFilter.class) - public String version() { - return version; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - PluginInfo that = (PluginInfo) o; - return Objects.equals(className, that.className) && - Objects.equals(type, that.type) && - Objects.equals(version, that.version); - } - - @Override - public int hashCode() { - return Objects.hash(className, type, version); - } - - @Override - public String toString() { - return "PluginInfo{" + "className='" + className + '\'' + - ", type=" + type.toString() + - ", version='" + version + '\'' + - '}'; + String version +) { + public PluginInfo(PluginDesc plugin) { + this(plugin.className(), plugin.type(), plugin.version()); } public static final class NoVersionFilter { - // This method is used by Jackson to filter the version field for plugins that don't have a version + // Used by Jackson to filter out undefined versions + @Override public boolean equals(Object obj) { return PluginDesc.UNDEFINED_VERSION.equals(obj); } @@ -100,4 +46,4 @@ public int hashCode() { return super.hashCode(); } } -} +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java index cc5ae3577f0d3..b4d78b7ee8921 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java @@ -18,43 +18,12 @@ import org.apache.kafka.connect.util.ConnectorTaskId; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Map; -import java.util.Objects; -public class TaskInfo { - private final ConnectorTaskId id; - private final Map config; - - @JsonCreator - public TaskInfo(@JsonProperty("id") ConnectorTaskId id, @JsonProperty("config") Map config) { - this.id = id; - this.config = config; - } - - @JsonProperty - public ConnectorTaskId id() { - return id; - } - - @JsonProperty - public Map config() { - return config; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TaskInfo taskInfo = (TaskInfo) o; - return Objects.equals(id, taskInfo.id) && - Objects.equals(config, taskInfo.config); - } - - @Override - public int hashCode() { - return Objects.hash(id, config); - } -} +public record TaskInfo( + @JsonProperty("id") ConnectorTaskId id, + @JsonProperty("config") Map config +) { +} \ No newline at end of file diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java index 640bcc5b82286..8637e79087cab 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java @@ -143,7 +143,8 @@ public List listConnectorPlugins( synchronized (this) { if (connectorsOnly) { return connectorPlugins.stream() - .filter(p -> PluginType.SINK.toString().equals(p.type()) || PluginType.SOURCE.toString().equals(p.type())).toList(); + .filter(p -> p.type() == PluginType.SINK || p.type() == PluginType.SOURCE) + .toList(); } else { return List.copyOf(connectorPlugins); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java index c498f309dd099..148e96a4cee13 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java @@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory; import java.net.URI; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -191,7 +190,7 @@ public Response getConnectorActiveTopics(final @PathParam("connector") String co "Topic tracking is disabled."); } ActiveTopicsInfo info = herder.connectorActiveTopics(connector); - return Response.ok(Collections.singletonMap(info.connector(), info)).build(); + return Response.ok(Map.of(info.connector(), info)).build(); } @PUT diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java index 8f51b6e1b942c..83a175e8d5fff 100644 --- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java @@ -25,7 +25,6 @@ import org.eclipse.jetty.util.ssl.SslContextFactory; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.regex.Pattern; @@ -123,8 +122,10 @@ protected static void configureSslContextFactoryTrustStore(SslContextFactory ssl */ @SuppressWarnings("unchecked") protected static void configureSslContextFactoryAlgorithms(SslContextFactory ssl, Map sslConfigValues) { - List sslEnabledProtocols = (List) getOrDefault(sslConfigValues, SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList(COMMA_WITH_WHITESPACE.split(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS))); - ssl.setIncludeProtocols(sslEnabledProtocols.toArray(new String[0])); + List sslEnabledProtocols = (List) getOrDefault(sslConfigValues, SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(COMMA_WITH_WHITESPACE.split(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS))); + + if (!sslEnabledProtocols.isEmpty()) + ssl.setIncludeProtocols(sslEnabledProtocols.toArray(new String[0])); String sslProvider = (String) sslConfigValues.get(SslConfigs.SSL_PROVIDER_CONFIG); if (sslProvider != null) @@ -133,7 +134,8 @@ protected static void configureSslContextFactoryAlgorithms(SslContextFactory ssl ssl.setProtocol((String) getOrDefault(sslConfigValues, SslConfigs.SSL_PROTOCOL_CONFIG, SslConfigs.DEFAULT_SSL_PROTOCOL)); List sslCipherSuites = (List) sslConfigValues.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG); - if (sslCipherSuites != null) + + if (!sslCipherSuites.isEmpty()) ssl.setIncludeCipherSuites(sslCipherSuites.toArray(new String[0])); ssl.setKeyManagerFactoryAlgorithm((String) getOrDefault(sslConfigValues, SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM)); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java index fc2327a1bf7c6..5626fbc809db5 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java @@ -23,7 +23,6 @@ import org.apache.kafka.connect.util.ConnectorTaskId; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -37,15 +36,15 @@ public class ClusterConfigState { public static final ClusterConfigState EMPTY = new ClusterConfigState( NO_OFFSET, null, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of()); private final long offset; private final SessionKey sessionKey; @@ -232,12 +231,12 @@ public boolean pendingFencing(String connectorName) { */ public List tasks(String connectorName) { if (inconsistentConnectors.contains(connectorName)) { - return Collections.emptyList(); + return List.of(); } Integer numTasks = connectorTaskCounts.get(connectorName); if (numTasks == null) { - return Collections.emptyList(); + return List.of(); } List taskIds = new ArrayList<>(numTasks); @@ -245,7 +244,7 @@ public List tasks(String connectorName) { ConnectorTaskId taskId = new 
ConnectorTaskId(connectorName, taskIndex); taskIds.add(taskId); } - return Collections.unmodifiableList(taskIds); + return List.copyOf(taskIds); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java index 85d241bf7f21f..99e7f94fc5725 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java @@ -28,7 +28,6 @@ import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -398,7 +397,7 @@ private LoggingContext loggingContext() { } private static Future> getFromStore(Optional store, Collection keys) { - return store.map(s -> s.get(keys)).orElseGet(() -> CompletableFuture.completedFuture(Collections.emptyMap())); + return store.map(s -> s.get(keys)).orElseGet(() -> CompletableFuture.completedFuture(Map.of())); } private class ChainedOffsetWriteFuture implements Future { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java index 59caa61266048..200e5e0b48f90 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java @@ -31,7 +31,6 @@ import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.NoSuchFileException; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -112,6 +111,6 @@ protected void save() { @Override public Set> connectorPartitions(String connectorName) { - return connectorPartitions.getOrDefault(connectorName, Collections.emptySet()); + return connectorPartitions.getOrDefault(connectorName, Set.of()); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java index 1cd10c793517e..0e425301c1111 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java @@ -62,7 +62,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -545,7 +544,7 @@ public void removeConnectorConfig(String connector) { log.debug("Removing connector configuration for connector '{}'", connector); try { Timer timer = time.timer(READ_WRITE_TOTAL_TIMEOUT_MS); - List keyValues = Arrays.asList( + List keyValues = List.of( new ProducerKeyValue(CONNECTOR_KEY(connector), null), new ProducerKeyValue(TARGET_STATE_KEY(connector), null) ); @@ -792,7 +791,7 @@ KafkaBasedLog setupAndCreateKafkaBasedLog(String topic, final Wo Map topicSettings = config instanceof DistributedConfig ? 
((DistributedConfig) config).configStorageTopicSettings() - : Collections.emptyMap(); + : Map.of(); NewTopic topicDescription = TopicAdmin.defineTopic(topic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -811,7 +810,7 @@ KafkaBasedLog setupAndCreateKafkaBasedLog(String topic, final Wo * @param timer Timer bounding how long this method can block. The timer is updated before the method returns. */ private void sendPrivileged(String key, byte[] value, Timer timer) throws ExecutionException, InterruptedException, TimeoutException { - sendPrivileged(Collections.singletonList(new ProducerKeyValue(key, value)), timer); + sendPrivileged(List.of(new ProducerKeyValue(key, value)), timer); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java index 7ea2691e0fa8f..7920b3d6e0c0a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java @@ -45,7 +45,6 @@ import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -218,7 +217,7 @@ public void configure(final WorkerConfig config) { protected NewTopic newTopicDescription(final String topic, final WorkerConfig config) { Map topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).offsetStorageTopicSettings() - : Collections.emptyMap(); + : Map.of(); return TopicAdmin.defineTopic(topic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -297,7 +296,7 @@ public Future set(final Map values, final Callback @Override public Set> connectorPartitions(String connectorName) { - return connectorPartitions.getOrDefault(connectorName, Collections.emptySet()); + return connectorPartitions.getOrDefault(connectorName, Set.of()); } protected final Callback> consumedCallback = (error, record) -> { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java index 0a9e383700605..8de8d9ee18a81 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java @@ -53,7 +53,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -101,6 +100,7 @@ public class KafkaStatusBackingStore extends KafkaTopicBasedBackingStore impleme public static final String TRACE_KEY_NAME = "trace"; public static final String WORKER_ID_KEY_NAME = "worker_id"; public static final String GENERATION_KEY_NAME = "generation"; + public static final String VERSION_KEY_NAME = "version"; public static final String TOPIC_STATE_KEY = "topic"; public static final String TOPIC_NAME_KEY = "name"; @@ -113,6 +113,7 @@ public class KafkaStatusBackingStore extends KafkaTopicBasedBackingStore impleme .field(TRACE_KEY_NAME, SchemaBuilder.string().optional().build()) .field(WORKER_ID_KEY_NAME, Schema.STRING_SCHEMA) .field(GENERATION_KEY_NAME, Schema.INT32_SCHEMA) + 
.field(VERSION_KEY_NAME, Schema.OPTIONAL_STRING_SCHEMA) .build(); private static final Schema TOPIC_STATUS_VALUE_SCHEMA_V0 = SchemaBuilder.struct() @@ -197,7 +198,7 @@ public void configure(final WorkerConfig config) { Map topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).statusStorageTopicSettings() - : Collections.emptyMap(); + : Map.of(); NewTopic topicDescription = TopicAdmin.defineTopic(statusTopic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -400,8 +401,8 @@ public TopicStatus getTopic(String connector, String topic) { public Collection getAllTopics(String connector) { ConcurrentMap activeTopics = topics.get(Objects.requireNonNull(connector)); return activeTopics != null - ? Collections.unmodifiableCollection(Objects.requireNonNull(activeTopics.values())) - : Collections.emptySet(); + ? Set.copyOf(Objects.requireNonNull(activeTopics.values())) + : Set.of(); } @Override @@ -428,7 +429,8 @@ private ConnectorStatus parseConnectorStatus(String connector, byte[] data) { String trace = (String) statusMap.get(TRACE_KEY_NAME); String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME); int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue(); - return new ConnectorStatus(connector, state, trace, workerUrl, generation); + String version = (String) statusMap.get(VERSION_KEY_NAME); + return new ConnectorStatus(connector, state, trace, workerUrl, generation, version); } catch (Exception e) { log.error("Failed to deserialize connector status", e); return null; @@ -448,7 +450,8 @@ private TaskStatus parseTaskStatus(ConnectorTaskId taskId, byte[] data) { String trace = (String) statusMap.get(TRACE_KEY_NAME); String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME); int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue(); - return new TaskStatus(taskId, state, workerUrl, generation, trace); + String version = (String) statusMap.get(VERSION_KEY_NAME); + return new TaskStatus(taskId, state, workerUrl, generation, trace, version); } catch (Exception e) { log.error("Failed to deserialize task status", e); return null; @@ -487,6 +490,7 @@ private byte[] serialize(AbstractStatus status) { struct.put(TRACE_KEY_NAME, status.trace()); struct.put(WORKER_ID_KEY_NAME, status.workerId()); struct.put(GENERATION_KEY_NAME, status.generation()); + struct.put(VERSION_KEY_NAME, status.version()); return converter.fromConnectData(statusTopic, STATUS_SCHEMA_V0, struct); } @@ -504,7 +508,7 @@ protected byte[] serializeTopicStatus(TopicStatus status) { return converter.fromConnectData( statusTopic, TOPIC_STATUS_SCHEMA_V0, - Collections.singletonMap(TOPIC_STATE_KEY, struct)); + Map.of(TOPIC_STATE_KEY, struct)); } private String parseConnectorStatusKey(String key) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java index f4981e809530f..254aaf89584a6 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java @@ -22,11 +22,11 @@ import org.apache.kafka.connect.runtime.WorkerConfigTransformer; import org.apache.kafka.connect.util.ConnectorTaskId; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import 
java.util.TreeMap; import java.util.concurrent.TimeUnit; @@ -82,11 +82,11 @@ public synchronized ClusterConfigState snapshot() { connectorConfigs, connectorTargetStates, taskConfigs, - Collections.emptyMap(), - Collections.emptyMap(), + Map.of(), + Map.of(), appliedConnectorConfigs, - Collections.emptySet(), - Collections.emptySet(), + Set.of(), + Set.of(), configTransformer ); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java index a51a405d3de0f..a465bea968937 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java @@ -24,7 +24,6 @@ import org.apache.kafka.connect.util.Table; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -119,8 +118,8 @@ public TopicStatus getTopic(String connector, String topic) { public Collection getAllTopics(String connector) { ConcurrentMap activeTopics = topics.get(Objects.requireNonNull(connector)); return activeTopics != null - ? Collections.unmodifiableCollection(activeTopics.values()) - : Collections.emptySet(); + ? Set.copyOf(activeTopics.values()) + : Set.of(); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java index d9776e05dd3db..c17d2fb099ca0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java @@ -23,11 +23,10 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CancellationException; @@ -61,7 +60,7 @@ public OffsetStorageReaderImpl(OffsetBackingStore backingStore, String namespace @Override public Map offset(Map partition) { - return offsets(Collections.singletonList(partition)).get(partition); + return offsets(List.of(partition)).get(partition); } @Override @@ -73,7 +72,7 @@ public Map, Map> offsets(Collection> DocInfo(Class
    predicateClass, String overvie } } - private static final List PREDICATES = new Plugins(Collections.emptyMap()).predicates().stream() + private static final List PREDICATES = new Plugins(Map.of()).predicates().stream() .map(p -> { try { String overviewDoc = (String) p.pluginClass().getDeclaredField("OVERVIEW_DOC").get(null); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java index 95eeed2e0f499..e3e9ad063d2f7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java @@ -34,7 +34,6 @@ import org.apache.kafka.connect.transforms.TimestampRouter; import org.apache.kafka.connect.transforms.ValueToKey; -import java.util.Arrays; import java.util.List; public class TransformationDoc { @@ -42,7 +41,7 @@ public class TransformationDoc { private record DocInfo(String transformationName, String overview, ConfigDef configDef) { } - private static final List TRANSFORMATIONS = Arrays.asList( + private static final List TRANSFORMATIONS = List.of( new DocInfo(Cast.class.getName(), Cast.OVERVIEW_DOC, Cast.CONFIG_DEF), new DocInfo(DropHeaders.class.getName(), DropHeaders.OVERVIEW_DOC, DropHeaders.CONFIG_DEF), new DocInfo(ExtractField.class.getName(), ExtractField.OVERVIEW_DOC, ExtractField.CONFIG_DEF), diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java index e36df1b7dbc57..5452ee9e1ee18 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java @@ -44,7 +44,6 @@ import java.time.Duration; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -185,8 +184,8 @@ public static KafkaBasedLog withExistingClients(String topic, Objects.requireNonNull(topicAdmin); Objects.requireNonNull(readTopicPartition); return new KafkaBasedLog<>(topic, - Collections.emptyMap(), - Collections.emptyMap(), + Map.of(), + Map.of(), () -> topicAdmin, consumedCallback, time, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java index fa6a0b9cccd91..a83c515e73adf 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java @@ -19,9 +19,9 @@ import org.slf4j.MDC; import java.util.Collection; -import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * A utility for defining Mapped Diagnostic Context (MDC) for SLF4J logs. @@ -49,7 +49,7 @@ public final class LoggingContext implements AutoCloseable { */ public static final String CONNECTOR_CONTEXT = "connector.context"; - public static final Collection ALL_CONTEXTS = Collections.singleton(CONNECTOR_CONTEXT); + public static final Collection ALL_CONTEXTS = Set.of(CONNECTOR_CONTEXT); /** * The Scope values used by Connect when specifying the context. 
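The bulk of this patch swaps the pre-Java-9 Collections/Arrays helpers for the List.of/Set.of/Map.of factories and the corresponding copyOf methods. Below is a minimal standalone sketch (not part of the patch; class name and sample values are purely illustrative) of the behavioural differences that matter when making this substitution: the new factories reject null elements, keys, and values, and copyOf produces a detached immutable snapshot rather than the live read-only view returned by Collections.unmodifiableList. This is also why call sites whose input may legitimately be null, such as the cipher-suite list in SSLUtils above, still need an explicit null check after the migration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Illustrative sketch only; not part of the Kafka code base.
public class ImmutableCollectionsSketch {
    public static void main(String[] args) {
        // Collections.unmodifiableList is a live read-only *view*: later changes
        // to the backing list remain visible through it.
        List<String> backing = new ArrayList<>(List.of("a"));
        List<String> view = Collections.unmodifiableList(backing);
        // List.copyOf takes an immutable *snapshot*: later changes are not visible.
        List<String> snapshot = List.copyOf(backing);
        backing.add("b");
        System.out.println(view);     // [a, b]
        System.out.println(snapshot); // [a]

        // Arrays.asList tolerates null elements; List.of does not.
        List<String> withNull = Arrays.asList("x", null); // allowed
        System.out.println(withNull);
        try {
            List.of("x", null);
        } catch (NullPointerException e) {
            System.out.println("List.of rejects null elements");
        }

        // Collections.singletonMap allows a null value; Map.of throws.
        Map<String, String> legacy = Collections.singletonMap("k", null); // allowed
        System.out.println(legacy);
        try {
            Map.of("k", null);
        } catch (NullPointerException e) {
            System.out.println("Map.of rejects null keys and values");
        }
    }
}

The same null- and duplicate-hostility applies to Set.of and Set.copyOf, which is worth keeping in mind when reviewing the test changes that follow, where singleton and empty collections are replaced wholesale.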
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java index 70bcf8c427e6b..620eec2f13933 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java @@ -23,7 +23,6 @@ import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -48,7 +47,7 @@ public static ConnectorOffsets consumerGroupOffsetsToConnectorOffsets(Map row(R row) { Map columns = table.get(row); if (columns == null) - return Collections.emptyMap(); - return Collections.unmodifiableMap(columns); + return Map.of(); + return Map.copyOf(columns); } public boolean isEmpty() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java index 348beb002330c..67285c1c197cc 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java @@ -56,7 +56,6 @@ import java.time.Duration; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -73,8 +72,8 @@ */ public class TopicAdmin implements AutoCloseable { - public static final TopicCreationResponse EMPTY_CREATION = new TopicCreationResponse(Collections.emptySet(), Collections.emptySet()); - private static final List> CAUSES_TO_RETRY_TOPIC_CREATION = Arrays.asList( + public static final TopicCreationResponse EMPTY_CREATION = new TopicCreationResponse(Set.of(), Set.of()); + private static final List> CAUSES_TO_RETRY_TOPIC_CREATION = List.of( InvalidReplicationFactorException.class, TimeoutException.class); @@ -84,8 +83,8 @@ public static class TopicCreationResponse { private final Set existing; public TopicCreationResponse(Set createdTopicNames, Set existingTopicNames) { - this.created = Collections.unmodifiableSet(createdTopicNames); - this.existing = Collections.unmodifiableSet(existingTopicNames); + this.created = Set.copyOf(createdTopicNames); + this.existing = Set.copyOf(existingTopicNames); } public Set createdTopics() { @@ -473,12 +472,12 @@ public TopicCreationResponse createOrFindTopics(NewTopic... topics) { */ public Map describeTopics(String... 
topics) { if (topics == null) { - return Collections.emptyMap(); + return Map.of(); } String topicNameList = String.join(", ", topics); Map> newResults = - admin.describeTopics(Arrays.asList(topics), new DescribeTopicsOptions()).topicNameValues(); + admin.describeTopics(List.of(topics), new DescribeTopicsOptions()).topicNameValues(); // Iterate over each future so that we can handle individual failures like when some topics don't exist Map existingTopics = new HashMap<>(); @@ -536,7 +535,7 @@ public boolean verifyTopicCleanupPolicyOnlyCompact(String topic, String workerTo + "describe topic configurations.", topic, TopicConfig.CLEANUP_POLICY_COMPACT); return false; } - Set expectedPolicies = Collections.singleton(TopicConfig.CLEANUP_POLICY_COMPACT); + Set expectedPolicies = Set.of(TopicConfig.CLEANUP_POLICY_COMPACT); if (!cleanupPolicies.equals(expectedPolicies)) { String expectedPolicyStr = String.join(",", expectedPolicies); String cleanupPolicyStr = String.join(",", cleanupPolicies); @@ -566,7 +565,7 @@ public Set topicCleanupPolicy(String topic) { if (topicConfig == null) { // The topic must not exist log.debug("Unable to find topic '{}' when getting cleanup policy", topic); - return Collections.emptySet(); + return Set.of(); } ConfigEntry entry = topicConfig.get(CLEANUP_POLICY_CONFIG); if (entry != null && entry.value() != null) { @@ -581,7 +580,7 @@ public Set topicCleanupPolicy(String topic) { // This is unexpected, as the topic config should include the cleanup.policy even if // the topic settings don't override the broker's log.cleanup.policy. But just to be safe. log.debug("Found no cleanup.policy for topic '{}'", topic); - return Collections.emptySet(); + return Set.of(); } /** @@ -620,7 +619,7 @@ public Config describeTopicConfig(String topic) { */ public Map describeTopicConfigs(String... topicNames) { if (topicNames == null) { - return Collections.emptyMap(); + return Map.of(); } Collection topics = Arrays.stream(topicNames) .filter(Objects::nonNull) @@ -628,7 +627,7 @@ public Map describeTopicConfigs(String... topicNames) { .filter(s -> !s.isEmpty()) .collect(Collectors.toList()); if (topics.isEmpty()) { - return Collections.emptyMap(); + return Map.of(); } String topicNameList = String.join(", ", topics); Collection resources = topics.stream() @@ -686,7 +685,7 @@ public Map describeTopicConfigs(String... 
topicNames) { */ public Map endOffsets(Set partitions) { if (partitions == null || partitions.isEmpty()) { - return Collections.emptyMap(); + return Map.of(); } Map offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest())); ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED)); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java index 45c12aa292a39..f98d1afa5b239 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java @@ -18,7 +18,6 @@ import org.apache.kafka.connect.runtime.WorkerConfig; -import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; @@ -32,7 +31,7 @@ */ public class TopicCreation { private static final TopicCreation EMPTY = - new TopicCreation(false, null, Collections.emptyMap(), Collections.emptySet()); + new TopicCreation(false, null, Map.of(), Set.of()); private final boolean isTopicCreationEnabled; private final TopicCreationGroup defaultTopicGroup; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java index 5393fd2a01310..e5694c944d4c8 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java @@ -20,7 +20,6 @@ import org.apache.kafka.connect.runtime.SourceConnectorConfig; import org.apache.kafka.connect.runtime.TopicCreationConfig; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -64,7 +63,7 @@ protected TopicCreationGroup(String group, SourceConnectorConfig config) { */ public static Map configuredGroups(SourceConnectorConfig config) { if (!config.usesTopicCreation()) { - return Collections.emptyMap(); + return Map.of(); } List groupNames = config.getList(TOPIC_CREATION_GROUPS_CONFIG); Map groups = new LinkedHashMap<>(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java index 82d67254f504b..286a8c212fe8e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java @@ -22,7 +22,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -32,7 +31,7 @@ public class NoneConnectorClientConfigOverridePolicyTest extends BaseConnectorCl @Test public void testNoOverrides() { - testValidOverride(Collections.emptyMap()); + testValidOverride(Map.of()); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java index 94567f960f4ee..1b566a6de7747 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java @@ -22,7 +22,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -32,8 +31,7 @@ public class PrincipalConnectorClientConfigOverridePolicyTest extends BaseConnec @Test public void testPrincipalOnly() { - Map clientConfig = Collections.singletonMap(SaslConfigs.SASL_JAAS_CONFIG, "test"); - testValidOverride(clientConfig); + testValidOverride(Map.of(SaslConfigs.SASL_JAAS_CONFIG, "test")); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java index eac691ab06761..89456699e6933 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java @@ -25,7 +25,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -42,7 +42,7 @@ public class BooleanConverterTest { @BeforeEach public void setUp() { - converter.configure(Collections.emptyMap(), false); + converter.configure(Map.of(), false); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java index 7386360f4fa82..0d926e42dd6f6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java @@ -27,7 +27,7 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -42,7 +42,7 @@ public class ByteArrayConverterTest { @BeforeEach public void setUp() { - converter.configure(Collections.emptyMap(), false); + converter.configure(Map.of(), false); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java index f47be0ba5d1d8..87608fa07df57 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java @@ -48,7 +48,6 @@ import org.slf4j.LoggerFactory; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -577,7 +576,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { block.maybeBlockOn(CONNECTOR_TASK_CONFIGS); - return Collections.singletonList(Collections.emptyMap()); + return List.of(Map.of()); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java index ad8328deabe11..d46d76c3606ef 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java @@ -58,7 +58,6 @@ import java.io.FileOutputStream; import java.nio.file.Path; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -229,7 +228,7 @@ public void testRestartFailedTask() throws Exception { // Restart the failed task String taskRestartEndpoint = connect.endpointForResource( String.format("connectors/%s/tasks/0/restart", CONNECTOR_NAME)); - connect.requestPost(taskRestartEndpoint, "", Collections.emptyMap()); + connect.requestPost(taskRestartEndpoint, "", Map.of()); // Ensure the task started successfully this time connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, numTasks, @@ -374,7 +373,7 @@ public void testSourceTaskNotBlockedOnShutdownWithNonExistentTopic() throws Exce NUM_TASKS, "Connector tasks did not start in time"); connector.awaitRecords(TimeUnit.MINUTES.toMillis(1)); - // Then if we delete the connector, it and each of its tasks should be stopped by the framework + // Then, if we delete the connector, it and each of its tasks should be stopped by the framework // even though the producer is blocked because there is no topic StartAndStopLatch stopCounter = connector.expectedStops(1); connect.deleteConnector(CONNECTOR_NAME); @@ -434,8 +433,8 @@ public void testPauseStopResume() throws Exception { "Connector did not stop in time" ); // If the connector is truly stopped, we should also see an empty set of tasks and task configs - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Transition to RUNNING connect.resumeConnector(CONNECTOR_NAME); @@ -463,8 +462,8 @@ public void testPauseStopResume() throws Exception { CONNECTOR_NAME, "Connector did not stop in time" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Transition to PAUSED connect.pauseConnector(CONNECTOR_NAME); @@ -520,8 +519,8 @@ public void testStoppedState() throws Exception { "Connector did not stop in time" ); // If the connector is truly stopped, we should also see an empty set of tasks and task configs - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Can resume a connector after its Connector has failed before shutdown after receiving a stop request props.remove("connector.start.inject.error"); @@ -542,8 +541,8 @@ public void testStoppedState() throws Exception { CONNECTOR_NAME, "Connector did not stop in time" ); - assertEquals(Collections.emptyList(), 
connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Can resume a connector after its Connector has failed during shutdown after receiving a stop request connect.resumeConnector(CONNECTOR_NAME); @@ -580,8 +579,8 @@ public void testCreateConnectorWithPausedInitialState() throws Exception { 0, "Connector was not created in a paused state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the PAUSED state can be resumed successfully connect.resumeConnector(CONNECTOR_NAME); @@ -615,16 +614,16 @@ public void testCreateSourceConnectorWithStoppedInitialStateAndModifyOffsets() t CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that the offsets can be modified for a source connector created in the STOPPED state // Alter the offsets so that only 5 messages are produced connect.alterSourceConnectorOffset( CONNECTOR_NAME, - Collections.singletonMap("task.id", CONNECTOR_NAME + "-0"), - Collections.singletonMap("saved", 5L) + Map.of("task.id", CONNECTOR_NAME + "-0"), + Map.of("saved", 5L) ); // Verify that a connector created in the STOPPED state can be resumed successfully @@ -669,8 +668,8 @@ public void testCreateSinkConnectorWithStoppedInitialStateAndModifyOffsets() thr CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that the offsets can be modified for a sink connector created in the STOPPED state @@ -726,8 +725,8 @@ public void testDeleteConnectorCreatedWithPausedOrStoppedInitialState() throws E 0, "Connector was not created in a paused state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the PAUSED state can be deleted successfully connect.deleteConnector(CONNECTOR_NAME); @@ -747,8 +746,8 @@ public void testDeleteConnectorCreatedWithPausedOrStoppedInitialState() throws E CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a 
connector created in the STOPPED state can be deleted successfully connect.deleteConnector(CONNECTOR_NAME); @@ -1014,7 +1013,7 @@ public void testTasksMaxEnforcement() throws Exception { // an existing set of task configs that was written before the cluster was upgraded try (JsonConverter converter = new JsonConverter()) { converter.configure( - Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), + Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), false ); @@ -1326,7 +1325,7 @@ public void testRuntimePropertyReconfiguration() throws Exception { "Connector did not start or task did not fail in time" ); assertEquals( - new ConnectorOffsets(Collections.emptyList()), + new ConnectorOffsets(List.of()), connect.connectorOffsets(CONNECTOR_NAME), "Connector should not have any committed offsets when only task fails on first record" ); @@ -1346,9 +1345,9 @@ public void testRuntimePropertyReconfiguration() throws Exception { Map expectedOffsetKey = new HashMap<>(); expectedOffsetKey.put(SinkUtils.KAFKA_TOPIC_KEY, topic); expectedOffsetKey.put(SinkUtils.KAFKA_PARTITION_KEY, 0); - Map expectedOffsetValue = Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 1); + Map expectedOffsetValue = Map.of(SinkUtils.KAFKA_OFFSET_KEY, 1); ConnectorOffset expectedOffset = new ConnectorOffset(expectedOffsetKey, expectedOffsetValue); - ConnectorOffsets expectedOffsets = new ConnectorOffsets(Collections.singletonList(expectedOffset)); + ConnectorOffsets expectedOffsets = new ConnectorOffsets(List.of(expectedOffset)); // Wait for it to commit offsets, signaling that it has successfully processed the record we produced earlier waitForCondition( @@ -1443,7 +1442,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { return IntStream.range(0, maxTasks) - .mapToObj(i -> Collections.emptyMap()) + .mapToObj(i -> Map.of()) .collect(Collectors.toList()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java index 074c6eb91fb25..7faf2311c133d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java @@ -23,14 +23,12 @@ import org.slf4j.LoggerFactory; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import java.util.stream.Collectors; import java.util.stream.IntStream; /** @@ -283,8 +281,8 @@ public StartAndStopLatch expectedStarts(int expectedStarts, boolean includeTasks List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStarts(expectedStarts)) - .collect(Collectors.toList()) - : Collections.emptyList(); + .toList() + : List.of(); return startAndStopCounter.expectedStarts(expectedStarts, taskLatches); } @@ -292,8 +290,8 @@ public StartAndStopLatch expectedStarts(int expectedStarts, Map List taskLatches = includeTasks ? 
taskHandles.values().stream() .map(task -> task.expectedStarts(expectedTasksStarts.get(task.taskId()))) - .collect(Collectors.toList()) - : Collections.emptyList(); + .toList() + : List.of(); return startAndStopCounter.expectedStarts(expectedStarts, taskLatches); } @@ -345,8 +343,8 @@ public StartAndStopLatch expectedStops(int expectedStops, boolean includeTasks) List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStops(expectedStops)) - .collect(Collectors.toList()) - : Collections.emptyList(); + .toList() + : List.of(); return startAndStopCounter.expectedStops(expectedStops, taskLatches); } @@ -354,8 +352,8 @@ public StartAndStopLatch expectedStops(int expectedStops, Map e List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStops(expectedTasksStops.get(task.taskId()))) - .collect(Collectors.toList()) - : Collections.emptyList(); + .toList() + : List.of(); return startAndStopCounter.expectedStops(expectedStops, taskLatches); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java index 2070f78559131..2859a1c71f063 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java @@ -30,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -131,7 +130,7 @@ public void testRestartUnknownConnectorNoParams() { // Call the Restart API String restartEndpoint = connect.endpointForResource( String.format("connectors/%s/restart", connectorName)); - Response response = connect.requestPost(restartEndpoint, "", Collections.emptyMap()); + Response response = connect.requestPost(restartEndpoint, "", Map.of()); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); } @@ -152,7 +151,7 @@ private void restartUnknownConnector(boolean onlyFailed, boolean includeTasks) { // Call the Restart API String restartEndpoint = connect.endpointForResource( String.format("connectors/%s/restart?onlyFailed=" + onlyFailed + "&includeTasks=" + includeTasks, connectorName)); - Response response = connect.requestPost(restartEndpoint, "", Collections.emptyMap()); + Response response = connect.requestPost(restartEndpoint, "", Map.of()); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); } @@ -213,7 +212,7 @@ public void testFailedTasksRestartBothConnectorAndTasks() throws Exception { @Test public void testOneFailedTasksRestartOnlyOneTasks() throws Exception { - Set tasksToFail = Collections.singleton(taskId(1)); + Set tasksToFail = Set.of(taskId(1)); failedTasksRestart(true, true, 0, buildExpectedTasksRestarts(tasksToFail), tasksToFail, false); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java index c5fcfbca0a7d3..d8572ef3ba957 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java @@ -34,8 +34,6 @@ import 
java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -43,7 +41,6 @@ import java.util.Objects; import java.util.Properties; import java.util.Set; -import java.util.stream.Collectors; import java.util.stream.StreamSupport; import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; @@ -115,7 +112,7 @@ public void testGetActiveTopics() throws InterruptedException { connect.kafka().createTopic(FOO_TOPIC, NUM_TOPIC_PARTITIONS); connect.kafka().createTopic(BAR_TOPIC, NUM_TOPIC_PARTITIONS); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), "Active topic set is not empty for connector: " + FOO_CONNECTOR); // start a source connector @@ -124,8 +121,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(FOO_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.singletonList(FOO_TOPIC), - "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(FOO_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); // start another source connector connect.configureConnector(BAR_CONNECTOR, defaultSourceConnectorProps(BAR_TOPIC)); @@ -133,8 +130,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(BAR_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, Collections.singletonList(BAR_TOPIC), - "Active topic set is not: " + Collections.singletonList(BAR_TOPIC) + " for connector: " + BAR_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, List.of(BAR_TOPIC), + "Active topic set is not: " + List.of(BAR_TOPIC) + " for connector: " + BAR_CONNECTOR); // start a sink connector connect.configureConnector(SINK_CONNECTOR, defaultSinkConnectorProps(FOO_TOPIC, BAR_TOPIC)); @@ -142,8 +139,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(SINK_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Arrays.asList(FOO_TOPIC, BAR_TOPIC), - "Active topic set is not: " + Arrays.asList(FOO_TOPIC, BAR_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC, BAR_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC, BAR_TOPIC) + " for connector: " + SINK_CONNECTOR); // deleting a connector resets its active topics connect.deleteConnector(BAR_CONNECTOR); @@ -151,7 +148,7 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorDoesNotExist(BAR_CONNECTOR, "Connector wasn't deleted in time."); - connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, Collections.emptyList(), + connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, List.of(), "Active topic set is not empty for deleted connector: " + BAR_CONNECTOR); // Unfortunately there's 
currently no easy way to know when the consumer caught up with @@ -162,8 +159,8 @@ public void testGetActiveTopics() throws InterruptedException { // reset active topics for the sink connector after one of the topics has become idle connect.resetConnectorTopics(SINK_CONNECTOR); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), - "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); } @Test @@ -177,7 +174,7 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.kafka().createTopic(FOO_TOPIC, NUM_TOPIC_PARTITIONS); connect.kafka().createTopic(BAR_TOPIC, NUM_TOPIC_PARTITIONS); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), "Active topic set is not empty for connector: " + FOO_CONNECTOR); // start a source connector @@ -186,8 +183,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(FOO_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.singletonList(FOO_TOPIC), - "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(FOO_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); // start a sink connector connect.configureConnector(SINK_CONNECTOR, defaultSinkConnectorProps(FOO_TOPIC)); @@ -195,8 +192,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(SINK_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), - "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); // deleting a connector resets its active topics connect.deleteConnector(FOO_CONNECTOR); @@ -204,7 +201,7 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorDoesNotExist(FOO_CONNECTOR, "Connector wasn't deleted in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), "Active topic set is not empty for deleted connector: " + FOO_CONNECTOR); // Unfortunately there's currently no easy way to know when the consumer caught up with @@ -216,8 +213,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { Exception e = assertThrows(ConnectRestException.class, () -> connect.resetConnectorTopics(SINK_CONNECTOR)); assertTrue(e.getMessage().contains("Topic tracking reset is disabled.")); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), - "Active topic set is not: " + 
Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), + "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); } @Test @@ -252,7 +249,7 @@ public void testTopicTrackingIsDisabled() throws InterruptedException { public void assertNoTopicStatusInStatusTopic() { String statusTopic = workerProps.get(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG); Consumer verifiableConsumer = connect.kafka().createConsumer( - Collections.singletonMap("group.id", "verifiable-consumer-group-0")); + Map.of("group.id", "verifiable-consumer-group-0")); List partitionInfos = verifiableConsumer.partitionsFor(statusTopic); if (partitionInfos.isEmpty()) { @@ -260,7 +257,7 @@ public void assertNoTopicStatusInStatusTopic() { } List partitions = partitionInfos.stream() .map(info -> new TopicPartition(info.topic(), info.partition())) - .collect(Collectors.toList()); + .toList(); verifiableConsumer.assign(partitions); // Based on the implementation of {@link org.apache.kafka.connect.util.KafkaBasedLog#readToLogEnd} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java index 3eeeab0d4056c..44f895b8a513b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java @@ -67,7 +67,6 @@ import java.io.Closeable; import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -306,7 +305,7 @@ public void testPollBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -366,7 +365,7 @@ public void testIntervalBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -427,7 +426,7 @@ public void testConnectorBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords sourceRecords = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -538,7 +537,7 @@ public void testFencedLeaderRecovery() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -601,7 +600,7 @@ public 
void testConnectorReconfiguration() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -664,7 +663,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { String topic = "test-topic"; try (Admin admin = connect.kafka().createAdminClient()) { - admin.createTopics(Collections.singleton(new NewTopic(topic, 3, (short) 1))).all().get(); + admin.createTopics(Set.of(new NewTopic(topic, 3, (short) 1))).all().get(); } Map props = new HashMap<>(); @@ -690,7 +689,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { // Grant the connector's admin permissions to access the topics for its records and offsets // Intentionally leave out permissions required for fencing try (Admin admin = connect.kafka().createAdminClient()) { - admin.createAcls(Arrays.asList( + admin.createAcls(List.of( new AclBinding( new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -737,7 +736,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { // Now grant the necessary permissions for fencing to the connector's admin try (Admin admin = connect.kafka().createAdminClient()) { - admin.createAcls(Arrays.asList( + admin.createAcls(List.of( new AclBinding( new ResourcePattern(ResourceType.TRANSACTIONAL_ID, Worker.taskTransactionalId(CLUSTER_GROUP_ID, CONNECTOR_NAME, 0), PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -864,7 +863,7 @@ public void testSeparateOffsetsTopic() throws Exception { .consume( MINIMUM_MESSAGES, TimeUnit.MINUTES.toMillis(1), - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), "test-topic") .count(); assertTrue(recordNum >= MINIMUM_MESSAGES, @@ -874,7 +873,7 @@ public void testSeparateOffsetsTopic() throws Exception { ConsumerRecords offsetRecords = connectorTargetedCluster .consumeAll( TimeUnit.MINUTES.toMillis(1), - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, offsetsTopic ); @@ -930,7 +929,7 @@ public void testSeparateOffsetsTopic() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords sourceRecords = connectorTargetedCluster.consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -939,7 +938,7 @@ public void testSeparateOffsetsTopic() throws Exception { // also have to check which offsets have actually been committed, since we no longer have exactly-once semantics offsetRecords = connectorTargetedCluster.consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, offsetsTopic ); @@ -991,7 +990,7 @@ public void testPotentialDeadlockWhenProducingToOffsetsTopic() throws Exception } private ConfigInfo 
findConfigInfo(String property, ConfigInfos validationResult) { - return validationResult.values().stream() + return validationResult.configs().stream() .filter(info -> property.equals(info.configKey().name())) .findAny() .orElseThrow(() -> new AssertionError("Failed to find configuration validation result for property '" + property + "'")); @@ -999,13 +998,13 @@ private ConfigInfo findConfigInfo(String property, ConfigInfos validationResult) private List parseAndAssertOffsetsForSingleTask(ConsumerRecords offsetRecords) { Map> parsedOffsets = parseOffsetForTasks(offsetRecords); - assertEquals(Collections.singleton(0), parsedOffsets.keySet(), "Expected records to only be produced from a single task"); + assertEquals(Set.of(0), parsedOffsets.keySet(), "Expected records to only be produced from a single task"); return parsedOffsets.get(0); } private List parseAndAssertValuesForSingleTask(ConsumerRecords sourceRecords) { Map> parsedValues = parseValuesForTasks(sourceRecords); - assertEquals(Collections.singleton(0), parsedValues.keySet(), "Expected records to only be produced from a single task"); + assertEquals(Set.of(0), parsedValues.keySet(), "Expected records to only be produced from a single task"); return parsedValues.get(0); } @@ -1024,7 +1023,7 @@ private void assertAtLeastOnceSeqnos(ConsumerRecords sourceRecor parsedValues.replaceAll((task, values) -> { Long committedValue = lastCommittedValues.get(task); assertNotNull(committedValue, "No committed offset found for task " + task); - return values.stream().filter(v -> v <= committedValue).collect(Collectors.toList()); + return values.stream().filter(v -> v <= committedValue).toList(); }); assertSeqnos(parsedValues, numTasks); } @@ -1102,7 +1101,7 @@ private Map> parseOffsetForTasks(ConsumerRecords> result = new HashMap<>(); for (ConsumerRecord offsetRecord : offsetRecords) { @@ -1284,7 +1283,7 @@ public List poll() { // Request a read to the end of the offsets topic context.offsetStorageReader().offset(Collections.singletonMap("", null)); // Produce a record to the offsets topic - return Collections.singletonList(new SourceRecord(null, null, topic, null, "", null, null)); + return List.of(new SourceRecord(null, null, topic, null, "", null, null)); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java index d85ac9a440cb4..d0841b26941f0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java @@ -28,7 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; @@ -286,15 +285,15 @@ public void testStartWhenInternalTopicsCreatedManuallyWithCompactForBrokersDefau } protected Map compactCleanupPolicy() { - return Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); + return Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); } protected Map deleteCleanupPolicy() { - return Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); + return Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); } protected Map noTopicSettings() { - return Collections.emptyMap(); + 
return Map.of(); } protected Map compactAndDeleteCleanupPolicy() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java index 80609d769685c..f86fabca7159a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java @@ -24,6 +24,7 @@ import org.apache.kafka.connect.sink.SinkRecord; import java.util.Collection; +import java.util.LinkedHashMap; import java.util.Map; public class MonitorableSinkConnector extends TestableSinkConnector { @@ -35,7 +36,7 @@ public class MonitorableSinkConnector extends TestableSinkConnector { public void start(Map props) { super.start(props); PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("start", "description", Map.of()); + metricsName = pluginMetrics.metricName("start", "description", new LinkedHashMap<>()); pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); } @@ -53,7 +54,7 @@ public static class MonitorableSinkTask extends TestableSinkTask { public void start(Map props) { super.start(props); PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("put", "description", Map.of()); + metricsName = pluginMetrics.metricName("put", "description", new LinkedHashMap<>()); pluginMetrics.addMetric(metricsName, (Measurable) (config, now) -> count); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java index 49bcbc0492001..07b7155b92543 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java @@ -23,6 +23,7 @@ import org.apache.kafka.connect.connector.Task; import org.apache.kafka.connect.source.SourceRecord; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -35,7 +36,7 @@ public class MonitorableSourceConnector extends TestableSourceConnector { public void start(Map props) { super.start(props); PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("start", "description", Map.of()); + metricsName = pluginMetrics.metricName("start", "description", new LinkedHashMap<>()); pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); } @@ -53,7 +54,7 @@ public static class MonitorableSourceTask extends TestableSourceTask { public void start(Map props) { super.start(props); PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("poll", "description", Map.of()); + metricsName = pluginMetrics.metricName("poll", "description", new LinkedHashMap<>()); pluginMetrics.addMetric(metricsName, (Measurable) (config, now) -> count); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java index e2d3972c272c9..c9edd6093bdf4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java @@ -44,7 +44,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -107,7 +106,7 @@ public void tearDown() { } try { assertEquals( - Collections.emptySet(), + Set.of(), remainingConnectors, "Some connectors were not properly cleaned up after this test" ); @@ -150,11 +149,11 @@ private static EmbeddedConnectCluster createOrReuseConnectWithWorkerProps(Map workerProps = Collections.singletonMap( + Map workerProps = Map.of( DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled" ); @@ -288,8 +287,8 @@ private void getAndVerifySourceConnectorOffsets(Map connectorCon @Test public void testAlterOffsetsNonExistentConnector() { ConnectRestException e = assertThrows(ConnectRestException.class, - () -> connect.alterConnectorOffsets("non-existent-connector", new ConnectorOffsets(Collections.singletonList( - new ConnectorOffset(Collections.emptyMap(), Collections.emptyMap()))))); + () -> connect.alterConnectorOffsets("non-existent-connector", new ConnectorOffsets(List.of( + new ConnectorOffset(Map.of(), Map.of()))))); assertEquals(404, e.errorCode()); } @@ -304,8 +303,7 @@ public void testAlterOffsetsNonStoppedConnector() throws Exception { // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsets.add( - new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), - Collections.singletonMap("saved", 5)) + new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 5)) ); } @@ -403,7 +401,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, i); - offsetsToAlter.add(new ConnectorOffset(partition, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 5))); + offsetsToAlter.add(new ConnectorOffset(partition, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 5))); } // Alter the sink connector's offsets, with retry logic (since we just stopped the connector) @@ -425,7 +423,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, i); - offsetsToAlter.add(new ConnectorOffset(partition, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 3))); + offsetsToAlter.add(new ConnectorOffset(partition, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 3))); } response = connect.alterConnectorOffsets(connectorName, new ConnectorOffsets(offsetsToAlter)); @@ -475,7 +473,7 @@ public void testAlterSinkConnectorOffsetsZombieSinkTasks() throws Exception { Map partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, 0); - List offsetsToAlter = Collections.singletonList(new ConnectorOffset(partition, null)); + List offsetsToAlter = List.of(new ConnectorOffset(partition, null)); ConnectRestException e = assertThrows(ConnectRestException.class, () -> connect.alterConnectorOffsets(connectorName, new ConnectorOffsets(offsetsToAlter))); @@ -602,8 +600,7 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( - new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" 
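Most changes in this file migrate Collections.singletonMap/singletonList/emptyMap to Map.of/List.of. The main caveat with that migration is that the immutable factories reject null keys, null values, and duplicate keys, which is why call sites that carry nulls cannot be converted. A minimal sketch with hypothetical keys:

import java.util.Collections;
import java.util.List;
import java.util.Map;

public class FactoryCaveatsDemo {
    public static void main(String[] args) {
        // Collections.singletonMap tolerates a null value...
        Map<String, Object> legacy = Collections.singletonMap("saved", null);
        System.out.println(legacy); // {saved=null}

        // ...but Map.of and List.of throw NullPointerException on nulls,
        // and Map.of also rejects duplicate keys with IllegalArgumentException.
        try {
            Map.of("saved", null);
        } catch (NullPointerException e) {
            System.out.println("Map.of rejects null values");
        }
        try {
            List.of((Object) null);
        } catch (NullPointerException e) {
            System.out.println("List.of rejects null elements");
        }
    }
}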
+ i), - Collections.singletonMap("saved", 5)) + new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 5)) ); } @@ -623,8 +620,7 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( - new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), - Collections.singletonMap("saved", 7)) + new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 7)) ); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java index cba432849415c..513e064ddb682 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java @@ -30,8 +30,8 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeUnit; @@ -350,8 +350,7 @@ private boolean assertConnectorAndTasksAreUniqueAndBalanced() { private static String formatAssignment(Map> assignment) { StringBuilder result = new StringBuilder(); for (String worker : assignment.keySet().stream().sorted().toList()) { - result.append(String.format("\n%s=%s", worker, assignment.getOrDefault(worker, - Collections.emptyList()))); + result.append(String.format("\n%s=%s", worker, assignment.getOrDefault(worker, List.of()))); } return result.toString(); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java index 27d64b322c95c..0e0cfa6a1ce9c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java @@ -31,7 +31,6 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -112,7 +111,7 @@ public void testRestExtensionApi() throws InterruptedException { workerId, null ), - Collections.singletonMap( + Map.of( 0, new TaskState(0, "RUNNING", workerId, null) ), diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java index 5829e3040c3ed..3831eb8f24685 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java @@ -63,7 +63,6 @@ import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -199,7 +198,7 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b .putConnectorConfig(any(), any(), isNull(), anyBoolean(), 
followerCallbackCaptor.capture()); // Leader will reply - ConnectorInfo connectorInfo = new ConnectorInfo("blah", Collections.emptyMap(), Collections.emptyList(), ConnectorType.SOURCE); + ConnectorInfo connectorInfo = new ConnectorInfo("blah", Map.of(), List.of(), ConnectorType.SOURCE); Herder.Created leaderAnswer = new Herder.Created<>(true, connectorInfo); ArgumentCaptor>> leaderCallbackCaptor = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java index 8c538ee3fc507..2b27f790f7eed 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java @@ -28,10 +28,10 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Properties; @@ -98,10 +98,10 @@ public void close() { public void testEagerConsumerPartitionAssignment() throws Exception { final String topic1 = "topic1", topic2 = "topic2", topic3 = "topic3"; final TopicPartition tp1 = new TopicPartition(topic1, 0), tp2 = new TopicPartition(topic2, 0), tp3 = new TopicPartition(topic3, 0); - final Collection topics = Arrays.asList(topic1, topic2, topic3); + final Collection topics = List.of(topic1, topic2, topic3); Map connectorProps = baseSinkConnectorProps(String.join(",", topics)); - // Need an eager assignor here; round robin is as good as any + // Need an eager assignor here; round-robin is as good as any connectorProps.put( CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName()); @@ -205,7 +205,7 @@ public void testEagerConsumerPartitionAssignment() throws Exception { public void testCooperativeConsumerPartitionAssignment() throws Exception { final String topic1 = "topic1", topic2 = "topic2", topic3 = "topic3"; final TopicPartition tp1 = new TopicPartition(topic1, 0), tp2 = new TopicPartition(topic2, 0), tp3 = new TopicPartition(topic3, 0); - final Collection topics = Arrays.asList(topic1, topic2, topic3); + final Collection topics = List.of(topic1, topic2, topic3); Map connectorProps = baseSinkConnectorProps(String.join(",", topics)); connectorProps.put( diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java index 549394fd03966..1fbdfa70b8e32 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java @@ -25,7 +25,6 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; @@ -150,7 +149,7 @@ public void testSwitchingToTopicCreationEnabled() throws InterruptedException { // start the clusters connect.start(); - connect.kafka().createTopic(BAR_TOPIC, DEFAULT_PARTITIONS, DEFAULT_REPLICATION_FACTOR, Collections.emptyMap()); + 
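The SinkConnectorsIntegrationTest hunk above replaces Arrays.asList(topic1, topic2, topic3) with List.of(...). A brief sketch of how the two differ, which only matters if the list is later mutated (topic names are placeholders):

import java.util.Arrays;
import java.util.List;

public class AsListDemo {
    public static void main(String[] args) {
        // Arrays.asList is a fixed-size view backed by an array: set() works, add() does not.
        List<String> view = Arrays.asList("topic1", "topic2", "topic3");
        view.set(0, "other-topic");   // allowed
        // view.add("topic4");        // UnsupportedOperationException

        // List.of is fully immutable and rejects null elements.
        List<String> immutable = List.of("topic1", "topic2", "topic3");
        // immutable.set(0, "x");     // UnsupportedOperationException

        System.out.println(view + " " + immutable);
    }
}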
connect.kafka().createTopic(BAR_TOPIC, DEFAULT_PARTITIONS, DEFAULT_REPLICATION_FACTOR, Map.of()); connect.assertions().assertTopicsExist(BAR_TOPIC); connect.assertions().assertTopicSettings(BAR_TOPIC, DEFAULT_REPLICATION_FACTOR, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java index 176c78c97e33f..fd4438e750ff4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java @@ -30,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -38,7 +37,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; import jakarta.ws.rs.core.Response; @@ -102,7 +100,7 @@ public void testDynamicLogging() { StandaloneWorkerIntegrationTest::isModified ); assertEquals( - Collections.emptyMap(), + Map.of(), invalidModifiedLoggers, "No loggers should have a non-null last-modified timestamp" ); @@ -155,9 +153,9 @@ private Map testSetLoggingLevel( assertTrue(affectedLoggers.contains(namespace)); List invalidAffectedLoggers = affectedLoggers.stream() .filter(l -> !l.startsWith(namespace)) - .collect(Collectors.toList()); + .toList(); assertEquals( - Collections.emptyList(), + List.of(), invalidAffectedLoggers, "No loggers outside the namespace '" + namespace + "' should have been included in the response for a request to modify that namespace" @@ -188,7 +186,7 @@ private Map testSetLoggingLevel( ) ); assertEquals( - Collections.emptyMap(), + Map.of(), invalidAffectedLoggerLevels, "At least one logger in the affected namespace '" + namespace + "' does not have the expected level of '" + level @@ -199,7 +197,7 @@ private Map testSetLoggingLevel( Set droppedLoggers = Utils.diff(HashSet::new, initialLevels.keySet(), newLevels.keySet()); assertEquals( - Collections.emptySet(), + Set.of(), droppedLoggers, "At least one logger was present in the listing of all loggers " + "before the logging level for namespace '" + namespace @@ -212,7 +210,7 @@ private Map testSetLoggingLevel( e -> !hasNamespace(e, namespace) && !e.getValue().equals(initialLevels.get(e.getKey())) ); assertEquals( - Collections.emptyMap(), + Map.of(), invalidUnaffectedLoggerLevels, "At least one logger outside of the affected namespace '" + namespace + "' has a different logging level or last-modified timestamp than it did " @@ -256,8 +254,8 @@ public void testCreateConnectorWithStoppedInitialState() throws Exception { CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the STOPPED state can be resumed successfully connect.resumeConnector(CONNECTOR_NAME); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java index 
b864cc5759cf6..55a2d5c6d497f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java @@ -25,7 +25,6 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -92,7 +91,7 @@ public void shouldReturnTrueWhenAwaitingForStartAndStopToComplete() throws Throw @Test public void shouldReturnFalseWhenAwaitingForDependentLatchToComplete() throws Throwable { StartAndStopLatch depLatch = new StartAndStopLatch(1, 1, this::complete, null, clock); - dependents = Collections.singletonList(depLatch); + dependents = List.of(depLatch); latch = new StartAndStopLatch(1, 1, this::complete, dependents, clock); future = asyncAwait(100); @@ -106,7 +105,7 @@ public void shouldReturnFalseWhenAwaitingForDependentLatchToComplete() throws Th @Test public void shouldReturnTrueWhenAwaitingForStartAndStopAndDependentLatch() throws Throwable { StartAndStopLatch depLatch = new StartAndStopLatch(1, 1, this::complete, null, clock); - dependents = Collections.singletonList(depLatch); + dependents = List.of(depLatch); latch = new StartAndStopLatch(1, 1, this::complete, dependents, clock); future = asyncAwait(100); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java index 56835d18f8b23..dec9421058950 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java @@ -33,13 +33,11 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; -import java.util.stream.Collectors; import java.util.stream.LongStream; /** @@ -190,7 +188,7 @@ public void start(Map props) { taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); Map offset = Optional.ofNullable( context.offsetStorageReader().offset(sourcePartition(taskId))) - .orElse(Collections.emptyMap()); + .orElse(Map.of()); startingSeqno = Optional.ofNullable((Long) offset.get("saved")).orElse(0L); seqno = startingSeqno; log.info("Started {} task {} with properties {}", this.getClass().getSimpleName(), taskId, props); @@ -235,7 +233,7 @@ public List poll() { maybeDefineTransactionBoundary(record); return record; }) - .collect(Collectors.toList()); + .toList(); } return null; } @@ -295,10 +293,10 @@ private void maybeDefineTransactionBoundary(SourceRecord record) { } public static Map sourcePartition(String taskId) { - return Collections.singletonMap("task.id", taskId); + return Map.of("task.id", taskId); } public static Map sourceOffset(long seqno) { - return Collections.singletonMap("saved", seqno); + return Map.of("saved", seqno); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java index 3c4cbe48ab6dd..5eca2c24e8401 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java +++ 
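The TestableSourceConnector hunk in this range keeps the same offset-recovery pattern while switching the default from Collections.emptyMap() to Map.of(): a missing stored offset becomes an empty map and the starting sequence number falls back to 0. A self-contained sketch of that pattern, with stand-in values rather than a real offset storage reader:

import java.util.Map;
import java.util.Optional;

public class OffsetDefaultDemo {
    public static void main(String[] args) {
        // What an offset reader would return for a partition with no committed offset.
        Map<String, Object> stored = null;

        // Treat the missing offset as an empty map, then default the seqno to 0.
        Map<String, Object> offset = Optional.ofNullable(stored).orElse(Map.of());
        long startingSeqno = Optional.ofNullable((Long) offset.get("saved")).orElse(0L);

        System.out.println(startingSeqno); // 0
    }
}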
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java @@ -34,7 +34,6 @@ import java.util.Properties; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.PREDICATES_CONFIG; @@ -173,7 +172,7 @@ public void testFilterOnTopicNameWithSinkConnector() throws Exception { connectorHandle.awaitCommits(RECORD_TRANSFER_DURATION_MS); // Assert that we didn't see any baz - Map expectedRecordCounts = singletonMap(fooTopic, (long) numFooRecords); + Map expectedRecordCounts = Map.of(fooTopic, (long) numFooRecords); assertObservedRecords(observedRecords, expectedRecordCounts); // delete connector @@ -253,7 +252,7 @@ public void testFilterOnTombstonesWithSinkConnector() throws Exception { // wait for the connector tasks to commit all records. connectorHandle.awaitCommits(RECORD_TRANSFER_DURATION_MS); - Map expectedRecordCounts = singletonMap(topic, (long) (numRecords / 2)); + Map expectedRecordCounts = Map.of(topic, (long) (numRecords / 2)); assertObservedRecords(observedRecords, expectedRecordCounts); // delete connector diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java index 4ac2b9758dccf..8c32f2d33be2f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java @@ -73,11 +73,10 @@ import org.mockito.quality.Strictness; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -155,27 +154,27 @@ public class AbstractHerderTest { private static final ClusterConfigState SNAPSHOT = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); private static final ClusterConfigState SNAPSHOT_NO_TASKS = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), + Map.of(), + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); private final String workerId = "workerId"; private final String kafkaClusterId = "I4ZmrWqfT2e-upky_4fdPA"; @@ -197,7 +196,7 @@ 
public void testConnectors() { AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); - assertEquals(Collections.singleton(CONN1), new HashSet<>(herder.connectors())); + assertEquals(Set.of(CONN1), Set.copyOf(herder.connectors())); } @Test @@ -219,7 +218,7 @@ public void testConnectorStatus() { AbstractHerder herder = testHerder(); - when(herder.rawConfig(connectorName)).thenReturn(Collections.singletonMap( + when(herder.rawConfig(connectorName)).thenReturn(Map.of( ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName() )); @@ -227,7 +226,7 @@ public void testConnectorStatus() { .thenReturn(new ConnectorStatus(connectorName, AbstractStatus.State.RUNNING, workerId, generation)); when(statusStore.getAll(connectorName)) - .thenReturn(Collections.singletonList( + .thenReturn(List.of( new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation))); ConnectorStateInfo state = herder.connectorStatus(connectorName); @@ -254,13 +253,13 @@ public void testConnectorStatusMissingPlugin() { AbstractHerder herder = testHerder(); when(herder.rawConfig(connectorName)) - .thenReturn(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); + .thenReturn(Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); when(statusStore.get(connectorName)) .thenReturn(new ConnectorStatus(connectorName, AbstractStatus.State.RUNNING, workerId, generation)); when(statusStore.getAll(connectorName)) - .thenReturn(Collections.singletonList( + .thenReturn(List.of( new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation))); ConnectorStateInfo state = herder.connectorStatus(connectorName); @@ -291,7 +290,7 @@ public void testConnectorInfo() { assertEquals(CONN1, info.name()); assertEquals(CONN1_CONFIG, info.config()); - assertEquals(Arrays.asList(TASK0, TASK1, TASK2), info.tasks()); + assertEquals(List.of(TASK0, TASK1, TASK2), info.tasks()); assertEquals(ConnectorType.SOURCE, info.type()); } @@ -331,7 +330,7 @@ public void testConnectorInfoMissingPlugin() { assertEquals(CONN1, info.name()); assertEquals(CONN1_CONFIG, info.config()); - assertEquals(Arrays.asList(TASK0, TASK1, TASK2), info.tasks()); + assertEquals(List.of(TASK0, TASK1, TASK2), info.tasks()); assertEquals(ConnectorType.UNKNOWN, info.type()); } @@ -467,8 +466,8 @@ public void testBuildRestartPlanForNoRestart() { public void testConfigValidationEmptyConfig() { AbstractHerder herder = createConfigValidationHerder(SampleSourceConnector.class, noneConnectorClientConfigOverridePolicy, 0); - assertThrows(BadRequestException.class, () -> herder.validateConnectorConfig(Collections.emptyMap(), s -> null, false)); - verify(transformer).transform(Collections.emptyMap()); + assertThrows(BadRequestException.class, () -> herder.validateConnectorConfig(Map.of(), s -> null, false)); + verify(transformer).transform(Map.of()); assertEquals(worker.getPlugins(), plugins); } @@ -477,18 +476,18 @@ public void testConfigValidationMissingName() { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); - Map config = Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); + Map config = Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); // We expect there to be errors due to the missing name and .... 
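testConnectors() above now asserts against Set.copyOf(herder.connectors()) instead of new HashSet<>(...). Both produce a set with the same elements, so the equality assertion is unchanged; Set.copyOf simply returns an unmodifiable set and rejects nulls. A small sketch with made-up connector names:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CopyOfDemo {
    public static void main(String[] args) {
        List<String> connectors = List.of("conn-1", "conn-2");

        Set<String> mutableCopy = new HashSet<>(connectors); // mutable defensive copy
        Set<String> frozenCopy = Set.copyOf(connectors);     // unmodifiable, null-hostile

        // Set equality is element-based, so both satisfy the same assertEquals.
        System.out.println(mutableCopy.equals(Set.of("conn-1", "conn-2"))); // true
        System.out.println(frozenCopy.equals(Set.of("conn-1", "conn-2")));  // true
    }
}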
Note that these assertions depend heavily on // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(connectorClass.getName(), result.name()); - assertEquals(Arrays.asList(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, + assertEquals(List.of(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, ConnectorConfig.ERROR_GROUP, SourceConnectorConfig.TOPIC_CREATION_GROUP, SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_GROUP, SourceConnectorConfig.OFFSETS_TOPIC_GROUP), result.groups()); assertEquals(2, result.errorCount()); - Map infos = result.values().stream() + Map infos = result.configs().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); // Base connector config has 15 fields, connector's configs add 7 assertEquals(26, infos.size()); @@ -571,7 +570,7 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); // 2 transform aliases defined -> 2 plugin lookups - Mockito.lenient().when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); + Mockito.lenient().when(plugins.transformations()).thenReturn(Set.of(transformationPluginDesc())); Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); // Define 2 transformations. One has a class defined and so can get embedded configs, the other is missing @@ -590,7 +589,7 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx // the config fields for SourceConnectorConfig, but we expect these to change rarely. 
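Several assertions in this file change from result.values() to result.configs() and then index the entries by config key name. The sketch below reproduces that indexing idiom with simplified stand-in records rather than the real ConfigInfos/ConfigInfo classes, since only the shape of the accessor matters here:

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ConfigIndexDemo {
    // Simplified stand-ins for the Connect REST entities used in the tests.
    record ConfigKey(String name) { }
    record ConfigInfo(ConfigKey configKey) { }
    record ConfigInfos(List<ConfigInfo> configs) { }

    public static void main(String[] args) {
        ConfigInfos result = new ConfigInfos(List.of(
                new ConfigInfo(new ConfigKey("name")),
                new ConfigInfo(new ConfigKey("connector.class"))));

        // Index every ConfigInfo by its key name, as the tests do after the rename.
        Map<String, ConfigInfo> byName = result.configs().stream()
                .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity()));

        System.out.println(byName.keySet());
    }
}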
assertEquals(connectorClass.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = Arrays.asList( + List expectedGroups = List.of( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -603,7 +602,7 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - Map infos = result.values().stream() + Map infos = result.configs().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); assertEquals(33, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class @@ -625,8 +624,8 @@ public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundEx final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); - Mockito.lenient().when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); - Mockito.lenient().when(plugins.predicates()).thenReturn(Collections.singleton(predicatePluginDesc())); + Mockito.lenient().when(plugins.transformations()).thenReturn(Set.of(transformationPluginDesc())); + Mockito.lenient().when(plugins.predicates()).thenReturn(Set.of(predicatePluginDesc())); Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); Mockito.lenient().when(plugins.newPlugin(SamplePredicate.class.getName(), null, classLoader)).thenReturn(new SamplePredicate()); @@ -649,7 +648,7 @@ public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundEx // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(connectorClass.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = Arrays.asList( + List expectedGroups = List.of( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -663,7 +662,7 @@ public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundEx ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - Map infos = result.values().stream() + Map infos = result.configs().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); assertEquals(36, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class @@ -715,7 +714,7 @@ public void testConfigValidationPrincipalOnlyOverride() { // the config fields for SourceConnectorConfig, but we expect these to change rarely. 
assertEquals(SampleSourceConnector.class.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = Arrays.asList( + List expectedGroups = List.of( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -727,10 +726,10 @@ public void testConfigValidationPrincipalOnlyOverride() { assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); // Base connector config has 19 fields, connector's configs add 7, and 2 producer overrides - assertEquals(28, result.values().size()); - assertTrue(result.values().stream().anyMatch( + assertEquals(28, result.configs().size()); + assertTrue(result.configs().stream().anyMatch( configInfo -> ackConfigKey.equals(configInfo.configValue().name()) && !configInfo.configValue().errors().isEmpty())); - assertTrue(result.values().stream().anyMatch( + assertTrue(result.configs().stream().anyMatch( configInfo -> saslConfigKey.equals(configInfo.configValue().name()) && configInfo.configValue().errors().isEmpty())); verifyValidationIsolation(); @@ -771,7 +770,7 @@ public void testConfigValidationAllOverride() { assertEquals(ConnectorType.SOURCE, herder.connectorType(config)); Map validatedOverriddenClientConfigs = new HashMap<>(); - for (ConfigInfo configInfo : result.values()) { + for (ConfigInfo configInfo : result.configs()) { String configName = configInfo.configKey().name(); if (overriddenClientConfigs.contains(configName)) { validatedOverriddenClientConfigs.put(configName, configInfo.configValue().value()); @@ -799,7 +798,7 @@ protected boolean isAllowed(ConfigValue configValue) { @Override public void withPluginMetrics(PluginMetrics metrics) { - metricName = metrics.metricName("name", "description", Map.of()); + metricName = metrics.metricName("name", "description", new LinkedHashMap<>()); metrics.addMetric(metricName, (Measurable) (config, now) -> count); } } @@ -855,7 +854,7 @@ public void testReverseTransformConfigs() { } private void assertErrorForKey(ConfigInfos configInfos, String testKey) { - final List errorsForKey = configInfos.values().stream() + final List errorsForKey = configInfos.configs().stream() .map(ConfigInfo::configValue) .filter(configValue -> configValue.name().equals(testKey)) .map(ConfigValueInfo::errors) @@ -890,7 +889,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithNoErrors( addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = Arrays.asList("groupB", "group C"); + List groups = List.of("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); addValue(values, "config.b1", "value.b1"); @@ -900,7 +899,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithNoErrors( ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.values().size()); + assertEquals(values.size(), infos.configs().size()); assertEquals(0, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -921,7 +920,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithSomeError addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = Arrays.asList("groupB", "group C"); + List groups = List.of("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); 
addValue(values, "config.b1", "value.b1"); @@ -931,7 +930,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithSomeError ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.values().size()); + assertEquals(values.size(), infos.configs().size()); assertEquals(1, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -952,7 +951,7 @@ public void testGenerateResultWithConfigValuesMoreThanConfigKeysAndWithSomeError addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = Arrays.asList("groupB", "group C"); + List groups = List.of("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); addValue(values, "config.b1", "value.b1"); @@ -964,7 +963,7 @@ public void testGenerateResultWithConfigValuesMoreThanConfigKeysAndWithSomeError ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.values().size()); + assertEquals(values.size(), infos.configs().size()); assertEquals(2, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -997,7 +996,7 @@ public void testGenerateResultWithConfigValuesWithNoConfigKeysAndWithSomeErrors( ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.values().size()); + assertEquals(values.size(), infos.configs().size()); assertEquals(2, infos.errorCount()); assertNoInfoKey(infos, "config.a1"); assertNoInfoKey(infos, "config.b1"); @@ -1142,7 +1141,7 @@ public void testGetConnectorTypeWithMissingPlugin() { when(worker.getPlugins()).thenReturn(plugins); when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); AbstractHerder herder = testHerder(); - assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); + assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); } @Test @@ -1154,7 +1153,7 @@ public void testGetConnectorTypeWithNullConfig() { @Test public void testGetConnectorTypeWithEmptyConfig() { AbstractHerder herder = testHerder(); - assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.emptyMap())); + assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Map.of())); } @Test @@ -1169,9 +1168,9 @@ public void testConnectorOffsetsConnectorNotFound() { @Test public void testConnectorOffsets() throws Exception { - ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList( - new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), - new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")) + ConnectorOffsets offsets = new ConnectorOffsets(List.of( + new ConnectorOffset(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + new ConnectorOffset(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")) )); @SuppressWarnings("unchecked") ArgumentCaptor> workerCallback = 
ArgumentCaptor.forClass(Callback.class); @@ -1202,7 +1201,7 @@ public void testTaskConfigComparison() { when(snapshot.taskCount(CONN1)).thenReturn(TASK_CONFIG.size()); List> alteredTaskConfigs = new ArrayList<>(TASK_CONFIGS); - alteredTaskConfigs.set(alteredTaskConfigs.size() - 1, Collections.emptyMap()); + alteredTaskConfigs.set(alteredTaskConfigs.size() - 1, Map.of()); // Last task config is different; should report a change assertTrue(AbstractHerder.taskConfigsChanged(snapshot, CONN1, alteredTaskConfigs)); @@ -1219,15 +1218,15 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { ClusterConfigState snapshotWithNoAppliedConfig = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet() + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of() ); assertTrue(AbstractHerder.taskConfigsChanged(snapshotWithNoAppliedConfig, CONN1, TASK_CONFIGS)); @@ -1237,15 +1236,15 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { ClusterConfigState snapshotWithDifferentAppliedConfig = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(appliedConfig)), - Collections.emptySet(), - Collections.emptySet() + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(appliedConfig)), + Set.of(), + Set.of() ); assertTrue(AbstractHerder.taskConfigsChanged(snapshotWithDifferentAppliedConfig, CONN1, TASK_CONFIGS)); } @@ -1253,12 +1252,12 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { protected void addConfigKey(Map keys, String name, String group) { ConfigDef configDef = new ConfigDef().define(name, ConfigDef.Type.STRING, null, null, ConfigDef.Importance.HIGH, "doc", group, 10, - ConfigDef.Width.MEDIUM, "display name", Collections.emptyList(), null, null); + ConfigDef.Width.MEDIUM, "display name", List.of(), null, null); keys.putAll(configDef.configKeys()); } protected void addValue(List values, String name, String value, String... 
errors) { - values.add(new ConfigValue(name, value, new ArrayList<>(), Arrays.asList(errors))); + values.add(new ConfigValue(name, value, new ArrayList<>(), List.of(errors))); } protected void assertInfoKey(ConfigInfos infos, String name, String group) { @@ -1276,11 +1275,11 @@ protected void assertInfoValue(ConfigInfos infos, String name, String value, Str ConfigValueInfo info = findInfo(infos, name).configValue(); assertEquals(name, info.name()); assertEquals(value, info.value()); - assertEquals(Arrays.asList(errors), info.errors()); + assertEquals(List.of(errors), info.errors()); } protected ConfigInfo findInfo(ConfigInfos infos, String name) { - return infos.values() + return infos.configs() .stream() .filter(i -> i.configValue().name().equals(name)) .findFirst() @@ -1292,7 +1291,7 @@ private void testConfigProviderRegex(String rawConnConfig) { } private void testConfigProviderRegex(String rawConnConfig, boolean expected) { - Set keys = keysWithVariableValues(Collections.singletonMap("key", rawConnConfig), ConfigTransformer.DEFAULT_PATTERN); + Set keys = keysWithVariableValues(Map.of("key", rawConnConfig), ConfigTransformer.DEFAULT_PATTERN); boolean actual = !keys.isEmpty() && keys.contains("key"); assertEquals(expected, actual, String.format("%s should have matched regex", rawConnConfig)); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java index 9ad8690ca1c69..d5b15dde76f22 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java @@ -73,8 +73,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -121,8 +119,8 @@ public class AbstractWorkerSourceTaskTest { private static final String TOPIC = "topic"; private static final String OTHER_TOPIC = "other-topic"; - private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); - private static final Map OFFSET = Collections.singletonMap("key", 12); + private static final Map PARTITION = Map.of("key", "partition".getBytes()); + private static final Map OFFSET = Map.of("key", 12); // Connect-format data private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA; @@ -171,6 +169,7 @@ public void setup() { private Map workerProps() { Map props = new HashMap<>(); + props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -253,7 +252,7 @@ public void testSendRecordsConvertsData() { createWorkerTask(); // Can just use the same record for key and value - List records = Collections.singletonList( + List records = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD) ); @@ -282,7 +281,7 @@ public void testSendRecordsPropagatesTimestamp() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = Collections.singletonList( + workerTask.toSend = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); 
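The workerProps() hunk above adds an explicit bootstrap.servers entry alongside the converter and offset-storage settings, presumably because the worker configuration now requires it; that motivation is an inference, not stated in the diff. A minimal sketch of the resulting test properties:

import java.util.HashMap;
import java.util.Map;

public class WorkerPropsDemo {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092"); // newly added in the tests
        props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        props.put("offset.storage.file.filename", "/tmp/connect.offsets");
        System.out.println(props);
    }
}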
workerTask.sendRecords(); @@ -302,7 +301,7 @@ public void testSendRecordsCorruptTimestamp() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); expectApplyTransformationChain(); - workerTask.toSend = Collections.singletonList( + workerTask.toSend = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); assertThrows(InvalidRecordException.class, workerTask::sendRecords); @@ -319,7 +318,7 @@ public void testSendRecordsNoTimestamp() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = Collections.singletonList( + workerTask.toSend = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); workerTask.sendRecords(); @@ -345,7 +344,7 @@ public void testHeaders() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = Collections.singletonList( + workerTask.toSend = List.of( new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, null, connectHeaders) ); @@ -367,7 +366,7 @@ public void testHeadersWithCustomConverter() throws Exception { SampleConverterWithHeaders testConverter = new SampleConverterWithHeaders(); createWorkerTask(stringConverter, testConverter, stringConverter, RetryWithToleranceOperatorTest.noneOperator(), - Collections::emptyList, transformationChain); + List::of, transformationChain); expectSendRecord(null); expectApplyTransformationChain(); @@ -383,7 +382,7 @@ public void testHeadersWithCustomConverter() throws Exception { org.apache.kafka.connect.header.Headers headersB = new ConnectHeaders() .addString("encoding", encodingB); - workerTask.toSend = Arrays.asList( + workerTask.toSend = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "a", Schema.STRING_SCHEMA, stringA, null, headersA), new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "b", @@ -426,13 +425,13 @@ public void testTopicCreateWhenTopicExists() { expectPreliminaryCalls(TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); expectSendRecord(emptyHeaders()); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); verifySendRecord(2); @@ -452,11 +451,11 @@ public void testSendRecordsTopicDescribeRetries() { when(admin.describeTopics(TOPIC)) .thenThrow(new RetriableException(new TimeoutException("timeout"))) - .thenReturn(Collections.emptyMap()); + .thenReturn(Map.of()); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); - assertEquals(Arrays.asList(record1, record2), workerTask.toSend); + assertEquals(List.of(record1, record2), workerTask.toSend); verify(admin, never()).createOrFindTopics(any(NewTopic.class)); verifyNoMoreInteractions(admin); @@ -477,16 +476,16 @@ public void testSendRecordsTopicCreateRetries() { expectPreliminaryCalls(TOPIC); - 
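The createWorkerTask call above passes List::of where Collections::emptyList used to be supplied. Both method references satisfy a supplier of an empty list, so the swap is behavior-preserving for callers that only read the result. A short sketch:

import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;

public class EmptyListSupplierDemo {
    public static void main(String[] args) {
        Supplier<List<String>> legacy = Collections::emptyList;
        Supplier<List<String>> modern = List::of; // binds to the zero-argument overload

        // Both produce an empty, unmodifiable list.
        System.out.println(legacy.get().equals(modern.get())); // true
    }
}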
when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))) // First call to create the topic times out .thenThrow(new RetriableException(new TimeoutException("timeout"))) // Next attempt succeeds .thenReturn(createdTopic(TOPIC)); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); - assertEquals(Arrays.asList(record1, record2), workerTask.toSend); + assertEquals(List.of(record1, record2), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -509,9 +508,9 @@ public void testSendRecordsTopicDescribeRetriesMidway() { expectPreliminaryCalls(OTHER_TOPIC); when(admin.describeTopics(anyString())) - .thenReturn(Collections.emptyMap()) + .thenReturn(Map.of()) .thenThrow(new RetriableException(new TimeoutException("timeout"))) - .thenReturn(Collections.emptyMap()); + .thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenAnswer( (Answer) invocation -> { NewTopic newTopic = invocation.getArgument(0); @@ -519,9 +518,9 @@ public void testSendRecordsTopicDescribeRetriesMidway() { }); // Try to send 3, make first pass, second fail. Should save last record - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); workerTask.sendRecords(); - assertEquals(Collections.singletonList(record3), workerTask.toSend); + assertEquals(List.of(record3), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -532,10 +531,10 @@ public void testSendRecordsTopicDescribeRetriesMidway() { ArgumentCaptor newTopicCaptor = ArgumentCaptor.forClass(NewTopic.class); verify(admin, times(2)).createOrFindTopics(newTopicCaptor.capture()); - assertEquals(Arrays.asList(TOPIC, OTHER_TOPIC), newTopicCaptor.getAllValues() + assertEquals(List.of(TOPIC, OTHER_TOPIC), newTopicCaptor.getAllValues() .stream() .map(NewTopic::name) - .collect(Collectors.toList())); + .toList()); } @Test @@ -550,16 +549,16 @@ public void testSendRecordsTopicCreateRetriesMidway() { expectPreliminaryCalls(TOPIC); expectPreliminaryCalls(OTHER_TOPIC); - when(admin.describeTopics(anyString())).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(anyString())).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))) .thenReturn(createdTopic(TOPIC)) .thenThrow(new RetriableException(new TimeoutException("timeout"))) .thenReturn(createdTopic(OTHER_TOPIC)); // Try to send 3, make first pass, second fail. 
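The describeTopics stubbing above relies on Mockito's consecutive-answer chaining (thenThrow followed by thenReturn) to simulate a transient timeout and a subsequent success. A stripped-down sketch of that retry-path setup, using a hypothetical Admin interface and assuming Mockito is on the classpath:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class ConsecutiveStubbingDemo {
    interface Admin {
        List<String> describeTopics(String topic);
    }

    public static void main(String[] args) {
        Admin admin = mock(Admin.class);

        // First call fails transiently, second call succeeds.
        when(admin.describeTopics("topic"))
                .thenThrow(new RuntimeException("timeout"))
                .thenReturn(List.of());

        try {
            admin.describeTopics("topic");
        } catch (RuntimeException e) {
            System.out.println("first attempt failed: " + e.getMessage());
        }
        System.out.println("second attempt returned: " + admin.describeTopics("topic"));
    }
}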
Should save last record - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); workerTask.sendRecords(); - assertEquals(Collections.singletonList(record3), workerTask.toSend); + assertEquals(List.of(record3), workerTask.toSend); verifyTopicCreation(2, TOPIC, OTHER_TOPIC); // Second call to createOrFindTopics will throw // Next they all succeed @@ -581,7 +580,7 @@ public void testTopicDescribeFails() { new ConnectException(new TopicAuthorizationException("unauthorized")) ); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); } @@ -593,12 +592,12 @@ public void testTopicCreateFails() { SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); expectPreliminaryCalls(TOPIC); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenThrow( new ConnectException(new TopicAuthorizationException("unauthorized")) ); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(admin).createOrFindTopics(any()); @@ -614,10 +613,10 @@ public void testTopicCreateFailsWithExceptionWhenCreateReturnsTopicNotCreatedOrF expectPreliminaryCalls(TOPIC); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(TopicAdmin.EMPTY_CREATION); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(admin).createOrFindTopics(any()); @@ -634,10 +633,10 @@ public void testTopicCreateSucceedsWhenCreateReturnsExistingTopicFound() { expectSendRecord(emptyHeaders()); expectApplyTransformationChain(); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(foundTopic(TOPIC)); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); ArgumentCaptor> sent = verifySendRecord(2); @@ -659,10 +658,10 @@ public void testTopicCreateSucceedsWhenCreateReturnsNewTopicFound() { expectSendRecord(emptyHeaders()); expectApplyTransformationChain(); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(createdTopic(TOPIC)); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); ArgumentCaptor> sent = verifySendRecord(2); @@ -688,13 +687,13 @@ public void testSendRecordsRetriableException() { when(transformationChain.apply(any(), eq(record2))).thenReturn(null); when(transformationChain.apply(any(), eq(record3))).thenReturn(record3); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); - 
when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); when(producer.send(any(), any())).thenThrow(new RetriableException("Retriable exception")).thenReturn(null); - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); // The first two records are filtered out / dropped by the transformation chain; only the third record will be attempted to be sent. // The producer throws a RetriableException the first time we try to send the third record @@ -721,11 +720,11 @@ public void testSendRecordsFailedTransformationErrorToleranceNone() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); - workerTask.toSend = Arrays.asList(record1); + workerTask.toSend = List.of(record1); // The transformation errored out so the error should be re-raised by sendRecords with error tolerance None Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); @@ -752,7 +751,7 @@ public void testSendRecordsFailedTransformationErrorToleranceAll() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); - workerTask.toSend = Arrays.asList(record1); + workerTask.toSend = List.of(record1); // The transformation errored out so the error should be ignored & the record skipped with error tolerance all assertTrue(workerTask.sendRecords()); @@ -778,11 +777,11 @@ public void testSendRecordsConversionExceptionErrorToleranceNone() { // When we try to convert the key/value of each record, throw an exception throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); // Send records should fail when errors.tolerance is none and the conversion call fails Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); @@ -813,7 +812,7 @@ public void testSendRecordsConversionExceptionErrorToleranceAll() { // When we try to convert the key/value of each record, throw an exception throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); - 
workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); // With errors.tolerance to all, the failed conversion should simply skip the record, and record successful batch assertTrue(workerTask.sendRecords()); @@ -883,15 +882,15 @@ private void verifyTopicCreation(int times, String... topics) { @SuppressWarnings("SameParameterValue") private TopicAdmin.TopicCreationResponse createdTopic(String topic) { - Set created = Collections.singleton(topic); - Set existing = Collections.emptySet(); + Set created = Set.of(topic); + Set existing = Set.of(); return new TopicAdmin.TopicCreationResponse(created, existing); } @SuppressWarnings("SameParameterValue") private TopicAdmin.TopicCreationResponse foundTopic(String topic) { - Set created = Collections.emptySet(); - Set existing = Collections.singleton(topic); + Set created = Set.of(); + Set existing = Set.of(topic); return new TopicAdmin.TopicCreationResponse(created, existing); } @@ -945,13 +944,13 @@ private RecordHeaders emptyHeaders() { } private void createWorkerTask(TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { - createWorkerTask(keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, + createWorkerTask(keyConverter, valueConverter, headerConverter, toleranceOperator, List::of, transformationChain); } private void createWorkerTask() { createWorkerTask( - keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); + keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), List::of, transformationChain); } private void createWorkerTask(Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, @@ -964,7 +963,7 @@ private void createWorkerTask(Converter keyConverter, Converter valueConverter, taskId, sourceTask, statusListener, TargetState.STARTED, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, workerTransactionContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, metrics, errorHandlingMetrics, plugins.delegatingLoader(), Time.SYSTEM, retryWithToleranceOperator, - statusBackingStore, Runnable::run, errorReportersSupplier, TestPlugins.noOpLoaderSwap()) { + statusBackingStore, Runnable::run, errorReportersSupplier, null, TestPlugins.noOpLoaderSwap()) { @Override protected void prepareToInitializeTask() { } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java index dfde3ec77460b..8ba0316e20c0d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java @@ -62,12 +62,17 @@ public class ConnectMetricsTest { private static final Map DEFAULT_WORKER_CONFIG = Map.of( WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter", - WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); + WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter", + WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" + ); private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("connector", 0); - 
private static final Map TAGS = Map.of("t1", "v1"); - + private static final LinkedHashMap TAGS = new LinkedHashMap<>(); private ConnectMetrics metrics; + static { + TAGS.put("t1", "v1"); + } + @BeforeEach public void setUp() { metrics = new ConnectMetrics("worker1", new WorkerConfig(WorkerConfig.baseConfigDef(), DEFAULT_WORKER_CONFIG), new MockTime(), "cluster-1"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java index 5253bcb47dadf..65b378921434d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java @@ -30,7 +30,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -53,7 +52,7 @@ public class ConnectorConfigTest> { public static final Plugins MOCK_PLUGINS = new Plugins(new HashMap<>()) { @Override public Set>> transformations() { - return Collections.emptySet(); + return Set.of(); } }; @@ -472,7 +471,7 @@ public void testEnrichedConfigDef() throws ClassNotFoundException { Plugins mockPlugins = mock(Plugins.class); when(mockPlugins.newPlugin(HasDuplicateConfigTransformation.class.getName(), null, (ClassLoader) null)).thenReturn(new HasDuplicateConfigTransformation()); - when(mockPlugins.transformations()).thenReturn(Collections.emptySet()); + when(mockPlugins.transformations()).thenReturn(Set.of()); ConfigDef def = ConnectorConfig.enrich(mockPlugins, new ConfigDef(), props, false); assertEnrichedConfigDef(def, prefix, HasDuplicateConfigTransformation.MUST_EXIST_KEY, ConfigDef.Type.BOOLEAN); assertEnrichedConfigDef(def, prefix, TransformationStage.PREDICATE_CONFIG, ConfigDef.Type.STRING); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java index 70edfb0f59877..a8e001544b35c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java @@ -76,14 +76,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; import static org.apache.kafka.common.utils.Time.SYSTEM; import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; @@ -187,6 +184,7 @@ public void setup(boolean enableTopicCreation) { workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put(TOPIC_CREATION_ENABLE_CONFIG, String.valueOf(enableTopicCreation)); workerConfig = new StandaloneConfig(workerProps); sourceConfig = new SourceConnectorConfig(plugins, sourceConnectorProps(TOPIC), true); @@ -226,7 +224,7 @@ public void testErrorHandlingInSinkTasks(boolean enableTopicCreation) { 
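[Editor's note, not part of the patch] The hunks around this point follow two recurring themes in the PR: several test worker-property maps now set bootstrap.servers explicitly (presumably because the test WorkerConfig no longer supplies a usable default), and the old Collections/Arrays helpers are replaced by the JDK 9+ List.of/Set.of/Map.of factories. The sketch below (all names illustrative only) shows the behavioral differences behind those changes, including why the ConnectMetricsTest TAGS field moves from Map.of to a LinkedHashMap: the factory collections are immutable, reject nulls, and make no promise about iteration order, while LinkedHashMap keeps insertion order.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ImmutableFactoriesSketch {
    public static void main(String[] args) {
        // JDK 9+ factories used throughout these test changes:
        List<String> records = List.of("r1", "r2", "r3");   // replaces Arrays.asList(...)
        Set<String> created = Set.of("topic");               // replaces Collections.singleton(...)
        Map<String, String> offset = Map.of("key", "12");    // replaces Collections.singletonMap(...)

        // The factory collections are unmodifiable and null-hostile, so they only fit
        // call sites that never mutate the collection or store null entries.
        try {
            records.set(0, "other");
        } catch (UnsupportedOperationException expected) {
            System.out.println("List.of(...) is immutable");
        }

        // Map.of does not define an iteration order; LinkedHashMap does, which is why an
        // order-sensitive tag map is built imperatively in a static initializer instead.
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("t1", "v1");
        tags.put("t2", "v2");
        System.out.println(created + " / " + offset.keySet() + " / " + tags.keySet());
    }
}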
LogReporter> reporter = new LogReporter.Sink(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator> retryWithToleranceOperator = operator(); - createSinkTask(initialState, retryWithToleranceOperator, singletonList(reporter)); + createSinkTask(initialState, retryWithToleranceOperator, List.of(reporter)); // valid json ConsumerRecord record1 = new ConsumerRecord<>( @@ -278,14 +276,14 @@ public void testErrorHandlingInSourceTasks(boolean enableTopicCreation) throws E LogReporter reporter = new LogReporter.Source(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator retryWithToleranceOperator = operator(); - createSourceTask(initialState, retryWithToleranceOperator, singletonList(reporter)); + createSourceTask(initialState, retryWithToleranceOperator, List.of(reporter)); // valid json Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build(); Struct struct1 = new Struct(valSchema).put("val", 1234); - SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1); + SourceRecord record1 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct1); Struct struct2 = new Struct(valSchema).put("val", 6789); - SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2); + SourceRecord record2 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct2); when(workerSourceTask.isStopping()) .thenReturn(false) @@ -295,8 +293,8 @@ public void testErrorHandlingInSourceTasks(boolean enableTopicCreation) throws E doReturn(true).when(workerSourceTask).commitOffsets(); when(sourceTask.poll()) - .thenReturn(singletonList(record1)) - .thenReturn(singletonList(record2)); + .thenReturn(List.of(record1)) + .thenReturn(List.of(record2)); expectTopicCreation(TOPIC); @@ -340,14 +338,14 @@ public void testErrorHandlingInSourceTasksWithBadConverter(boolean enableTopicCr LogReporter reporter = new LogReporter.Source(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator retryWithToleranceOperator = operator(); - createSourceTask(initialState, retryWithToleranceOperator, singletonList(reporter), badConverter()); + createSourceTask(initialState, retryWithToleranceOperator, List.of(reporter), badConverter()); // valid json Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build(); Struct struct1 = new Struct(valSchema).put("val", 1234); - SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1); + SourceRecord record1 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct1); Struct struct2 = new Struct(valSchema).put("val", 6789); - SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2); + SourceRecord record2 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct2); when(workerSourceTask.isStopping()) .thenReturn(false) @@ -357,8 +355,8 @@ public void testErrorHandlingInSourceTasksWithBadConverter(boolean enableTopicCr doReturn(true).when(workerSourceTask).commitOffsets(); when(sourceTask.poll()) - .thenReturn(singletonList(record1)) - .thenReturn(singletonList(record2)); + .thenReturn(List.of(record1)) + .thenReturn(List.of(record2)); expectTopicCreation(TOPIC); workerSourceTask.initialize(TASK_CONFIG); workerSourceTask.initializeAndStart(); @@ -392,7 +390,7 @@ private void assertSinkMetricValue(String name, double expected) { private 
void verifyInitializeSink() { verify(sinkTask).start(TASK_PROPS); verify(sinkTask).initialize(any(WorkerSinkTaskContext.class)); - verify(consumer).subscribe(eq(singletonList(TOPIC)), + verify(consumer).subscribe(eq(List.of(TOPIC)), any(ConsumerRebalanceListener.class)); } @@ -410,9 +408,9 @@ private void assertErrorHandlingMetricValue(String name, double expected) { private void expectTopicCreation(String topic) { if (enableTopicCreation) { - when(admin.describeTopics(topic)).thenReturn(Collections.emptyMap()); - Set created = Collections.singleton(topic); - Set existing = Collections.emptySet(); + when(admin.describeTopics(topic)).thenReturn(Map.of()); + Set created = Set.of(topic); + Set existing = Set.of(); TopicAdmin.TopicCreationResponse response = new TopicAdmin.TopicCreationResponse(created, existing); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(response); } @@ -426,9 +424,9 @@ private void createSinkTask(TargetState initialState, RetryWithToleranceOperator oo.put("schemas.enable", "false"); converter.configure(oo); - Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, ""); + Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, "test"); TransformationChain, SinkRecord> sinkTransforms = - new TransformationChain<>(singletonList(new TransformationStage<>(transformationPlugin, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); + new TransformationChain<>(List.of(new TransformationStage<>(transformationPlugin, "test", null, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); @@ -438,7 +436,7 @@ private void createSinkTask(TargetState initialState, RetryWithToleranceOperator ClusterConfigState.EMPTY, metrics, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, sinkTransforms, consumer, pluginLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, - statusBackingStore, () -> errorReporters, TestPlugins.noOpLoaderSwap()); + statusBackingStore, () -> errorReporters, null, TestPlugins.noOpLoaderSwap()); } private void createSourceTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator, List> errorReporters) { @@ -462,9 +460,9 @@ private Converter badConverter() { private void createSourceTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator, List> errorReporters, Converter converter) { - Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, ""); - TransformationChain sourceTransforms = new TransformationChain<>(singletonList( - new TransformationStage<>(transformationPlugin, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); + Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, "test"); + TransformationChain sourceTransforms = new TransformationChain<>(List.of( + new TransformationStage<>(transformationPlugin, "test", null, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); @@ -477,7 +475,7 @@ private void createSourceTask(TargetState initialState, RetryWithToleranceOperat offsetReader, offsetWriter, offsetStore, workerConfig, ClusterConfigState.EMPTY, metrics, pluginLoader, time, retryWithToleranceOperator, - statusBackingStore, Runnable::run, () -> 
errorReporters, TestPlugins.noOpLoaderSwap())); + statusBackingStore, Runnable::run, () -> errorReporters, null, TestPlugins.noOpLoaderSwap())); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java index a6375398d292b..4067f5aa59d72 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java @@ -68,8 +68,6 @@ import org.mockito.verification.VerificationMode; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -84,7 +82,6 @@ import java.util.function.Consumer; import java.util.stream.Collectors; -import static java.util.Collections.emptySet; import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; @@ -119,7 +116,7 @@ @MockitoSettings(strictness = Strictness.WARN) public class ExactlyOnceWorkerSourceTaskTest { private static final String TOPIC = "topic"; - private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); + private static final Map PARTITION = Map.of("key", "partition".getBytes()); private static final Map OFFSET = offset(12); // Connect-format data @@ -169,7 +166,7 @@ public class ExactlyOnceWorkerSourceTaskTest { private static final SourceRecord SOURCE_RECORD_2 = new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, VALUE_2); - private static final List RECORDS = Arrays.asList(SOURCE_RECORD_1, SOURCE_RECORD_2); + private static final List RECORDS = List.of(SOURCE_RECORD_1, SOURCE_RECORD_2); private final AtomicReference pollLatch = new AtomicReference<>(new CountDownLatch(0)); private final AtomicReference> pollRecords = new AtomicReference<>(RECORDS); @@ -198,6 +195,7 @@ public void setup(boolean enableTopicCreation) throws Exception { Thread.sleep(10); return result; }); + when(sourceTask.version()).thenReturn(null); } @AfterEach @@ -222,8 +220,8 @@ public void teardown() throws Exception { } verify(statusBackingStore, MockitoUtils.anyTimes()).getTopic(any(), any()); - verify(offsetStore, MockitoUtils.anyTimes()).primaryOffsetsTopic(); + verify(sourceTask).version(); verifyNoMoreInteractions(statusListener, producer, sourceTask, admin, offsetWriter, statusBackingStore, offsetStore, preProducerCheck, postProducerCheck); if (metrics != null) metrics.stop(); @@ -231,6 +229,7 @@ public void teardown() throws Exception { private Map workerProps() { Map props = new HashMap<>(); + props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("internal.key.converter", "org.apache.kafka.connect.json.JsonConverter"); @@ -266,7 +265,7 @@ private Map sourceConnectorProps(SourceTask.TransactionBoundary } private static Map offset(int n) { - return Collections.singletonMap("key", n); + return Map.of("key", n); } private void createWorkerTask() { @@ -284,7 +283,7 @@ private void createWorkerTask(TargetState initialState, Converter 
keyConverter, workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, clusterConfigState, metrics, errorHandlingMetrics, plugins.delegatingLoader(), time, RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, - sourceConfig, Runnable::run, preProducerCheck, postProducerCheck, Collections::emptyList, TestPlugins.noOpLoaderSwap()); + sourceConfig, Runnable::run, preProducerCheck, postProducerCheck, List::of, null, TestPlugins.noOpLoaderSwap()); } @ParameterizedTest @@ -295,7 +294,7 @@ public void testRemoveMetrics(boolean enableTopicCreation) throws Exception { workerTask.removeMetrics(); - assertEquals(emptySet(), filterToTaskMetrics(metrics.metrics().metrics().keySet())); + assertEquals(Set.of(), filterToTaskMetrics(metrics.metrics().metrics().keySet())); } private Set filterToTaskMetrics(Set metricNames) { @@ -561,7 +560,7 @@ public void testPollReturnsNoRecords(boolean enableTopicCreation) throws Excepti createWorkerTask(); // Make sure the task returns empty batches from poll before we start polling it - pollRecords.set(Collections.emptyList()); + pollRecords.set(List.of()); when(offsetWriter.beginFlush()).thenReturn(false); @@ -637,7 +636,7 @@ public void testIntervalBasedCommit(boolean enableTopicCreation) throws Exceptio time.sleep(commitInterval * 2); awaitPolls(2); - assertEquals(2, flushCount(), + assertEquals(2, flushCount(), "Two flushes should have taken place after offset commit interval has elapsed again"); awaitShutdown(); @@ -955,7 +954,7 @@ public void testSendRecordsRetries(boolean enableTopicCreation) throws Exception expectConvertHeadersAndKeyValue(); // We're trying to send three records - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); OngoingStubbing> producerSend = when(producer.send(any(), any())); // The first one is sent successfully producerSend = expectSuccessfulSend(producerSend); @@ -965,7 +964,7 @@ public void testSendRecordsRetries(boolean enableTopicCreation) throws Exception expectSuccessfulSend(producerSend); assertFalse(workerTask.sendRecords()); - assertEquals(Arrays.asList(record2, record3), workerTask.toSend); + assertEquals(List.of(record2, record3), workerTask.toSend); verify(producer).beginTransaction(); // When using poll-based transaction boundaries, we do not commit transactions while retrying delivery for a batch verify(producer, never()).commitTransaction(); @@ -1000,7 +999,7 @@ public void testSendRecordsProducerSendFailsImmediately(boolean enableTopicCreat when(producer.send(any(), any())) .thenThrow(new KafkaException("Producer closed while send in progress", new InvalidTopicException(TOPIC))); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(producer).beginTransaction(); @@ -1079,7 +1078,7 @@ private void awaitPolls(int minimum, List records) { } private void awaitEmptyPolls(int minimum) { - awaitPolls(minimum, Collections.emptyList()); + awaitPolls(minimum, List.of()); } private void awaitPolls(int minimum) { @@ -1167,8 +1166,8 @@ private void expectTaskGetTopic() { private void expectPossibleTopicCreation() { if (config.topicCreationEnable()) { - Set created = Collections.singleton(TOPIC); - 
Set existing = Collections.emptySet(); + Set created = Set.of(TOPIC); + Set existing = Set.of(); TopicAdmin.TopicCreationResponse creationResponse = new TopicAdmin.TopicCreationResponse(created, existing); when(admin.createOrFindTopics(any())).thenReturn(creationResponse); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java index f76c05a005169..18edc9c6f8431 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java @@ -27,7 +27,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Collections; +import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -49,7 +49,7 @@ public void testNewRecordHeaders() { assertTrue(sinkRecord.headers().isEmpty()); SinkRecord newRecord = internalSinkRecord.newRecord(TOPIC, 0, null, null, null, - null, null, Collections.singletonList(mock(Header.class))); + null, null, List.of(mock(Header.class))); assertEquals(1, newRecord.headers().size()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java index d26cbcd1fdb2f..f2b034c08d391 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java @@ -21,6 +21,8 @@ import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -47,6 +49,13 @@ public void setup() { time = new MockTime(0, INITIAL_TIME, 0); loggers = (Loggers.Log4jLoggers) Loggers.newInstance(time); } + + @AfterEach + public void tearDown() { + // Reset LoggerContext to its initial configuration. + // This ensures any log level changes made in a test do not leak into subsequent tests. 
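[Editor's note, not part of the patch] The tearDown being added to LoggersTest here resets Log4j 2 state between tests: getContext(false) fetches the caller's LoggerContext, and the reconfigure() call that follows rebuilds the configuration from its original source, discarding any programmatic level changes. A minimal standalone sketch of the same pattern (class and logger names are illustrative only):

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configurator;

public class Log4jResetSketch {
    public static void main(String[] args) {
        // Simulate what a test does when it changes a level through the Connect loggers API.
        Configurator.setLevel("org.example.some.logger", Level.TRACE);

        // Rebuild the configuration from its original source (e.g. log4j2.properties),
        // so the ad-hoc TRACE level above does not leak into whatever runs next.
        LoggerContext.getContext(false).reconfigure();
    }
}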
+ LoggerContext.getContext(false).reconfigure(); + } @Test public void testLevelWithNullLoggerName() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java index c5f9f8314d9ef..b6548651418d6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java @@ -49,6 +49,7 @@ public class MockConnectMetrics extends ConnectMetrics { DEFAULT_WORKER_CONFIG.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); DEFAULT_WORKER_CONFIG.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); DEFAULT_WORKER_CONFIG.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); + DEFAULT_WORKER_CONFIG.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); } public MockConnectMetrics() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java index ceaf446ffd863..6d4692bae1053 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java @@ -34,8 +34,6 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -72,7 +70,7 @@ public void testGetLoggersIgnoresNullLevels() { Loggers loggers = new TestLoggers(root, a, b); - Map expectedLevels = Collections.singletonMap( + Map expectedLevels = Map.of( "b", new LoggerLevel(Level.INFO.toString(), null) ); @@ -137,7 +135,7 @@ public void testSetLevel() { TestLoggers loggers = new TestLoggers(root, x, y, z, w); List modified = loggers.setLevel("a.b.c.p", Level.WARN.name()); - assertEquals(Arrays.asList("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); + assertEquals(List.of("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); assertEquals(Level.WARN.toString(), loggers.level("a.b.c.p").level()); assertEquals(Level.WARN, x.getLevel()); assertEquals(Level.WARN, y.getLevel()); @@ -201,7 +199,7 @@ public void testSetRootLevel() { Loggers loggers = new TestLoggers(root, x, y, z, w); List modified = loggers.setLevel(rootLoggerName, Level.DEBUG.name()); - assertEquals(Arrays.asList("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", rootLoggerName), modified); + assertEquals(List.of("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", rootLoggerName), modified); assertEquals(Level.DEBUG, p.getLevel()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java index 8d6f54ce2581b..d0f3f974c635f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java @@ -35,17 +35,15 @@ public class RestartPlanTest { @Test public void testRestartPlan() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), - "foo", - null + AbstractStatus.State.RESTARTING.name(), "foo", 
null, null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); - tasks.add(new TaskState(3, AbstractStatus.State.RESTARTING.name(), "worker1", null)); - tasks.add(new TaskState(4, AbstractStatus.State.DESTROYED.name(), "worker1", null)); - tasks.add(new TaskState(5, AbstractStatus.State.RUNNING.name(), "worker1", null)); - tasks.add(new TaskState(6, AbstractStatus.State.RUNNING.name(), "worker1", null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); + tasks.add(new TaskState(3, AbstractStatus.State.RESTARTING.name(), "worker1", null, null)); + tasks.add(new TaskState(4, AbstractStatus.State.DESTROYED.name(), "worker1", null, null)); + tasks.add(new TaskState(5, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); + tasks.add(new TaskState(6, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); @@ -61,13 +59,11 @@ public void testRestartPlan() { @Test public void testNoRestartsPlan() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RUNNING.name(), - "foo", - null + AbstractStatus.State.RUNNING.name(), "foo", null, null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); RestartPlan restartPlan = new RestartPlan(restartRequest, connectorStateInfo); @@ -81,13 +77,11 @@ public void testNoRestartsPlan() { @Test public void testRestartsOnlyConnector() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), - "foo", - null + AbstractStatus.State.RESTARTING.name(), "foo", null, null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); RestartPlan restartPlan = new RestartPlan(restartRequest, connectorStateInfo); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java index 1ebb4e053196a..0b1f4efc85029 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java @@ -40,7 +40,6 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -73,6 +72,7 @@ public class SourceTaskOffsetCommitterTest { @BeforeEach public void setup() { Map workerProps = new HashMap<>(); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -94,7 +94,7 @@ public void testSchedule() { committer.schedule(taskId, task); assertNotNull(taskWrapper.getValue()); - assertEquals(singletonMap(taskId, commitFuture), committers); + assertEquals(Map.of(taskId, commitFuture), committers); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java index 6b8368e002c43..043cf59b3f12e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -37,9 +38,9 @@ public class SubmittedRecordsTest { - private static final Map PARTITION1 = Collections.singletonMap("subreddit", "apachekafka"); - private static final Map PARTITION2 = Collections.singletonMap("subreddit", "adifferentvalue"); - private static final Map PARTITION3 = Collections.singletonMap("subreddit", "asdfqweoicus"); + private static final Map PARTITION1 = Map.of("subreddit", "apachekafka"); + private static final Map PARTITION2 = Map.of("subreddit", "adifferentvalue"); + private static final Map PARTITION3 = Map.of("subreddit", "asdfqweoicus"); private AtomicInteger offset; @@ -68,22 +69,22 @@ public void testNoRecords() { @Test public void testNoCommittedRecords() { for (int i = 0; i < 3; i++) { - for (Map partition : Arrays.asList(PARTITION1, PARTITION2, PARTITION3)) { + for (Map partition : List.of(PARTITION1, PARTITION2, PARTITION3)) { submittedRecords.submit(partition, newOffset()); } } CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - 
assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); } @Test @@ -94,7 +95,7 @@ public void testSingleAck() { CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); // Record has been submitted but not yet acked; cannot commit offsets for it yet assertFalse(committableOffsets.isEmpty()); - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION1); assertNoEmptyDeques(); @@ -102,7 +103,7 @@ public void testSingleAck() { committableOffsets = submittedRecords.committableOffsets(); // Record has been acked; can commit offsets for it assertFalse(committableOffsets.isEmpty()); - assertEquals(Collections.singletonMap(PARTITION1, offset), committableOffsets.offsets()); + assertEquals(Map.of(PARTITION1, offset), committableOffsets.offsets()); assertMetadataNoPending(committableOffsets, 1); // Everything has been ack'd and consumed; make sure that it's been cleaned up to avoid memory leaks @@ -110,7 +111,7 @@ public void testSingleAck() { committableOffsets = submittedRecords.committableOffsets(); // Old offsets should be wiped - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertTrue(committableOffsets.isEmpty()); } @@ -128,27 +129,27 @@ public void testMultipleAcksAcrossMultiplePartitions() { CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); // No records ack'd yet; can't commit any offsets - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 4, 2, 2, PARTITION1, PARTITION2); assertNoEmptyDeques(); partition1Record2.ack(); committableOffsets = submittedRecords.committableOffsets(); // One record has been ack'd, but a record that comes before it and corresponds to the same source partition hasn't been - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 4, 2, 2, PARTITION1, PARTITION2); assertNoEmptyDeques(); partition2Record1.ack(); committableOffsets = submittedRecords.committableOffsets(); // We can commit the first offset for the second partition - assertEquals(Collections.singletonMap(PARTITION2, partition2Offset1), committableOffsets.offsets()); + assertEquals(Map.of(PARTITION2, partition2Offset1), committableOffsets.offsets()); assertMetadata(committableOffsets, 1, 3, 2, 2, PARTITION1); assertNoEmptyDeques(); committableOffsets = submittedRecords.committableOffsets(); // No new offsets to commit - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 3, 2, 2, PARTITION1); assertNoEmptyDeques(); @@ -176,7 +177,7 @@ public void testRemoveLastSubmittedRecord() { SubmittedRecord submittedRecord = submittedRecords.submit(PARTITION1, newOffset()); CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION1); assertTrue(submittedRecord.drop(), "First attempt to remove record from submitted queue should succeed"); @@ -208,7 +209,7 @@ public void 
testRemoveNotLastSubmittedRecord() { committableOffsets = submittedRecords.committableOffsets(); // Even if SubmittedRecords::remove is broken, we haven't ack'd anything yet, so there should be no committable offsets - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION2); assertNoEmptyDeques(); // The only record for this partition has been removed; we shouldn't be tracking a deque for it anymore @@ -217,14 +218,14 @@ public void testRemoveNotLastSubmittedRecord() { recordToRemove.ack(); committableOffsets = submittedRecords.committableOffsets(); // Even though the record has somehow been acknowledged, it should not be counted when collecting committable offsets - assertEquals(Collections.emptyMap(), committableOffsets.offsets()); + assertEquals(Map.of(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION2); assertNoEmptyDeques(); lastSubmittedRecord.ack(); committableOffsets = submittedRecords.committableOffsets(); // Now that the last-submitted record has been ack'd, we should be able to commit its offset - assertEquals(Collections.singletonMap(PARTITION2, partition2Offset), committableOffsets.offsets()); + assertEquals(Map.of(PARTITION2, partition2Offset), committableOffsets.offsets()); assertMetadata(committableOffsets, 1, 0, 0, 0, (Map) null); assertFalse(committableOffsets.hasPending()); @@ -338,7 +339,7 @@ public void testAwaitMessagesReturnsAfterAsynchronousAck() throws Exception { } private void assertNoRemainingDeques() { - assertEquals(Collections.emptyMap(), submittedRecords.records, "Internal records map should be completely empty"); + assertEquals(Map.of(), submittedRecords.records, "Internal records map should be completely empty"); } @SafeVarargs @@ -355,7 +356,7 @@ private void assertNoEmptyDeques() { } private Map newOffset() { - return Collections.singletonMap("timestamp", offset.getAndIncrement()); + return Map.of("timestamp", offset.getAndIncrement()); } private void assertMetadataNoPending(CommittableOffsets committableOffsets, int committableMessages) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java index e2791a63f7b26..4b3935c35f8d7 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java @@ -26,7 +26,8 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import static java.util.Collections.singletonMap; +import java.util.Map; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -36,8 +37,8 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class TransformationStageTest { - private final SourceRecord initial = new SourceRecord(singletonMap("initial", 1), null, null, null, null); - private final SourceRecord transformed = new SourceRecord(singletonMap("transformed", 2), null, null, null, null); + private final SourceRecord initial = new SourceRecord(Map.of("initial", 1), null, null, null, null); + private final SourceRecord transformed = new SourceRecord(Map.of("transformed", 2), null, null, null, null); @Test public void apply() throws Exception { @@ -61,8 
+62,12 @@ private void applyAndAssert(boolean predicateResult, boolean negate, SourceRecor } TransformationStage stage = new TransformationStage<>( predicatePlugin, + "testPredicate", + null, negate, transformationPlugin, + "testTransformation", + null, TestPlugins.noOpLoaderSwap() ); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java index 4ad4c11ee89cd..07fa131694b77 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java @@ -27,7 +27,6 @@ import org.mockito.MockedStatic; import org.mockito.internal.stubbing.answers.CallsRealMethods; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -59,7 +58,7 @@ public void teardown() { public void testLookupKafkaClusterId() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = Arrays.asList(broker1, broker2); + List cluster = List.of(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). brokers(cluster).build(); assertEquals(MockAdminClient.DEFAULT_CLUSTER_ID, WorkerConfig.lookupKafkaClusterId(adminClient)); @@ -69,7 +68,7 @@ public void testLookupKafkaClusterId() { public void testLookupNullKafkaClusterId() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = Arrays.asList(broker1, broker2); + List cluster = List.of(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). brokers(cluster).clusterId(null).build(); assertNull(WorkerConfig.lookupKafkaClusterId(adminClient)); @@ -79,7 +78,7 @@ public void testLookupNullKafkaClusterId() { public void testLookupKafkaClusterIdTimeout() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = Arrays.asList(broker1, broker2); + List cluster = List.of(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). 
brokers(cluster).build(); adminClient.timeoutNextRequest(1); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java index c3a8f151750ec..9aae6848d1940 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java @@ -27,7 +27,6 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -66,13 +65,13 @@ public class WorkerConfigTransformerTest { @BeforeEach public void setup() { - configTransformer = new WorkerConfigTransformer(worker, Collections.singletonMap("test", new TestConfigProvider())); + configTransformer = new WorkerConfigTransformer(worker, Map.of("test", new TestConfigProvider())); } @Test public void testReplaceVariable() { // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKey}")); + Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKey}")); // Assertions assertEquals(TEST_RESULT, result.get(MY_KEY)); @@ -97,7 +96,7 @@ public void testReplaceVariableWithTTLAndScheduleRestart() { when(herder.restartConnector(eq(1L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId); // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}")); + Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY)); @@ -112,14 +111,14 @@ public void testReplaceVariableWithTTLFirstCancelThenScheduleRestart() { when(herder.restartConnector(eq(10L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId); // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}")); + Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY)); verify(herder).restartConnector(eq(1L), eq(MY_CONNECTOR), notNull()); // Execution - result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithLongerTTL}")); + result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithLongerTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_LONGER_TTL, result.get(MY_KEY)); @@ -147,14 +146,14 @@ public ConfigData get(String path) { public ConfigData get(String path, Set keys) { if (path.equals(TEST_PATH)) { if (keys.contains(TEST_KEY)) { - return new ConfigData(Collections.singletonMap(TEST_KEY, TEST_RESULT)); + return new ConfigData(Map.of(TEST_KEY, TEST_RESULT)); } else if (keys.contains(TEST_KEY_WITH_TTL)) { - return new ConfigData(Collections.singletonMap(TEST_KEY_WITH_TTL, TEST_RESULT_WITH_TTL), 1L); + return new ConfigData(Map.of(TEST_KEY_WITH_TTL, TEST_RESULT_WITH_TTL), 1L); } else if (keys.contains(TEST_KEY_WITH_LONGER_TTL)) { - return new ConfigData(Collections.singletonMap(TEST_KEY_WITH_LONGER_TTL, TEST_RESULT_WITH_LONGER_TTL), 10L); + return new ConfigData(Map.of(TEST_KEY_WITH_LONGER_TTL, TEST_RESULT_WITH_LONGER_TTL), 10L); } } - 
return new ConfigData(Collections.emptyMap()); + return new ConfigData(Map.of()); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java index 2607ee8b03b3c..ce052dd243969 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java @@ -74,9 +74,7 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -91,8 +89,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; -import static java.util.Arrays.asList; -import static java.util.Collections.singleton; import static org.apache.kafka.connect.runtime.WorkerTestUtils.getTransformationChain; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -135,7 +131,7 @@ public class WorkerSinkTaskTest { private static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); private static final Set INITIAL_ASSIGNMENT = - new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); + Set.of(TOPIC_PARTITION, TOPIC_PARTITION2); private static final Map TASK_PROPS = new HashMap<>(); @@ -183,6 +179,7 @@ public class WorkerSinkTaskTest { public void setUp() { time = new MockTime(); Map workerProps = new HashMap<>(); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -197,11 +194,11 @@ private void createTask(TargetState initialState) { } private void createTask(TargetState initialState, TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { - createTask(initialState, keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, transformationChain); + createTask(initialState, keyConverter, valueConverter, headerConverter, toleranceOperator, List::of, transformationChain); } private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { - createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); + createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), List::of, transformationChain); } private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, @@ -229,7 +226,7 @@ private void createTask(ConnectorTaskId taskId, SinkTask task, TaskStatus.Listen taskId, task, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, connectMetrics, keyConverterPlugin, valueConverterPlugin, errorMetrics, headerConverterPlugin, transformationChain, consumer, loader, time, - retryWithToleranceOperator, null, statusBackingStore, errorReportersSupplier, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, null, statusBackingStore, errorReportersSupplier, null, 
TestPlugins.noOpLoaderSwap()); } @AfterEach @@ -318,7 +315,7 @@ public void testPause() { verify(consumer).wakeup(); // Offset commit as requested when pausing; No records returned by consumer.poll() - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); workerTask.iteration(); // now paused time.sleep(30000L); @@ -340,7 +337,7 @@ public void testPause() { // And unpause verify(statusListener).onResume(taskId); verify(consumer, times(2)).wakeup(); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(singleton(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); verify(sinkTask, times(4)).put(anyList()); } @@ -363,7 +360,7 @@ public void testShutdown() throws Exception { sinkTaskContext.getValue().requestCommit(); // Force an offset commit // second iteration - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); workerTask.iteration(); verify(sinkTask, times(2)).put(anyList()); @@ -441,7 +438,7 @@ public void testPollRedelivery() { time.sleep(30000L); verify(sinkTask, times(3)).put(anyList()); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); assertSinkMetricValue("sink-record-read-total", 1.0); assertSinkMetricValue("sink-record-send-total", 1.0); @@ -483,14 +480,14 @@ public void testPollRedeliveryWithConsumerRebalance() { workerTask.initializeAndStart(); verifyInitializeTask(); - Set newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)); + Set newAssignment = Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT) .thenReturn(newAssignment, newAssignment, newAssignment) - .thenReturn(Collections.singleton(TOPIC_PARTITION3), - Collections.singleton(TOPIC_PARTITION3), - Collections.singleton(TOPIC_PARTITION3)); + .thenReturn(Set.of(TOPIC_PARTITION3), + Set.of(TOPIC_PARTITION3), + Set.of(TOPIC_PARTITION3)); INITIAL_ASSIGNMENT.forEach(tp -> when(consumer.position(tp)).thenReturn(FIRST_OFFSET)); when(consumer.position(TOPIC_PARTITION3)).thenReturn(FIRST_OFFSET); @@ -503,8 +500,8 @@ public void testPollRedeliveryWithConsumerRebalance() { .thenAnswer(expectConsumerPoll(1)) // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); - rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Set.of()); + rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) .thenAnswer(expectConsumerPoll(0)) @@ -513,7 +510,7 @@ public void testPollRedeliveryWithConsumerRebalance() { ConsumerRecord newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE); rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT); - rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList()); + rebalanceListener.getValue().onPartitionsAssigned(List.of()); return new ConsumerRecords<>(Map.of(TOPIC_PARTITION3, List.of(newRecord)), Map.of(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 1, Optional.empty(), ""))); }); @@ -534,7 +531,7 @@ public void 
testPollRedeliveryWithConsumerRebalance() { verify(consumer).pause(INITIAL_ASSIGNMENT); workerTask.iteration(); - verify(sinkTask).open(Collections.singleton(TOPIC_PARTITION3)); + verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue verify(consumer).pause(newAssignment); @@ -543,13 +540,13 @@ public void testPollRedeliveryWithConsumerRebalance() { final Map offsets = INITIAL_ASSIGNMENT.stream() .collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET))); when(sinkTask.preCommit(offsets)).thenReturn(offsets); - newAssignment = Collections.singleton(TOPIC_PARTITION3); + newAssignment = Set.of(TOPIC_PARTITION3); workerTask.iteration(); verify(sinkTask).close(INITIAL_ASSIGNMENT); // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked - newAssignment.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); + newAssignment.forEach(tp -> verify(consumer).resume(Set.of(tp))); } @Test @@ -636,10 +633,10 @@ public void testPartialRevocationAndAssignment() { when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT) - .thenReturn(Collections.singleton(TOPIC_PARTITION2)) - .thenReturn(Collections.singleton(TOPIC_PARTITION2)) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) + .thenReturn(Set.of(TOPIC_PARTITION2)) + .thenReturn(Set.of(TOPIC_PARTITION2)) + .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) + .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT); @@ -656,18 +653,18 @@ public void testPartialRevocationAndAssignment() { return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(singleton(TOPIC_PARTITION)); - rebalanceListener.getValue().onPartitionsAssigned(Collections.emptySet()); + rebalanceListener.getValue().onPartitionsRevoked(Set.of(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsAssigned(Set.of()); return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); - rebalanceListener.getValue().onPartitionsAssigned(singleton(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Set.of()); + rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsLost(singleton(TOPIC_PARTITION3)); - rebalanceListener.getValue().onPartitionsAssigned(singleton(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsLost(Set.of(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION)); return ConsumerRecords.empty(); }); @@ -683,19 +680,19 @@ public void testPartialRevocationAndAssignment() { // Second iteration--second call to poll, partial consumer revocation workerTask.iteration(); - verify(sinkTask).close(singleton(TOPIC_PARTITION)); - verify(sinkTask, times(2)).put(Collections.emptyList()); + verify(sinkTask).close(Set.of(TOPIC_PARTITION)); + verify(sinkTask, times(2)).put(List.of()); // Third iteration--third call to poll, partial consumer assignment workerTask.iteration(); - 
verify(sinkTask).open(singleton(TOPIC_PARTITION3)); - verify(sinkTask, times(3)).put(Collections.emptyList()); + verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); + verify(sinkTask, times(3)).put(List.of()); // Fourth iteration--fourth call to poll, one partition lost; can't commit offsets for it, one new partition assigned workerTask.iteration(); - verify(sinkTask).close(singleton(TOPIC_PARTITION3)); - verify(sinkTask).open(singleton(TOPIC_PARTITION)); - verify(sinkTask, times(4)).put(Collections.emptyList()); + verify(sinkTask).close(Set.of(TOPIC_PARTITION3)); + verify(sinkTask).open(Set.of(TOPIC_PARTITION)); + verify(sinkTask, times(4)).put(List.of()); } @Test @@ -710,12 +707,12 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT) - .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) - .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) - .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) - .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))); + .thenReturn(Set.of(TOPIC_PARTITION2)) + .thenReturn(Set.of(TOPIC_PARTITION2)) + .thenReturn(Set.of(TOPIC_PARTITION2)) + .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) + .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) + .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)); INITIAL_ASSIGNMENT.forEach(tp -> when(consumer.position(tp)).thenReturn(FIRST_OFFSET)); when(consumer.position(TOPIC_PARTITION3)).thenReturn(FIRST_OFFSET); @@ -730,14 +727,14 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { .thenAnswer(expectConsumerPoll(1)) // Third poll; assignment changes to [TP2] .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Collections.singleton(TOPIC_PARTITION)); - rebalanceListener.getValue().onPartitionsAssigned(Collections.emptySet()); + rebalanceListener.getValue().onPartitionsRevoked(Set.of(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsAssigned(Set.of()); return ConsumerRecords.empty(); }) // Fourth poll; assignment changes to [TP2, TP3] .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); - rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Set.of()); + rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) // Fifth poll; an offset commit takes place @@ -756,13 +753,13 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { doNothing().when(consumer).commitSync(offsets); workerTask.iteration(); - verify(sinkTask).close(Collections.singleton(TOPIC_PARTITION)); - verify(sinkTask, times(2)).put(Collections.emptyList()); + verify(sinkTask).close(Set.of(TOPIC_PARTITION)); + verify(sinkTask, times(2)).put(List.of()); // Fourth iteration--fourth call to poll, partial consumer assignment workerTask.iteration(); - verify(sinkTask).open(Collections.singleton(TOPIC_PARTITION3)); + verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); final Map workerCurrentOffsets = new HashMap<>(); workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET)); @@ -819,7 +816,7 @@ public void 
testWakeupInCommitSyncCausesRetry() { verify(sinkTask).close(INITIAL_ASSIGNMENT); verify(sinkTask, times(2)).open(INITIAL_ASSIGNMENT); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); verify(statusListener).onResume(taskId); @@ -905,7 +902,7 @@ public void testRaisesFailedRetriableExceptionFromConvert() { @Test public void testSkipsFailedRetriableExceptionFromConvert() { createTask(initialState, keyConverter, valueConverter, headerConverter, - RetryWithToleranceOperatorTest.allOperator(), Collections::emptyList, transformationChain); + RetryWithToleranceOperatorTest.allOperator(), List::of, transformationChain); workerTask.initialize(TASK_CONFIG); workerTask.initializeAndStart(); @@ -923,7 +920,7 @@ public void testSkipsFailedRetriableExceptionFromConvert() { workerTask.iteration(); workerTask.execute(); - verify(sinkTask, times(3)).put(Collections.emptyList()); + verify(sinkTask, times(3)).put(List.of()); } @Test @@ -974,7 +971,7 @@ public void testSkipsFailedRetriableExceptionFromTransform() { workerTask.iteration(); workerTask.execute(); - verify(sinkTask, times(3)).put(Collections.emptyList()); + verify(sinkTask, times(3)).put(List.of()); } @Test @@ -1273,7 +1270,7 @@ public void testSinkTasksHandleCloseErrors() { .when(sinkTask).put(anyList()); Throwable closeException = new RuntimeException(); - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); // Throw another exception while closing the task's assignment doThrow(closeException).when(sinkTask).close(any(Collection.class)); @@ -1310,7 +1307,7 @@ public void testSuppressCloseErrors() { .doThrow(putException) .when(sinkTask).put(anyList()); - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); // Throw another exception while closing the task's assignment doThrow(closeException).when(sinkTask).close(any(Collection.class)); @@ -1390,7 +1387,7 @@ public void testCommitWithOutOfOrderCallback() { workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET)); final List originalPartitions = new ArrayList<>(INITIAL_ASSIGNMENT); - final List rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + final List rebalancedPartitions = List.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); final Map rebalanceOffsets = new HashMap<>(); rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION)); rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2)); @@ -1532,7 +1529,10 @@ public void testCommitWithOutOfOrderCallback() { assertEquals(rebalanceOffsets, workerTask.lastCommittedOffsets()); // onPartitionsRevoked - verify(sinkTask).close(new ArrayList<>(workerCurrentOffsets.keySet())); + ArgumentCaptor> closeCaptor = ArgumentCaptor.forClass(Collection.class); + verify(sinkTask).close(closeCaptor.capture()); + Collection actualClosePartitions = closeCaptor.getValue(); + assertEquals(workerCurrentOffsets.keySet(), new HashSet<>(actualClosePartitions)); verify(consumer).commitSync(anyMap()); // onPartitionsAssigned - step 2 @@ -1816,7 +1816,7 @@ public void testHeadersWithCustomConverter() { expectPollInitialAssignment() .thenAnswer((Answer>) invocation -> { - List> records = Arrays.asList( + List> records = List.of( new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 
+ 1, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, keyA.getBytes(), valueA.getBytes(encodingA), headersA, Optional.empty()), new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 2, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, @@ -1880,7 +1880,7 @@ public void testPartitionCountInCaseOfPartitionRevocation() { createTask(taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, mockConsumer, pluginLoader, time, - RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, Collections::emptyList); + RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, List::of); mockConsumer.updateBeginningOffsets( new HashMap<>() {{ put(TOPIC_PARTITION, 0L); @@ -1893,7 +1893,7 @@ public void testPartitionCountInCaseOfPartitionRevocation() { mockConsumer.rebalance(INITIAL_ASSIGNMENT); assertSinkMetricValue("partition-count", 2); // Revoked "TOPIC_PARTITION" and second re-balance with "TOPIC_PARTITION2" - mockConsumer.rebalance(Collections.singleton(TOPIC_PARTITION2)); + mockConsumer.rebalance(Set.of(TOPIC_PARTITION2)); assertSinkMetricValue("partition-count", 1); // Closing the Worker Sink Task which will update the partition count as 0. workerTask.close(); @@ -1901,12 +1901,12 @@ public void testPartitionCountInCaseOfPartitionRevocation() { } private void expectRebalanceRevocationError(RuntimeException e) { - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); doThrow(e).when(sinkTask).close(INITIAL_ASSIGNMENT); } private void expectRebalanceAssignmentError(RuntimeException e) { - when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); + when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); when(consumer.position(TOPIC_PARTITION)).thenReturn(FIRST_OFFSET); when(consumer.position(TOPIC_PARTITION2)).thenReturn(FIRST_OFFSET); @@ -1914,7 +1914,7 @@ private void expectRebalanceAssignmentError(RuntimeException e) { } private void verifyInitializeTask() { - verify(consumer).subscribe(eq(Collections.singletonList(TOPIC)), rebalanceListener.capture()); + verify(consumer).subscribe(eq(List.of(TOPIC)), rebalanceListener.capture()); verify(sinkTask).initialize(sinkTaskContext.capture()); verify(sinkTask).start(TASK_PROPS); } @@ -1934,7 +1934,7 @@ private OngoingStubbing> expectPollInitialAssign private void verifyPollInitialAssignment() { verify(sinkTask).open(INITIAL_ASSIGNMENT); verify(consumer, atLeastOnce()).assignment(); - verify(sinkTask).put(Collections.emptyList()); + verify(sinkTask).put(List.of()); } private Answer> expectConsumerPoll(final int numMessages) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java index 740211180984a..729b5f0436c2b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java @@ -61,18 +61,14 @@ import java.io.IOException; import java.time.Duration; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import 
java.util.function.Function; -import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -111,8 +107,8 @@ public class WorkerSinkTaskThreadedTest { private static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2); private static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new TopicPartition(TOPIC, 200); - private static final Set INITIAL_ASSIGNMENT = new HashSet<>(Arrays.asList( - TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)); + private static final Set INITIAL_ASSIGNMENT = Set.of( + TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); private static final Map TASK_PROPS = new HashMap<>(); private static final long TIMESTAMP = 42L; @@ -175,6 +171,7 @@ public void setup() { workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); WorkerConfig workerConfig = new StandaloneConfig(workerProps); Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); @@ -183,7 +180,7 @@ public void setup() { taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, consumer, pluginLoader, time, RetryWithToleranceOperatorTest.noneOperator(), null, statusBackingStore, - Collections::emptyList, TestPlugins.noOpLoaderSwap()); + List::of, null, TestPlugins.noOpLoaderSwap()); recordsReturned = 0; } @@ -439,7 +436,7 @@ public void testAssignmentPauseResume() { doAnswer(invocation -> { return null; // initial assignment }).doAnswer(invocation -> { - assertEquals(new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)), sinkTaskContext.getValue().assignment()); + assertEquals(Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3), sinkTaskContext.getValue().assignment()); return null; }).doAnswer(invocation -> { try { @@ -461,11 +458,11 @@ public void testAssignmentPauseResume() { return null; }).when(sinkTask).put(any(Collection.class)); - doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).pause(singletonList(UNASSIGNED_TOPIC_PARTITION)); - doAnswer(invocation -> null).when(consumer).pause(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); + doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).pause(List.of(UNASSIGNED_TOPIC_PARTITION)); + doAnswer(invocation -> null).when(consumer).pause(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); - doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).resume(singletonList(UNASSIGNED_TOPIC_PARTITION)); - doAnswer(invocation -> null).when(consumer).resume(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); + doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).resume(List.of(UNASSIGNED_TOPIC_PARTITION)); + doAnswer(invocation -> null).when(consumer).resume(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); workerTask.initialize(TASK_CONFIG); 
workerTask.initializeAndStart(); @@ -482,8 +479,8 @@ public void testAssignmentPauseResume() { verifyStopTask(); verifyTaskGetTopic(3); - verify(consumer, atLeastOnce()).pause(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); - verify(consumer, atLeastOnce()).resume(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); + verify(consumer, atLeastOnce()).pause(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); + verify(consumer, atLeastOnce()).resume(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); } @Test @@ -558,7 +555,7 @@ public void testRewindOnRebalanceDuringPoll() { } private void verifyInitializeTask() { - verify(consumer).subscribe(eq(singletonList(TOPIC)), rebalanceListener.capture()); + verify(consumer).subscribe(eq(List.of(TOPIC)), rebalanceListener.capture()); verify(sinkTask).initialize(sinkTaskContext.capture()); verify(sinkTask).start(TASK_PROPS); } @@ -571,7 +568,7 @@ private void expectInitialAssignment() { private void verifyInitialAssignment() { verify(sinkTask).open(INITIAL_ASSIGNMENT); - verify(sinkTask).put(Collections.emptyList()); + verify(sinkTask).put(List.of()); } private void verifyStopTask() { @@ -615,7 +612,7 @@ private void expectPolls(final long pollDelayMs) { @SuppressWarnings("SameParameterValue") private void expectRebalanceDuringPoll(long startOffset) { - final List partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + final List partitions = List.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); final Map offsets = new HashMap<>(); offsets.put(TOPIC_PARTITION, startOffset); @@ -652,7 +649,7 @@ private void expectPreCommit(ExpectOffsetCommitCommand... commands) { @Override public Object answer(InvocationOnMock invocation) { ExpectOffsetCommitCommand commitCommand = commands[index++]; - // All assigned partitions will have offsets committed, but we've only processed messages/updated + // All assigned partitions will have offsets committed, but we've only processed messages/updated // offsets for one final Map offsetsToCommit = offsetsToCommitFn.apply(commitCommand.expectedMessages); @@ -665,7 +662,7 @@ public Object answer(InvocationOnMock invocation) { } }).when(sinkTask).preCommit(anyMap()); } - + private void expectOffsetCommit(ExpectOffsetCommitCommand... 
commands) { doAnswer(new Answer<>() { int index = 0; @@ -723,19 +720,8 @@ private RecordHeaders emptyHeaders() { private abstract static class TestSinkTask extends SinkTask { } - private static class ExpectOffsetCommitCommand { - final long expectedMessages; - final RuntimeException error; - final Exception consumerCommitError; - final long consumerCommitDelayMs; - final boolean invokeCallback; - - private ExpectOffsetCommitCommand(long expectedMessages, RuntimeException error, Exception consumerCommitError, long consumerCommitDelayMs, boolean invokeCallback) { - this.expectedMessages = expectedMessages; - this.error = error; - this.consumerCommitError = consumerCommitError; - this.consumerCommitDelayMs = consumerCommitDelayMs; - this.invokeCallback = invokeCallback; - } + private record ExpectOffsetCommitCommand(long expectedMessages, RuntimeException error, + Exception consumerCommitError, long consumerCommitDelayMs, + boolean invokeCallback) { } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java index 23fb3618f8191..4fca3f8ac6153 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java @@ -71,8 +71,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -128,8 +126,8 @@ public class WorkerSourceTaskTest { public static final String POLL_TIMEOUT_MSG = "Timeout waiting for poll"; private static final String TOPIC = "topic"; - private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); - private static final Map OFFSET = Collections.singletonMap("key", 12); + private static final Map PARTITION = Map.of("key", "partition".getBytes()); + private static final Map OFFSET = Map.of("key", 12); // Connect-format data private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA; @@ -185,7 +183,7 @@ public class WorkerSourceTaskTest { private static final TaskConfig TASK_CONFIG = new TaskConfig(TASK_PROPS); - private static final List RECORDS = Collections.singletonList( + private static final List RECORDS = List.of( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD) ); @@ -204,6 +202,7 @@ private Map workerProps(boolean enableTopicCreation) { props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("offset.storage.file.filename", "/tmp/connect.offsets"); props.put(TOPIC_CREATION_ENABLE_CONFIG, String.valueOf(enableTopicCreation)); + props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return props; } @@ -255,7 +254,7 @@ private void createWorkerTask(TargetState initialState, Converter keyConverter, workerTask = new WorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, clusterConfigState, metrics, plugins.delegatingLoader(), Time.SYSTEM, - retryWithToleranceOperator, statusBackingStore, Runnable::run, Collections::emptyList, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, 
statusBackingStore, Runnable::run, List::of, null, TestPlugins.noOpLoaderSwap()); } @ParameterizedTest @@ -504,7 +503,7 @@ public void testCommit(boolean enableTopicCreation) throws Exception { final CountDownLatch pollLatch = expectPolls(1); expectTopicCreation(TOPIC); - expectBeginFlush(Arrays.asList(true, false).iterator()::next); + expectBeginFlush(List.of(true, false).iterator()::next); expectOffsetFlush(true, true); workerTask.initialize(TASK_CONFIG); @@ -591,9 +590,9 @@ public void testSendRecordsRetries(boolean enableTopicCreation) { .thenAnswer(producerSendAnswer(true)); // Try to send 3, make first pass, second fail. Should save last two - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); workerTask.sendRecords(); - assertEquals(Arrays.asList(record2, record3), workerTask.toSend); + assertEquals(List.of(record2, record3), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -613,7 +612,7 @@ public void testSendRecordsProducerCallbackFail(boolean enableTopicCreation) { expectSendRecordProducerCallbackFail(); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, () -> workerTask.sendRecords()); verify(transformationChain, times(2)).apply(any(), any(SourceRecord.class)); @@ -636,7 +635,7 @@ public void testSendRecordsProducerSendFailsImmediately(boolean enableTopicCreat when(producer.send(any(ProducerRecord.class), any(Callback.class))) .thenThrow(new KafkaException("Producer closed while send in progress", new InvalidTopicException(TOPIC))); - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); assertThrows(ConnectException.class, () -> workerTask.sendRecords()); } @@ -660,7 +659,7 @@ public void testSendRecordsTaskCommitRecordFail(boolean enableTopicCreation) thr .doNothing() .when(sourceTask).commitRecord(any(SourceRecord.class), any(RecordMetadata.class)); - workerTask.toSend = Arrays.asList(record1, record2, record3); + workerTask.toSend = List.of(record1, record2, record3); workerTask.sendRecords(); assertNull(workerTask.toSend); } @@ -673,7 +672,7 @@ public void testSourceTaskIgnoresProducerException(boolean enableTopicCreation) expectTopicCreation(TOPIC); //Use different offsets for each record, so we can verify all were committed - final Map offset2 = Collections.singletonMap("key", 13); + final Map offset2 = Map.of("key", 13); // send two records // record 1 will succeed @@ -690,7 +689,7 @@ public void testSourceTaskIgnoresProducerException(boolean enableTopicCreation) .thenAnswer(producerSendAnswer(false)); //Send records and then commit offsets and verify both were committed and no exception - workerTask.toSend = Arrays.asList(record1, record2); + workerTask.toSend = List.of(record1, record2); workerTask.sendRecords(); workerTask.updateCommittableOffsets(); workerTask.commitOffsets(); @@ -755,8 +754,8 @@ public void testCancel(boolean enableTopicCreation) { } private TopicAdmin.TopicCreationResponse createdTopic(String topic) { - Set created = Collections.singleton(topic); - Set existing = Collections.emptySet(); + Set created = Set.of(topic); + Set existing = Set.of(); return new TopicAdmin.TopicCreationResponse(created, existing); } @@ -774,7 +773,7 @@ private CountDownLatch expectEmptyPolls(final AtomicInteger count) throws Interr count.incrementAndGet(); latch.countDown(); Thread.sleep(10); - return Collections.emptyList(); + return List.of(); 
}); return latch; } @@ -896,7 +895,7 @@ private void expectOffsetFlush() throws Exception { private void expectOffsetFlush(Boolean... succeedList) throws Exception { Future flushFuture = mock(Future.class); when(offsetWriter.doFlush(any(org.apache.kafka.connect.util.Callback.class))).thenReturn(flushFuture); - LinkedList succeedQueue = new LinkedList<>(Arrays.asList(succeedList)); + LinkedList succeedQueue = new LinkedList<>(List.of(succeedList)); doAnswer(invocationOnMock -> { boolean succeed = succeedQueue.pop(); @@ -993,7 +992,7 @@ private void verifyClose() { private void expectTopicCreation(String topic) { if (config.topicCreationEnable()) { - when(admin.describeTopics(topic)).thenReturn(Collections.emptyMap()); + when(admin.describeTopics(topic)).thenReturn(Map.of()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(createdTopic(topic)); } } @@ -1018,7 +1017,7 @@ private void assertShouldSkipCommit() { committerAppender.setClassLogger(SourceTaskOffsetCommitter.class, org.apache.logging.log4j.Level.TRACE); taskAppender.setClassLogger(WorkerSourceTask.class, org.apache.logging.log4j.Level.TRACE); SourceTaskOffsetCommitter.commit(workerTask); - assertEquals(Collections.emptyList(), taskAppender.getMessages()); + assertEquals(List.of(), taskAppender.getMessages()); List committerMessages = committerAppender.getMessages(); assertEquals(1, committerMessages.size()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java index eae9c96998b2c..fa445454fd088 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java @@ -300,7 +300,7 @@ public TestWorkerTask(ConnectorTaskId id, Listener statusListener, TargetState i Supplier>> errorReporterSupplier, Time time, StatusBackingStore statusBackingStore) { super(id, statusListener, initialState, loader, connectMetrics, errorHandlingMetrics, - retryWithToleranceOperator, transformationChain, errorReporterSupplier, time, statusBackingStore, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, transformationChain, errorReporterSupplier, time, statusBackingStore, null, TestPlugins.noOpLoaderSwap()); } @Override @@ -318,6 +318,11 @@ protected void execute() { @Override protected void close() { } + + @Override + protected String taskVersion() { + return null; + } } protected void assertFailedMetric(TaskMetricsGroup metricsGroup) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java index 71dcabbedb669..e29eeebe88d60 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java @@ -63,6 +63,7 @@ import org.apache.kafka.connect.runtime.isolation.PluginClassLoader; import org.apache.kafka.connect.runtime.isolation.Plugins; import org.apache.kafka.connect.runtime.isolation.Plugins.ClassLoaderUsage; +import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.rest.RestServer; import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets; import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; @@ -104,7 +105,6 @@ import org.mockito.quality.Strictness; import 
java.lang.management.ManagementFactory; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -249,6 +249,7 @@ public void setup(boolean enableTopicCreation) { .strictness(Strictness.STRICT_STUBS) .startMocking(); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -329,7 +330,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, noneConnectorClientConfigOverridePolicy); worker.start(); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); FutureCallback onFirstStart = new FutureCallback<>(); @@ -337,7 +338,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa // Wait for the connector to actually start assertEquals(TargetState.STARTED, onFirstStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); FutureCallback onSecondStart = new FutureCallback<>(); @@ -351,7 +352,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); @@ -372,7 +373,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa private void mockFileConfigProvider() { MockFileConfigProvider mockFileConfigProvider = new MockFileConfigProvider(); - mockFileConfigProvider.configure(Collections.singletonMap("testId", mockFileProviderTestId)); + mockFileConfigProvider.configure(Map.of("testId", mockFileProviderTestId)); when(plugins.newConfigProvider(any(AbstractConfig.class), eq("config.providers.file"), any(ClassLoaderUsage.class))) @@ -408,7 +409,7 @@ public void testStartConnectorFailure(boolean enableTopicCreation) throws Except } assertStartupStatistics(worker, 1, 1, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 1, 0, 0); @@ -439,19 +440,19 @@ public void testAddConnectorByAlias(boolean enableTopicCreation) throws Throwabl worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); FutureCallback onStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); assertStatistics(worker, 1, 0); assertStartupStatistics(worker, 1, 0, 0, 0); worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - 
assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -485,17 +486,17 @@ public void testAddConnectorByShortAlias(boolean enableTopicCreation) throws Thr worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); FutureCallback onStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); assertStatistics(worker, 1, 0); worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -536,8 +537,8 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th mockVersionedConnectorIsolation(connectorClass, null, sinkConnector); mockExecutorRealSubmit(WorkerConnector.class); - Map taskProps = Collections.singletonMap("foo", "bar"); - when(sinkConnector.taskConfigs(2)).thenReturn(Arrays.asList(taskProps, taskProps)); + Map taskProps = Map.of("foo", "bar"); + when(sinkConnector.taskConfigs(2)).thenReturn(List.of(taskProps, taskProps)); // Use doReturn().when() syntax due to when().thenReturn() not being able to return wildcard generic types doReturn(TestSourceTask.class).when(sinkConnector).taskClass(); @@ -551,13 +552,13 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); FutureCallback onFirstStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onFirstStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onFirstStart.get(1000, TimeUnit.MILLISECONDS)); assertStatistics(worker, 1, 0); - assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); FutureCallback onSecondStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onSecondStart); @@ -580,7 +581,7 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -609,7 +610,7 @@ public void testAddRemoveSourceTask(boolean enableTopicCreation) { mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, 
TestSourceTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -617,28 +618,28 @@ public void testAddRemoveSourceTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); ClusterConfigState configState = new ClusterConfigState( 0, null, - Collections.singletonMap(CONNECTOR_ID, 1), - Collections.singletonMap(CONNECTOR_ID, connectorConfigs), - Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), - Collections.singletonMap(TASK_ID, origProps), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_ID, 1), + Map.of(CONNECTOR_ID, connectorConfigs), + Map.of(CONNECTOR_ID, TargetState.STARTED), + Map.of(TASK_ID, origProps), + Map.of(), + Map.of(), + Map.of(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), + Set.of(), + Set.of() ); assertTrue(worker.startSourceTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED)); assertStatistics(worker, 0, 1); - assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); + assertEquals(Set.of(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -665,7 +666,7 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -673,7 +674,7 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); connectorConfigs.put(TOPICS_CONFIG, "t1"); connectorConfigs.put(CONNECTOR_CLASS_CONFIG, SampleSinkConnector.class.getName()); @@ -681,22 +682,22 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { ClusterConfigState configState = new ClusterConfigState( 0, null, - Collections.singletonMap(CONNECTOR_ID, 1), - Collections.singletonMap(CONNECTOR_ID, connectorConfigs), - Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), - Collections.singletonMap(TASK_ID, origProps), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_ID, 1), + Map.of(CONNECTOR_ID, connectorConfigs), + Map.of(CONNECTOR_ID, TargetState.STARTED), + Map.of(TASK_ID, origProps), + Map.of(), + Map.of(), + Map.of(CONNECTOR_ID, new 
AppliedConnectorConfig(connectorConfigs)), + Set.of(), + Set.of() ); assertTrue(worker.startSinkTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED)); assertStatistics(worker, 0, 1); - assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); + assertEquals(Set.of(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -740,7 +741,7 @@ public void testAddRemoveExactlyOnceSourceTask(boolean enableTopicCreation) { Runnable preProducer = mock(Runnable.class); Runnable postProducer = mock(Runnable.class); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -748,29 +749,29 @@ public void testAddRemoveExactlyOnceSourceTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); ClusterConfigState configState = new ClusterConfigState( 0, null, - Collections.singletonMap(CONNECTOR_ID, 1), - Collections.singletonMap(CONNECTOR_ID, connectorConfigs), - Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), - Collections.singletonMap(TASK_ID, origProps), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_ID, 1), + Map.of(CONNECTOR_ID, connectorConfigs), + Map.of(CONNECTOR_ID, TargetState.STARTED), + Map.of(TASK_ID, origProps), + Map.of(), + Map.of(), + Map.of(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), + Set.of(), + Set.of() ); assertTrue(worker.startExactlyOnceSourceTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED, preProducer, postProducer)); assertStatistics(worker, 0, 1); - assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); + assertEquals(Set.of(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -791,7 +792,7 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { mockStorage(); mockFileConfigProvider(); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); @@ -805,11 +806,11 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { // Each time we check the task metrics, the worker will call the herder when(herder.taskStatus(TASK_ID)).thenReturn( - new ConnectorStateInfo.TaskState(0, "RUNNING", "worker", "msg"), - new ConnectorStateInfo.TaskState(0, "PAUSED", "worker", "msg"), - new ConnectorStateInfo.TaskState(0, "FAILED", 
"worker", "msg"), - new ConnectorStateInfo.TaskState(0, "DESTROYED", "worker", "msg"), - new ConnectorStateInfo.TaskState(0, "UNASSIGNED", "worker", "msg") + new ConnectorStateInfo.TaskState(0, "RUNNING", "worker", "msg", null), + new ConnectorStateInfo.TaskState(0, "PAUSED", "worker", "msg", null), + new ConnectorStateInfo.TaskState(0, "FAILED", "worker", "msg", null), + new ConnectorStateInfo.TaskState(0, "DESTROYED", "worker", "msg", null), + new ConnectorStateInfo.TaskState(0, "UNASSIGNED", "worker", "msg", null) ); worker = new Worker(WORKER_ID, @@ -826,7 +827,7 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 0, 0, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); worker.startSourceTask( TASK_ID, ClusterConfigState.EMPTY, @@ -904,7 +905,7 @@ public void testStartTaskFailure(boolean enableTopicCreation) { mockInternalConverters(); mockFileConfigProvider(); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, "missing.From.This.Workers.Classpath"); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, "missing.From.This.Workers.Classpath"); mockKafkaClusterId(); mockGenericIsolation(); @@ -921,7 +922,7 @@ public void testStartTaskFailure(boolean enableTopicCreation) { assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 0, 0, 1, 1); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); verify(taskStatusListener).onFailure(eq(TASK_ID), any(ConfigException.class)); verifyKafkaClusterId(); @@ -947,7 +948,7 @@ public void testCleanupTasksOnStop(boolean enableTopicCreation) { mockVersionedTaskHeaderConverterFromWorker(taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); @@ -989,7 +990,7 @@ public void testConverterOverrides(boolean enableTopicCreation) { mockStorage(); mockFileConfigProvider(); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); mockKafkaClusterId(); @@ -1008,16 +1009,16 @@ public void testConverterOverrides(boolean enableTopicCreation) { worker.herder = herder; worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); Map connProps = anyConnectorConfigMap(); connProps.put(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, SampleConverterWithHeaders.class.getName()); connProps.put(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, SampleConverterWithHeaders.class.getName()); worker.startSourceTask(TASK_ID, ClusterConfigState.EMPTY, connProps, origProps, taskStatusListener, TargetState.STARTED); assertStatistics(worker, 0, 1); - assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); + assertEquals(Set.of(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); 
@@ -1205,7 +1206,7 @@ public void testAdminConfigsClientOverridesWithAllPolicy(boolean enableTopicCrea props.put("consumer.bootstrap.servers", "localhost:4761"); WorkerConfig configWithOverrides = new StandaloneConfig(props); - Map connConfig = Collections.singletonMap("metadata.max.age.ms", "10000"); + Map connConfig = Map.of("metadata.max.age.ms", "10000"); Map expectedConfigs = new HashMap<>(workerProps); expectedConfigs.remove(AbstractConfig.CONFIG_PROVIDERS_CONFIG); expectedConfigs.put("bootstrap.servers", "localhost:9092"); @@ -1230,7 +1231,7 @@ public void testAdminConfigsClientOverridesWithNonePolicy(boolean enableTopicCre props.put("admin.client.id", "testid"); props.put("admin.metadata.max.age.ms", "5000"); WorkerConfig configWithOverrides = new StandaloneConfig(props); - Map connConfig = Collections.singletonMap("metadata.max.age.ms", "10000"); + Map connConfig = Map.of("metadata.max.age.ms", "10000"); when(connectorConfig.originalsWithPrefix(CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX)).thenReturn(connConfig); @@ -1816,7 +1817,7 @@ public void testExecutorServiceShutdown(boolean enableTopicCreation) throws Inte noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); worker.stop(); verifyKafkaClusterId(); verify(executorService, times(1)).shutdown(); @@ -1838,7 +1839,7 @@ public void testExecutorServiceShutdownWhenTerminationFails(boolean enableTopicC noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); worker.stop(); verifyKafkaClusterId(); verify(executorService, times(1)).shutdown(); @@ -1861,7 +1862,7 @@ public void testExecutorServiceShutdownWhenTerminationThrowsException(boolean en noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Collections.emptySet(), worker.connectorNames()); + assertEquals(Set.of(), worker.connectorNames()); worker.stop(); // Clear the interrupted status so that the test infrastructure doesn't hit an unexpected interrupt. 
assertTrue(Thread.interrupted()); @@ -1930,7 +1931,7 @@ public void testGetSinkConnectorOffsets(boolean enableTopicCreation) throws Exce worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, allConnectorClientConfigOverridePolicy, config -> admin); worker.start(); - mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(new TopicPartition("test-topic", 0), new OffsetAndMetadata(10)), null); + mockAdminListConsumerGroupOffsets(admin, Map.of(new TopicPartition("test-topic", 0), new OffsetAndMetadata(10)), null); FutureCallback cb = new FutureCallback<>(); worker.sinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, cb); @@ -2037,11 +2038,11 @@ public void testGetSourceConnectorOffsets(boolean enableTopicCreation) throws Ex worker.start(); Set> connectorPartitions = - Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); + Set.of(Map.of("partitionKey", "partitionValue")); - Map, Map> partitionOffsets = Collections.singletonMap( - Collections.singletonMap("partitionKey", "partitionValue"), - Collections.singletonMap("offsetKey", "offsetValue") + Map, Map> partitionOffsets = Map.of( + Map.of("partitionKey", "partitionValue"), + Map.of("offsetKey", "offsetValue") ); when(offsetStore.connectorPartitions(CONNECTOR_ID)).thenReturn(connectorPartitions); @@ -2111,7 +2112,7 @@ public void testAlterOffsetsConnectorDoesNotSupportOffsetAlteration(boolean enab FutureCallback cb = new FutureCallback<>(); worker.modifyConnectorOffsets(CONNECTOR_ID, connectorProps, - Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), cb); ExecutionException e = assertThrows(ExecutionException.class, () -> cb.get(1000, TimeUnit.MILLISECONDS)); @@ -2141,8 +2142,8 @@ public void testAlterOffsetsSourceConnector(boolean enableTopicCreation) throws OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Map, Map> partitionOffsets = new HashMap<>(); - partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")); - partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")); + partitionOffsets.put(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")); + partitionOffsets.put(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, Callback.class).onCompletion(null, null); @@ -2180,8 +2181,8 @@ public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) { OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Map, Map> partitionOffsets = new HashMap<>(); - partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")); - partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")); + partitionOffsets.put(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")); + partitionOffsets.put(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, 
Callback.class).onCompletion(new RuntimeException("Test exception"), null); @@ -2206,9 +2207,9 @@ public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) { @ValueSource(booleans = {true, false}) public void testNormalizeSourceConnectorOffsets(boolean enableTopicCreation) { setup(enableTopicCreation); - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap("filename", "/path/to/filename"), - Collections.singletonMap("position", 20) + Map, Map> offsets = Map.of( + Map.of("filename", "/path/to/filename"), + Map.of("position", 20) ); assertInstanceOf(Integer.class, offsets.values().iterator().next().get("position")); @@ -2236,11 +2237,11 @@ public void testAlterOffsetsSinkConnectorNoDeletes(boolean enableTopicCreation) Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 500)); + partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 500)); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); - partitionOffsets.put(partition2, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 100)); + partitionOffsets.put(partition2, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 100)); // A null value for deleteOffsetsSetCapture indicates that we don't expect any call to Admin::deleteConsumerGroupOffsets alterOffsetsSinkConnector(partitionOffsets, alterOffsetsMapCapture, null); @@ -2289,7 +2290,7 @@ public void testAlterOffsetsSinkConnectorAltersAndDeletes(boolean enableTopicCre Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); @@ -2300,7 +2301,7 @@ public void testAlterOffsetsSinkConnectorAltersAndDeletes(boolean enableTopicCre assertEquals(1, alterOffsetsMapCapture.getValue().size()); assertEquals(100, alterOffsetsMapCapture.getValue().get(new TopicPartition("test_topic", 10)).offset()); - Set expectedTopicPartitionsForOffsetDelete = Collections.singleton(new TopicPartition("test_topic", 20)); + Set expectedTopicPartitionsForOffsetDelete = Set.of(new TopicPartition("test_topic", 20)); assertEquals(expectedTopicPartitionsForOffsetDelete, deleteOffsetsSetCapture.getValue()); } @@ -2374,8 +2375,8 @@ public void testAlterOffsetsSinkConnectorAlterOffsetsError(boolean enableTopicCr Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - Map, Map> partitionOffsets = Collections.singletonMap(partition1, - Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); + Map, Map> partitionOffsets = Map.of(partition1, + Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); FutureCallback cb = new FutureCallback<>(); worker.modifySinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, partitionOffsets, @@ -2422,7 +2423,7 @@ public void testAlterOffsetsSinkConnectorDeleteOffsetsError(boolean enableTopicC Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); 
partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); @@ -2463,7 +2464,7 @@ public void testAlterOffsetsSinkConnectorSynchronousError(boolean enableTopicCre Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); FutureCallback cb = new FutureCallback<>(); worker.modifySinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, partitionOffsets, @@ -2503,8 +2504,8 @@ public void testResetOffsetsSourceConnectorExactlyOnceSupportEnabled(boolean ena OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Set> connectorPartitions = new HashSet<>(); - connectorPartitions.add(Collections.singletonMap("partitionKey", "partitionValue1")); - connectorPartitions.add(Collections.singletonMap("partitionKey", "partitionValue2")); + connectorPartitions.add(Map.of("partitionKey", "partitionValue1")); + connectorPartitions.add(Map.of("partitionKey", "partitionValue2")); when(offsetStore.connectorPartitions(eq(CONNECTOR_ID))).thenReturn(connectorPartitions); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, Callback.class).onCompletion(null, null); @@ -2545,7 +2546,7 @@ public void testResetOffsetsSinkConnector(boolean enableTopicCreation) throws Ex when(plugins.withClassLoader(any(ClassLoader.class), any(Runnable.class))).thenAnswer(AdditionalAnswers.returnsSecondArg()); TopicPartition tp = new TopicPartition("test-topic", 0); - mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(tp, new OffsetAndMetadata(10L)), null, time, 2000); + mockAdminListConsumerGroupOffsets(admin, Map.of(tp, new OffsetAndMetadata(10L)), null, time, 2000); when(sinkConnector.alterOffsets(eq(connectorProps), eq(Collections.singletonMap(tp, null)))).thenAnswer(invocation -> { time.sleep(3000); return true; @@ -2587,7 +2588,7 @@ public void testResetOffsetsSinkConnectorDeleteConsumerGroupError(boolean enable when(plugins.withClassLoader(any(ClassLoader.class), any(Runnable.class))).thenAnswer(AdditionalAnswers.returnsSecondArg()); TopicPartition tp = new TopicPartition("test-topic", 0); - mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(tp, new OffsetAndMetadata(10L)), null); + mockAdminListConsumerGroupOffsets(admin, Map.of(tp, new OffsetAndMetadata(10L)), null); when(sinkConnector.alterOffsets(eq(connectorProps), eq(Collections.singletonMap(tp, null)))).thenReturn(true); DeleteConsumerGroupsResult deleteConsumerGroupsResult = mock(DeleteConsumerGroupsResult.class); @@ -2628,9 +2629,9 @@ public void testModifySourceConnectorOffsetsTimeout(boolean enableTopicCreation) KafkaProducer producer = mock(KafkaProducer.class); OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); - Map, Map> partitionOffsets = Collections.singletonMap( - Collections.singletonMap("partitionKey", "partitionValue"), - Collections.singletonMap("offsetKey", "offsetValue")); + Map, Map> partitionOffsets = Map.of( + Map.of("partitionKey", "partitionValue"), + Map.of("offsetKey", "offsetValue")); 
FutureCallback cb = new FutureCallback<>(); worker.modifySourceConnectorOffsets(CONNECTOR_ID, sourceConnector, connectorProps, partitionOffsets, offsetStore, producer, @@ -2714,7 +2715,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti Map taskConfig = new HashMap<>(); // No warnings or exceptions when a connector generates an empty list of task configs - when(sourceConnector.taskConfigs(1)).thenReturn(Collections.emptyList()); + when(sourceConnector.taskConfigs(1)).thenReturn(List.of()); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); @@ -2723,9 +2724,9 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti } // No warnings or exceptions when a connector generates the maximum permitted number of task configs - when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig)); - when(sourceConnector.taskConfigs(2)).thenReturn(Arrays.asList(taskConfig, taskConfig)); - when(sourceConnector.taskConfigs(3)).thenReturn(Arrays.asList(taskConfig, taskConfig, taskConfig)); + when(sourceConnector.taskConfigs(1)).thenReturn(List.of(taskConfig)); + when(sourceConnector.taskConfigs(2)).thenReturn(List.of(taskConfig, taskConfig)); + when(sourceConnector.taskConfigs(3)).thenReturn(List.of(taskConfig, taskConfig, taskConfig)); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); @@ -2739,12 +2740,12 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); assertEquals(3, taskConfigs.size()); - assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("WARN")); - assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); + assertEquals(List.of(), logCaptureAppender.getMessages("WARN")); + assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); } // Warning/exception when a connector generates too many task configs - List> tooManyTaskConfigs = Arrays.asList(taskConfig, taskConfig, taskConfig, taskConfig); + List> tooManyTaskConfigs = List.of(taskConfig, taskConfig, taskConfig, taskConfig); when(sourceConnector.taskConfigs(1)).thenReturn(tooManyTaskConfigs); when(sourceConnector.taskConfigs(2)).thenReturn(tooManyTaskConfigs); when(sourceConnector.taskConfigs(3)).thenReturn(tooManyTaskConfigs); @@ -2779,19 +2780,19 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti ); // Regardless of enforcement, there should never be any error-level log messages - assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); + assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); } } // One last sanity check in case the connector is reconfigured and respects tasks.max - when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig)); + when(sourceConnector.taskConfigs(1)).thenReturn(List.of(taskConfig)); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = 
worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); assertEquals(1, taskConfigs.size()); - assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("WARN")); - assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); + assertEquals(List.of(), logCaptureAppender.getMessages("WARN")); + assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); } worker.stop(); @@ -2815,7 +2816,7 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { SinkTask task = mock(TestSinkTask.class); mockKafkaClusterId(); - Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); + Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -2823,7 +2824,7 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Collections.emptySet(), worker.taskIds()); + assertEquals(Set.of(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); connectorConfigs.put(TASKS_MAX_ENFORCE_CONFIG, Boolean.toString(enforced)); connectorConfigs.put(TOPICS_CONFIG, "t1"); @@ -2838,15 +2839,15 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { 0, null, // ... but it has generated two task configs - Collections.singletonMap(connName, numTasks), - Collections.singletonMap(connName, connectorConfigs), - Collections.singletonMap(connName, TargetState.STARTED), - Collections.singletonMap(TASK_ID, origProps), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(connName, new AppliedConnectorConfig(connectorConfigs)), - Collections.emptySet(), - Collections.emptySet() + Map.of(connName, numTasks), + Map.of(connName, connectorConfigs), + Map.of(connName, TargetState.STARTED), + Map.of(TASK_ID, origProps), + Map.of(), + Map.of(), + Map.of(connName, new AppliedConnectorConfig(connectorConfigs)), + Set.of(), + Set.of() ); String tasksMaxExceededMessage; @@ -2863,7 +2864,7 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { ArgumentCaptor failureCaptor = ArgumentCaptor.forClass(Throwable.class); verify(taskStatusListener, times(1)).onFailure(eq(TASK_ID), failureCaptor.capture()); - assertInstanceOf(TooManyTasksException.class, failureCaptor.getValue(), + assertInstanceOf(TooManyTasksException.class, failureCaptor.getValue(), "Expected task start exception to be TooManyTasksException, but was " + failureCaptor.getValue().getClass() + " instead"); tasksMaxExceededMessage = failureCaptor.getValue().getMessage(); @@ -2962,7 +2963,7 @@ private void verifyStorage() { private void mockInternalConverters() { JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Collections.singletonMap(SCHEMAS_ENABLE_CONFIG, false), false); + jsonConverter.configure(Map.of(SCHEMAS_ENABLE_CONFIG, false), false); when(plugins.newInternalConverter(eq(true), anyString(), anyMap())) .thenReturn(jsonConverter); @@ -3072,6 +3073,7 @@ private void mockVersionedTaskIsolation(Class connectorClas when(plugins.pluginLoader(connectorClass.getName(), range)).thenReturn(pluginLoader); when(plugins.connectorClass(connectorClass.getName(), range)).thenReturn((Class) connectorClass); when(plugins.newTask(taskClass)).thenReturn(task); + 
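
The mocking helpers above rely on Mockito's when(...).thenReturn(...) stubbing, and verifyVersionedTaskIsolation below now expects version() to be invoked twice. A minimal sketch of that stub-then-verify pattern, assuming Mockito is on the classpath; the Versioned interface is a hypothetical stand-in for the mocked task, not a type from the patch:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    import java.util.List;

    public class VerifyTimesSketch {
        // Hypothetical collaborator standing in for the mocked task.
        interface Versioned {
            String version();
        }

        public static void main(String[] args) {
            Versioned task = mock(Versioned.class);
            when(task.version()).thenReturn("1.0.0");

            // Code under test that happens to query the version twice.
            List.of("first use", "second use").forEach(use -> task.version());

            // verify(mock) alone expects exactly one call; times(2) matches two calls.
            verify(task, times(2)).version();
        }
    }
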
when(plugins.safeLoaderSwapper()).thenReturn(TestPlugins.noOpLoaderSwap()); when(task.version()).thenReturn(range == null ? "unknown" : range.toString()); } @@ -3087,7 +3089,7 @@ private void verifyVersionedTaskIsolation(Class connectorCl verify(plugins).pluginLoader(connectorClass.getName(), range); verify(plugins).connectorClass(connectorClass.getName(), range); verify(plugins).newTask(taskClass); - verify(task).version(); + verify(task, times(2)).version(); } private void mockExecutorRealSubmit(Class runnableClass) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java index 3574137b6c301..0546e3bb4e950 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java @@ -31,10 +31,10 @@ import org.mockito.stubbing.OngoingStubbing; import java.util.AbstractMap.SimpleEntry; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -62,11 +62,11 @@ public static ClusterConfigState clusterConfigState(long offset, connectorConfigs, connectorTargetStates(1, connectorNum, TargetState.STARTED), taskConfigs(0, connectorNum, connectorNum * taskNum), - Collections.emptyMap(), - Collections.emptyMap(), + Map.of(), + Map.of(), appliedConnectorConfigs, - Collections.emptySet(), - Collections.emptySet()); + Set.of(), + Set.of()); } public static Map connectorTaskCounts(int start, @@ -168,6 +168,7 @@ public static void assertAssignment(boolean expectFailed, "Wrong rebalance delay in " + assignment); } + @SuppressWarnings("unchecked") public static > TransformationChain getTransformationChain( RetryWithToleranceOperator toleranceOperator, List results) { @@ -195,8 +196,12 @@ public static > TransformationChain buildTra when(transformationPlugin.get()).thenReturn(transformation); TransformationStage stage = new TransformationStage<>( predicatePlugin, + "testPredicate", + null, false, transformationPlugin, + "testTransformation", + null, TestPlugins.noOpLoaderSwap()); TransformationChain realTransformationChainRetriableException = new TransformationChain<>(List.of(stage), toleranceOperator); return Mockito.spy(realTransformationChainRetriableException); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java index f966e12345ff3..e9f6de400dd19 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java @@ -21,8 +21,8 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collections; +import java.util.List; +import java.util.Set; import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1; import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2; @@ -99,48 +99,48 @@ public void testEagerToCoopMetadata() { public void testEagerToEagerAssignment() { ConnectProtocol.Assignment assignment = 
new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0)); + List.of(connectorId1, connectorId3), List.of(taskId2x0)); ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0)); + List.of(connectorId2), List.of(taskId1x0, taskId3x0)); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(memberBuf); assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testCoopToCoopAssignment() { ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId1, connectorId3), List.of(taskId2x0), + List.of(), List.of(), 0); ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); ExtendedAssignment assignment2 = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId2), List.of(taskId1x0, taskId3x0), + List.of(), List.of(), 0); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = @@ -148,15 +148,15 @@ public void testCoopToCoopAssignment() { assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), 
memberAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testEagerToCoopAssignment() { ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0)); + List.of(connectorId1, connectorId3), List.of(taskId2x0)); ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment); ConnectProtocol.Assignment leaderAssignment = @@ -164,12 +164,12 @@ public void testEagerToCoopAssignment() { assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0)); + List.of(connectorId2), List.of(taskId1x0, taskId3x0)); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = @@ -177,37 +177,37 @@ public void testEagerToCoopAssignment() { assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testCoopToEagerAssignment() { ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId1, connectorId3), List.of(taskId2x0), + List.of(), List.of(), 0); ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); ExtendedAssignment assignment2 = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId2), List.of(taskId1x0, taskId3x0), + List.of(), List.of(), 0); ByteBuffer 
memberBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment2, false); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(memberBuf); assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); } private ConnectProtocol.WorkerState emptyWorkerState() { @@ -221,10 +221,10 @@ private ExtendedWorkerState emptyExtendedWorkerState(short protocolVersion) { LEADER, LEADER_URL, CONFIG_OFFSET, - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), + Set.of(), + Set.of(), + Set.of(), + Set.of(), 0 ); return new ExtendedWorkerState(LEADER_URL, CONFIG_OFFSET, assignment); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java index af8aeab46a589..da153572fdac4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java @@ -29,9 +29,6 @@ import java.security.InvalidParameterException; import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -151,9 +148,9 @@ public void testSupportedKeyGeneratorAlgorithms() { private void testSupportedAlgorithms(String type, String... 
expectedAlgorithms) { Set supportedAlgorithms = DistributedConfig.supportedAlgorithms(type); - Set unsupportedAlgorithms = new HashSet<>(Arrays.asList(expectedAlgorithms)); + Set unsupportedAlgorithms = new HashSet<>(List.of(expectedAlgorithms)); unsupportedAlgorithms.removeAll(supportedAlgorithms); - assertEquals(Collections.emptySet(), unsupportedAlgorithms, type + " algorithms were found that should be supported by this JVM but are not"); + assertEquals(Set.of(), unsupportedAlgorithms, type + " algorithms were found that should be supported by this JVM but are not"); } @Test @@ -214,13 +211,13 @@ public void shouldFailWithInvalidKeySize() throws NoSuchAlgorithmException { @Test public void shouldValidateAllVerificationAlgorithms() { - List algorithms = - new ArrayList<>(Arrays.asList("HmacSHA1", "HmacSHA256", "HmacMD5", "bad-algorithm")); + List algorithms = List.of("HmacSHA1", "HmacSHA256", "HmacMD5", "bad-algorithm"); Map configs = configs(); for (int i = 0; i < algorithms.size(); i++) { configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, String.join(",", algorithms)); assertThrows(ConfigException.class, () -> new DistributedConfig(configs)); - algorithms.add(algorithms.remove(0)); + // Rotate the algorithm list by creating a new list with rotated elements + algorithms = List.of(algorithms.get(1), algorithms.get(2), algorithms.get(3), algorithms.get(0)); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java index 73eaf93961e41..820de522f1293 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java @@ -87,7 +87,6 @@ import org.mockito.stubbing.Answer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -116,7 +115,6 @@ import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; import static jakarta.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; -import static java.util.Collections.singletonList; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; import static org.apache.kafka.connect.runtime.AbstractStatus.State.FAILED; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -191,7 +189,7 @@ public class DistributedHerderTest { CONN1_CONFIG_UPDATED.put(SinkConnectorConfig.TOPICS_CONFIG, String.join(",", FOO_TOPIC, BAR_TOPIC, BAZ_TOPIC)); } private static final ConfigInfos CONN1_CONFIG_INFOS = - new ConfigInfos(CONN1, 0, Collections.emptyList(), Collections.emptyList()); + new ConfigInfos(CONN1, 0, List.of(), List.of()); private static final Map CONN2_CONFIG = new HashMap<>(); static { CONN2_CONFIG.put(ConnectorConfig.NAME_CONFIG, CONN2); @@ -200,9 +198,9 @@ public class DistributedHerderTest { CONN2_CONFIG.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, BogusSourceConnector.class.getName()); } private static final ConfigInfos CONN2_CONFIG_INFOS = - new ConfigInfos(CONN2, 0, Collections.emptyList(), Collections.emptyList()); + new ConfigInfos(CONN2, 0, List.of(), List.of()); private static final ConfigInfos CONN2_INVALID_CONFIG_INFOS = - new ConfigInfos(CONN2, 1, Collections.emptyList(), Collections.emptyList()); + new ConfigInfos(CONN2, 1, List.of(), List.of()); private static 
final Map TASK_CONFIG = new HashMap<>(); static { TASK_CONFIG.put(TaskConfig.TASK_CLASS_CONFIG, BogusSourceTask.class.getName()); @@ -222,64 +220,64 @@ public class DistributedHerderTest { private static final ClusterConfigState SNAPSHOT = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); private static final ClusterConfigState SNAPSHOT_PAUSED_CONN1 = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.PAUSED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.PAUSED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); private static final ClusterConfigState SNAPSHOT_STOPPED_CONN1 = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 0), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STOPPED), - Collections.emptyMap(), // Stopped connectors should have an empty set of task configs - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, 10), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.singleton(CONN1), - Collections.emptySet()); + Map.of(CONN1, 0), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STOPPED), + Map.of(), // Stopped connectors should have an empty set of task configs + Map.of(CONN1, 3), + Map.of(CONN1, 10), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(CONN1), + Set.of()); private static final ClusterConfigState SNAPSHOT_STOPPED_CONN1_FENCED = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 0), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STOPPED), - Collections.emptyMap(), - Collections.singletonMap(CONN1, 0), - Collections.singletonMap(CONN1, 11), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(CONN1, 0), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STOPPED), + Map.of(), + Map.of(CONN1, 0), + Map.of(CONN1, 11), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); private static final ClusterConfigState SNAPSHOT_UPDATED_CONN1_CONFIG = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG_UPDATED), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG_UPDATED), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG_UPDATED)), - Collections.emptySet(), - Collections.emptySet()); + 
Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG_UPDATED)), + Set.of(), + Set.of()); private static final String WORKER_ID = "localhost:8083"; private static final String KAFKA_CLUSTER_ID = "I4ZmrWqfT2e-upky_4fdPA"; @@ -320,7 +318,7 @@ public void setUp() throws Exception { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[]{uponShutdown})); + noneConnectorClientConfigOverridePolicy, List.of(), null, new AutoCloseable[]{uponShutdown})); verify(worker).getPlugins(); configUpdateListener = herder.new ConfigUpdateListener(); rebalanceListener = herder.new RebalanceListener(time); @@ -344,7 +342,7 @@ public void testJoinAssignment() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); + expectRebalance(1, List.of(CONN1), List.of(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -370,7 +368,7 @@ public void testRebalance() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); + expectRebalance(1, List.of(CONN1), List.of(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -395,8 +393,8 @@ public void testRebalance() { verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED)); // Rebalance and get a new assignment - expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, - 1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, + 1, List.of(CONN1), List.of()); herder.tick(); time.sleep(3000L); assertStatistics(3, 2, 100, 3000); @@ -418,7 +416,7 @@ public void testIncrementalCooperativeRebalanceForNewMember() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -429,9 +427,9 @@ public void testIncrementalCooperativeRebalanceForNewMember() { herder.tick(); // The new member got its assignment - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, - 1, singletonList(CONN1), singletonList(TASK1), 0); + 1, List.of(CONN1), List.of(TASK1), 0); // and the new assignment started ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -462,9 +460,9 @@ public void testIncrementalCooperativeRebalanceForExistingMember() { // Join group. First rebalance contains revocations because a new member joined. 
when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - expectRebalance(singletonList(CONN1), singletonList(TASK1), + expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, 1, - Collections.emptyList(), Collections.emptyList(), 0); + List.of(), List.of(), 0); doNothing().when(member).requestRejoin(); expectMemberPoll(); @@ -476,7 +474,7 @@ public void testIncrementalCooperativeRebalanceForExistingMember() { // In the second rebalance the new member gets its assignment and this member has no // assignments or revocations - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -499,9 +497,9 @@ public void testIncrementalCooperativeRebalanceWithDelay() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, - Collections.emptyList(), singletonList(TASK2), + List.of(), List.of(TASK2), rebalanceDelay); expectConfigRefreshAndSnapshot(SNAPSHOT); @@ -520,9 +518,9 @@ public void testIncrementalCooperativeRebalanceWithDelay() { herder.tick(); // The member got its assignment and revocation - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, - 1, singletonList(CONN1), singletonList(TASK1), 0); + 1, List.of(CONN1), List.of(TASK1), 0); // and the new assignment started ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -549,7 +547,7 @@ public void testRebalanceFailedConnector() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); + expectRebalance(1, List.of(CONN1), List.of(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -571,8 +569,8 @@ public void testRebalanceFailedConnector() { verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED)); // Rebalance and get a new assignment - expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, - 1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, + 1, List.of(CONN1), List.of()); // worker is not running, so we should see no call to connectorTaskConfigs() expectExecuteTaskReconfiguration(false, null, null); @@ -610,7 +608,7 @@ public void revokeAndReassign(boolean incompleteRebalance) { when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(connectProtocolVersion); // The lists need to be mutable because assignments might be removed - expectRebalance(configOffset, new ArrayList<>(singletonList(CONN1)), new ArrayList<>(singletonList(TASK1))); + expectRebalance(configOffset, new ArrayList<>(List.of(CONN1)), new ArrayList<>(List.of(TASK1))); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -631,7 +629,7 
@@ public void revokeAndReassign(boolean incompleteRebalance) { // Perform a partial re-balance just prior to the revocation // bump the configOffset to trigger reading the config topic to the end configOffset++; - expectRebalance(configOffset, Collections.emptyList(), Collections.emptyList()); + expectRebalance(configOffset, List.of(), List.of()); // give it the wrong snapshot, as if we're out of sync/can't reach the broker expectConfigRefreshAndSnapshot(SNAPSHOT); doNothing().when(member).requestRejoin(); @@ -641,9 +639,9 @@ public void revokeAndReassign(boolean incompleteRebalance) { } // Revoke the connector in the next rebalance - expectRebalance(singletonList(CONN1), Collections.emptyList(), - ConnectProtocol.Assignment.NO_ERROR, configOffset, Collections.emptyList(), - Collections.emptyList()); + expectRebalance(List.of(CONN1), List.of(), + ConnectProtocol.Assignment.NO_ERROR, configOffset, List.of(), + List.of()); if (incompleteRebalance) { // Same as SNAPSHOT, except with an updated offset @@ -651,15 +649,15 @@ public void revokeAndReassign(boolean incompleteRebalance) { ClusterConfigState secondSnapshot = new ClusterConfigState( configOffset, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet() + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of() ); expectConfigRefreshAndSnapshot(secondSnapshot); } @@ -669,7 +667,7 @@ public void revokeAndReassign(boolean incompleteRebalance) { herder.tick(); // re-assign the connector back to the same worker to ensure state was cleaned up - expectRebalance(configOffset, singletonList(CONN1), Collections.emptyList()); + expectRebalance(configOffset, List.of(CONN1), List.of()); herder.tick(); @@ -704,10 +702,10 @@ public void testCreateConnector() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -742,12 +740,12 @@ public void testCreateConnector() { time.sleep(1000L); assertStatistics(3, 1, 100, 1000L); - ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, Collections.emptyList(), ConnectorType.SOURCE); + ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, List.of(), ConnectorType.SOURCE); verify(putConnectorCallback).onCompletion(isNull(), eq(new Herder.Created<>(true, info))); verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -760,10 +758,10 @@ public void testCreateConnectorWithInitialState() { when(member.memberId()).thenReturn("leader"); 
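
As the comment in revokeAndReassign above notes, the lists handed to expectRebalance stay wrapped in new ArrayList<>(...) because assignments might be removed; List.of(...) on its own would not work there, since the factory collections are unmodifiable. A small sketch of that distinction, with illustrative names only:

    import java.util.ArrayList;
    import java.util.List;

    public class MutableCopySketch {
        public static void main(String[] args) {
            List<String> assigned = List.of("connector-1");

            // List.of produces an unmodifiable list, so in-place revocation fails...
            try {
                assigned.remove("connector-1");
            } catch (UnsupportedOperationException e) {
                System.out.println("List.of is unmodifiable");
            }

            // ...which is why the test wraps it in a mutable copy first.
            List<String> mutable = new ArrayList<>(assigned);
            mutable.remove("connector-1");
            System.out.println("after removal: " + mutable); // []
        }
    }
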
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -797,12 +795,12 @@ public void testCreateConnectorWithInitialState() { time.sleep(1000L); assertStatistics(3, 1, 100, 1000L); - ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, Collections.emptyList(), ConnectorType.SOURCE); + ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, List.of(), ConnectorType.SOURCE); verify(putConnectorCallback).onCompletion(isNull(), eq(new Herder.Created<>(true, info))); verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -814,10 +812,10 @@ public void testCreateConnectorWithInitialState() { public void testCreateConnectorConfigBackingStoreError() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -855,7 +853,7 @@ public void testCreateConnectorConfigBackingStoreError() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -867,10 +865,10 @@ public void testCreateConnectorConfigBackingStoreError() { public void testCreateConnectorFailedValidation() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); HashMap config = new HashMap<>(CONN2_CONFIG); @@ -901,7 +899,7 @@ public void testCreateConnectorFailedValidation() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic" @@ -924,7 +922,7 @@ public void testConnectorNameConflictsWithWorkerGroupId() { ConfigValue nameConfig = validatedConfigs.get(ConnectorConfig.NAME_CONFIG); assertEquals( - Collections.singletonList("Consumer group for sink connector named test-group conflicts with Connect worker group connect-test-group"), + List.of("Consumer group for sink connector named test-group conflicts with Connect worker group connect-test-group"), 
nameConfig.errorMessages()); } @@ -943,12 +941,12 @@ public void testConnectorGroupIdConflictsWithWorkerGroupId() { ConfigValue overriddenGroupIdConfig = validatedConfigs.get(overriddenGroupId); assertEquals( - Collections.singletonList("Consumer group connect-test-group conflicts with Connect worker group connect-test-group"), + List.of("Consumer group connect-test-group conflicts with Connect worker group connect-test-group"), overriddenGroupIdConfig.errorMessages()); ConfigValue nameConfig = validatedConfigs.get(ConnectorConfig.NAME_CONFIG); assertEquals( - Collections.emptyList(), + List.of(), nameConfig.errorMessages() ); } @@ -957,10 +955,10 @@ public void testConnectorGroupIdConflictsWithWorkerGroupId() { public void testCreateConnectorAlreadyExists() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // mock the actual validation since its asynchronous nature is difficult to test and should @@ -987,7 +985,7 @@ public void testCreateConnectorAlreadyExists() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic" @@ -1002,7 +1000,7 @@ public void testDestroyConnector() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // Start with one connector - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); + expectRebalance(1, List.of(CONN1), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -1012,7 +1010,7 @@ public void testDestroyConnector() { }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), eq(TargetState.STARTED), onStart.capture()); expectExecuteTaskReconfiguration(true, conn1SinkConfig, invocation -> TASK_CONFIGS); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // And delete the connector @@ -1031,13 +1029,13 @@ public void testDestroyConnector() { // tasks are revoked TopicStatus fooStatus = new TopicStatus(FOO_TOPIC, CONN1, 0, time.milliseconds()); TopicStatus barStatus = new TopicStatus(BAR_TOPIC, CONN1, 0, time.milliseconds()); - when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(new HashSet<>(Arrays.asList(fooStatus, barStatus))); + when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(Set.of(fooStatus, barStatus)); doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(FOO_TOPIC)); doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(BAR_TOPIC)); - expectRebalance(singletonList(CONN1), singletonList(TASK1), + expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, 2, "leader", "leaderUrl", - Collections.emptyList(), Collections.emptyList(), 0, true); + List.of(), List.of(), 0, true); expectConfigRefreshAndSnapshot(ClusterConfigState.EMPTY); doNothing().when(member).requestRejoin(); @@ -1050,7 +1048,7 @@ public void testDestroyConnector() { 
verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - Arrays.asList( + List.of( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic", @@ -1068,10 +1066,10 @@ public void testRestartConnector() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); + expectRebalance(1, List.of(CONN1), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -1104,10 +1102,10 @@ public void testRestartUnknownConnector() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1128,7 +1126,7 @@ public void testRestartConnectorRedirectToLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1152,10 +1150,10 @@ public void testRestartConnectorRedirectToOwner() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1190,10 +1188,10 @@ public void testRestartConnectorAndTasksUnknownConnector() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1216,7 +1214,7 @@ public void testRestartConnectorAndTasksNotLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1241,10 +1239,10 @@ public void testRestartConnectorAndTasksUnknownStatus() { // get the initial assignment 
when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1270,10 +1268,10 @@ public void testRestartConnectorAndTasksSuccess() throws Exception { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1311,7 +1309,7 @@ public void testDoRestartConnectorAndTasksNoAssignments() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); @@ -1327,13 +1325,13 @@ public void testDoRestartConnectorAndTasksOnlyConnector() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(Collections.singletonList(CONN1)); - when(herder.assignment.tasks()).thenReturn(Collections.emptyList()); + when(herder.assignment.connectors()).thenReturn(List.of(CONN1)); + when(herder.assignment.tasks()).thenReturn(List.of()); herder.configState = SNAPSHOT; @@ -1348,6 +1346,7 @@ public void testDoRestartConnectorAndTasksOnlyConnector() { return true; }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), any(), stateCallback.capture()); doNothing().when(member).wakeup(); + when(worker.connectorVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1360,24 +1359,25 @@ public void testDoRestartConnectorAndTasksOnlyTasks() { RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); // The connector has three tasks - when(restartPlan.taskIdsToRestart()).thenReturn(Arrays.asList(TASK0, TASK1, TASK2)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(TASK0, TASK1, TASK2)); when(restartPlan.totalTaskCount()).thenReturn(3); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(Collections.emptyList()); + when(herder.assignment.connectors()).thenReturn(List.of()); // But only one task is assigned to this worker - 
when(herder.assignment.tasks()).thenReturn(Collections.singletonList(TASK0)); + when(herder.assignment.tasks()).thenReturn(List.of(TASK0)); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); herder.configState = SNAPSHOT; - doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(TASK0)); + doNothing().when(worker).stopAndAwaitTasks(List.of(TASK0)); TaskStatus status = new TaskStatus(TASK0, AbstractStatus.State.RESTARTING, WORKER_ID, 0); doNothing().when(statusBackingStore).put(eq(status)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); + when(worker.taskVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1390,14 +1390,14 @@ public void testDoRestartConnectorAndTasksBoth() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); when(restartPlan.totalTaskCount()).thenReturn(1); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(Collections.singletonList(CONN1)); - when(herder.assignment.tasks()).thenReturn(Collections.singletonList(taskId)); + when(herder.assignment.connectors()).thenReturn(List.of(CONN1)); + when(herder.assignment.tasks()).thenReturn(List.of(taskId)); herder.configState = SNAPSHOT; @@ -1413,12 +1413,14 @@ public void testDoRestartConnectorAndTasksBoth() { }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), any(), stateCallback.capture()); doNothing().when(member).wakeup(); - doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); + doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); TaskStatus taskStatus = new TaskStatus(TASK0, AbstractStatus.State.RESTARTING, WORKER_ID, 0); doNothing().when(statusBackingStore).put(eq(taskStatus)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); + when(worker.taskVersion(any())).thenReturn(null); + when(worker.connectorVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1431,10 +1433,10 @@ public void testRestartTask() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); + expectRebalance(1, List.of(), List.of(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); @@ -1459,7 +1461,7 @@ public void testRestartUnknownTask() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); 
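
Several of the restart tests above stub void methods on mocks, e.g. doNothing().when(worker).stopAndAwaitTasks(List.of(TASK0)), and further down doThrow(...) simulates a failed config-topic write. A minimal sketch of that do-family stubbing style, assuming Mockito on the classpath; the Store interface here is a hypothetical stand-in, not part of the patch:

    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    public class VoidStubSketch {
        // Hypothetical stand-in for a mocked backing store.
        interface Store {
            void putTargetState(String connector, String state);
        }

        public static void main(String[] args) {
            Store store = mock(Store.class);

            // Void methods cannot be stubbed with when(...).thenReturn(...),
            // so the do-family is used: doNothing() for the happy path...
            doNothing().when(store).putTargetState("conn-1", "STOPPED");
            store.putTargetState("conn-1", "STOPPED");
            verify(store).putTargetState("conn-1", "STOPPED");

            // ...and doThrow() to simulate a failed write.
            doThrow(new RuntimeException("Could not write task configs"))
                    .when(store).putTargetState("conn-2", "STOPPED");
            try {
                store.putTargetState("conn-2", "STOPPED");
            } catch (RuntimeException e) {
                System.out.println("stubbed failure: " + e.getMessage());
            }
        }
    }
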
expectMemberPoll(); @@ -1481,7 +1483,7 @@ public void testRestartTaskRedirectToLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1504,10 +1506,10 @@ public void testRestartTaskRedirectToOwner() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); @@ -1552,7 +1554,7 @@ public void testConnectorConfigAdded() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join, no configs so no need to catch up on config topic - expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(-1, List.of(), List.of()); expectMemberPoll(); herder.tick(); // join @@ -1566,8 +1568,8 @@ public void testConnectorConfigAdded() { herder.tick(); // apply config // Performs rebalance and gets new assignment - expectRebalance(Collections.emptyList(), Collections.emptyList(), - ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(List.of(), List.of(), + ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of()); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -1593,7 +1595,7 @@ public void testConnectorConfigDetectedAfterLeaderAlreadyAssigned(short protocol when(member.currentProtocolVersion()).thenReturn(protocolVersion); // join, no configs so no need to catch up on config topic - expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(-1, List.of(), List.of()); expectMemberPoll(); herder.tick(); // join @@ -1611,8 +1613,8 @@ public void testConnectorConfigDetectedAfterLeaderAlreadyAssigned(short protocol // Performs rebalance and gets new assignment // Important--we're simulating a scenario where the leader has already detected the new // connector, and assigns it to our herder at the top of its tick thread - expectRebalance(Collections.emptyList(), Collections.emptyList(), - ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(List.of(), List.of(), + ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of()); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -1637,7 +1639,7 @@ public void testConnectorConfigUpdate() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(1, List.of(CONN1), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1670,10 +1672,11 @@ public void testConnectorConfigUpdateFailedTransformation() { when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); + when(worker.connectorVersion(CONN1)).thenReturn(null); WorkerConfigTransformer 
configTransformer = mock(WorkerConfigTransformer.class); // join - expectRebalance(1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(1, List.of(CONN1), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1692,15 +1695,15 @@ public void testConnectorConfigUpdateFailedTransformation() { ClusterConfigState snapshotWithTransform = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet(), + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of(), configTransformer ); when(configBackingStore.snapshot()).thenReturn(snapshotWithTransform); @@ -1730,7 +1733,7 @@ public void testConnectorPaused() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(1, List.of(CONN1), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1768,7 +1771,7 @@ public void testConnectorResumed() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // start with the connector paused - expectRebalance(1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(1, List.of(CONN1), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1); expectMemberPoll(); @@ -1809,7 +1812,7 @@ public void testConnectorStopped() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, singletonList(CONN1), Collections.emptyList()); + expectRebalance(1, List.of(CONN1), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1847,7 +1850,7 @@ public void testUnknownConnectorPaused() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join - expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); + expectRebalance(1, List.of(), List.of(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1872,10 +1875,10 @@ public void testStopConnector() throws Exception { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join as leader - expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); + expectRebalance(1, List.of(), List.of(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -1885,7 +1888,7 @@ public void testStopConnector() throws Exception { // handle stop request expectMemberEnsureActive(); expectConfigRefreshAndSnapshot(SNAPSHOT); - doNothing().when(configBackingStore).putTaskConfigs(CONN1, Collections.emptyList()); + doNothing().when(configBackingStore).putTaskConfigs(CONN1, List.of()); doNothing().when(configBackingStore).putTargetState(CONN1, TargetState.STOPPED); FutureCallback cb = new FutureCallback<>(); @@ -1906,7 +1909,7 @@ public void testStopConnectorNotLeader() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join as member (non-leader) 
- expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); + expectRebalance(1, List.of(), List.of(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1939,10 +1942,10 @@ public void testStopConnectorFailToWriteTaskConfigs() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join as leader - expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); + expectRebalance(1, List.of(), List.of(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -1952,7 +1955,7 @@ public void testStopConnectorFailToWriteTaskConfigs() { ConnectException taskConfigsWriteException = new ConnectException("Could not write task configs to config topic"); // handle stop request expectMemberEnsureActive(); - doThrow(taskConfigsWriteException).when(configBackingStore).putTaskConfigs(CONN1, Collections.emptyList()); + doThrow(taskConfigsWriteException).when(configBackingStore).putTaskConfigs(CONN1, List.of()); // We do not expect configBackingStore::putTargetState to be invoked, which // is intentional since that call should only take place if we are first able to // successfully write the empty list of task configs @@ -1983,7 +1986,7 @@ public void testConnectorPausedRunningTaskOnly() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join - expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); + expectRebalance(1, List.of(), List.of(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2018,7 +2021,7 @@ public void testConnectorResumedRunningTaskOnly() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join - expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); + expectRebalance(1, List.of(), List.of(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1); expectMemberPoll(); @@ -2052,7 +2055,7 @@ public void testTaskConfigAdded() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join - expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(-1, List.of(), List.of()); expectMemberPoll(); herder.tick(); // join @@ -2062,13 +2065,13 @@ public void testTaskConfigAdded() { // Rebalance will be triggered when the new config is detected doNothing().when(member).requestRejoin(); - configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK0, TASK1, TASK2)); // read updated config + configUpdateListener.onTaskConfigUpdate(List.of(TASK0, TASK1, TASK2)); // read updated config herder.tick(); // apply config // Performs rebalance and gets new assignment - expectRebalance(Collections.emptyList(), Collections.emptyList(), - ConnectProtocol.Assignment.NO_ERROR, 1, Collections.emptyList(), - singletonList(TASK0)); + expectRebalance(List.of(), List.of(), + ConnectProtocol.Assignment.NO_ERROR, 1, List.of(), + List.of(TASK0)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); herder.tick(); // do rebalance @@ -2082,13 +2085,13 @@ public void testJoinLeaderCatchUpFails() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT); - 
when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(Collections.emptyList(), Collections.emptyList(), - ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", Collections.emptyList(), - Collections.emptyList(), 0, true); + expectRebalance(List.of(), List.of(), + ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", List.of(), + List.of(), 0, true); // Reading to end of log times out doThrow(new TimeoutException()).when(configBackingStore).refresh(anyLong(), any(TimeUnit.class)); @@ -2107,7 +2110,7 @@ public void testJoinLeaderCatchUpFails() throws Exception { before = time.milliseconds(); // After backoff, restart the process and this time succeed - expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); + expectRebalance(1, List.of(CONN1), List.of(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -2126,7 +2129,7 @@ public void testJoinLeaderCatchUpFails() throws Exception { assertStatistics("leaderUrl", false, 3, 1, 100, 2000L); // one more tick, to make sure we don't keep trying to read to the config topic unnecessarily - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); // tick once more to ensure that the successful read to the end of the config topic was // tracked and no further unnecessary attempts were made @@ -2143,10 +2146,10 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep // Join group as leader when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); + expectRebalance(1, List.of(CONN1), List.of(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2165,9 +2168,9 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep herder.tick(); // The leader gets the same assignment after a rebalance is triggered - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); + 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -2176,9 +2179,9 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep // Another rebalance is triggered but this time it fails to read to the max offset and // triggers a re-sync - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", - Collections.emptyList(), Collections.emptyList(), 0, true); + List.of(), List.of(), 0, true); // The leader will retry a few times to read to the end of the config log doNothing().when(member).requestRejoin(); @@ -2199,9 +2202,9 @@ public void 
testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep } // After a few retries succeed to read the log to the end - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); + 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); expectConfigRefreshAndSnapshot(SNAPSHOT); before = time.milliseconds(); @@ -2219,10 +2222,10 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti // Join group as leader when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); + expectRebalance(1, List.of(CONN1), List.of(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2241,9 +2244,9 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti herder.tick(); // The leader gets the same assignment after a rebalance is triggered - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, - "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); + "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -2252,9 +2255,9 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti // Another rebalance is triggered but this time it fails to read to the max offset and // triggers a re-sync - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", - Collections.emptyList(), Collections.emptyList(), 0, true); + List.of(), List.of(), 0, true); // The leader will exhaust the retries while trying to read to the end of the config log doNothing().when(member).requestRejoin(); @@ -2280,14 +2283,14 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti herder.tick(); assertEquals(before, time.milliseconds()); - assertEquals(Collections.singleton(CONN1), assignmentCapture.getValue().connectors()); - assertEquals(Collections.singleton(TASK1), assignmentCapture.getValue().tasks()); + assertEquals(Set.of(CONN1), assignmentCapture.getValue().connectors()); + assertEquals(Set.of(TASK1), assignmentCapture.getValue().tasks()); // After a complete backoff and a revocation of running tasks rejoin and this time succeed // The worker gets back the assignment that had given up - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), + 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); expectConfigRefreshAndSnapshot(SNAPSHOT); @@ -2300,10 +2303,10 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti public void testAccessors() throws Exception { when(member.memberId()).thenReturn("leader"); 
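// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the replacements throughout this
// file swap the legacy Collections.emptyList()/singletonList()/singletonMap()
// helpers for the JDK 9+ List.of()/Map.of()/Set.of() factories. A minimal,
// self-contained sketch of the behavioural differences that matter for test code
// like this; all class and variable names below are hypothetical.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ImmutableFactorySketch {
    public static void main(String[] args) {
        List<String> empty = List.of();                // immutable, shared empty instance
        Map<String, String> single = Map.of("k", "v"); // immutable, at most 10 key/value pairs inline

        // Both factories reject structural modification...
        try {
            single.put("k2", "v2");
        } catch (UnsupportedOperationException e) {
            System.out.println("Map.of() results are unmodifiable");
        }

        // ...and reject nulls, unlike Collections.singletonMap("k", null).
        try {
            Map.of("k", null);
        } catch (NullPointerException e) {
            System.out.println("Map.of() rejects null keys and values");
        }

        // Where a test needs to edit a config afterwards, it still copies into a
        // HashMap first, which is the pattern the patched tests keep (for example
        // copying CONN1_CONFIG into a HashMap before modifying it).
        Map<String, String> mutableCopy = new HashMap<>(single);
        mutableCopy.put("k2", "v2");
        System.out.println(mutableCopy + " " + empty);
    }
}
// ---------------------------------------------------------------------------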
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectMemberPoll(); @@ -2312,15 +2315,15 @@ public void testAccessors() throws Exception { ClusterConfigState snapshotWithTransform = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet(), + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of(), configTransformer); expectConfigRefreshAndSnapshot(snapshotWithTransform); @@ -2337,15 +2340,15 @@ public void testAccessors() throws Exception { herder.tick(); assertTrue(listConnectorsCb.isDone()); - assertEquals(Collections.singleton(CONN1), listConnectorsCb.get()); + assertEquals(Set.of(CONN1), listConnectorsCb.get()); assertTrue(connectorInfoCb.isDone()); - ConnectorInfo info = new ConnectorInfo(CONN1, CONN1_CONFIG, Arrays.asList(TASK0, TASK1, TASK2), + ConnectorInfo info = new ConnectorInfo(CONN1, CONN1_CONFIG, List.of(TASK0, TASK1, TASK2), ConnectorType.SOURCE); assertEquals(info, connectorInfoCb.get()); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG, connectorConfigCb.get()); assertTrue(taskConfigsCb.isDone()); - assertEquals(Arrays.asList( + assertEquals(List.of( new TaskInfo(TASK0, TASK_CONFIG), new TaskInfo(TASK1, TASK_CONFIG), new TaskInfo(TASK2, TASK_CONFIG)), @@ -2361,8 +2364,8 @@ public void testPutConnectorConfig() throws Exception { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); when(member.memberId()).thenReturn("leader"); - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + expectRebalance(1, List.of(CONN1), List.of(), true); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); @@ -2414,7 +2417,7 @@ public void testPutConnectorConfig() throws Exception { herder.putConnectorConfig(CONN1, CONN1_CONFIG_UPDATED, true, putConfigCb); herder.tick(); assertTrue(putConfigCb.isDone()); - ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, Arrays.asList(TASK0, TASK1, TASK2), + ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, List.of(TASK0, TASK1, TASK2), ConnectorType.SOURCE); assertEquals(new Herder.Created<>(false, updatedInfo), putConfigCb.get()); @@ -2433,21 +2436,21 @@ public void testPutConnectorConfig() throws Exception { @Test public void testPatchConnectorConfigNotFound() { when(member.memberId()).thenReturn("leader"); - expectRebalance(0, Collections.emptyList(), Collections.emptyList(), true); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + expectRebalance(0, List.of(), List.of(), 
true); + when(statusBackingStore.connectors()).thenReturn(Set.of()); ClusterConfigState clusterConfigState = new ClusterConfigState( 0, null, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of()); expectConfigRefreshAndSnapshot(clusterConfigState); Map connConfigPatch = new HashMap<>(); @@ -2469,21 +2472,21 @@ public void testPatchConnectorConfigNotALeader() { ClusterConfigState originalSnapshot = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 0), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet()); + Map.of(CONN1, 0), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of()); expectConfigRefreshAndSnapshot(originalSnapshot); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // Patch the connector config. - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), false); + expectRebalance(1, List.of(CONN1), List.of(), false); FutureCallback> patchCallback = new FutureCallback<>(); herder.patchConnectorConfig(CONN1, new HashMap<>(), patchCallback); @@ -2497,7 +2500,7 @@ public void testPatchConnectorConfigNotALeader() { public void testPatchConnectorConfig() throws Exception { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); when(member.memberId()).thenReturn("leader"); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); Map originalConnConfig = new HashMap<>(CONN1_CONFIG); originalConnConfig.put("foo0", "unaffected"); @@ -2509,15 +2512,15 @@ public void testPatchConnectorConfig() throws Exception { ClusterConfigState originalSnapshot = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 0), - Collections.singletonMap(CONN1, originalConnConfig), - Collections.singletonMap(CONN1, TargetState.STARTED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet()); + Map.of(CONN1, 0), + Map.of(CONN1, originalConnConfig), + Map.of(CONN1, TargetState.STARTED), + Map.of(), + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of()); expectConfigRefreshAndSnapshot(originalSnapshot); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); @@ -2535,7 +2538,7 @@ public void testPatchConnectorConfig() throws Exception { patchedConnConfig.remove("foo2"); patchedConnConfig.put("foo3", "added"); - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); + expectRebalance(1, List.of(CONN1), List.of(), true); ArgumentCaptor> validateCallback = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -2560,11 +2563,11 @@ public void testPatchConnectorConfig() throws Exception { @Test public void testKeyRotationWhenWorkerBecomesLeader() { - long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_MS_DEFAULT; + long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_DEFAULT; 
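// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): testPatchConnectorConfig above
// derives the expected result by copying the original config, removing one key
// and adding another. A rough sketch of that merge rule, assuming the semantics
// the assertions imply (a null patch value removes a key, a non-null value adds
// or overwrites it); the helper name applyPatch is hypothetical.
import java.util.HashMap;
import java.util.Map;

public final class ConfigPatchSketch {
    static Map<String, String> applyPatch(Map<String, String> original, Map<String, String> patch) {
        Map<String, String> result = new HashMap<>(original);
        patch.forEach((key, value) -> {
            if (value == null) {
                result.remove(key);   // null value acts as a tombstone
            } else {
                result.put(key, value);
            }
        });
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> original = new HashMap<>(Map.of("foo1", "unchanged", "foo2", "to-be-deleted"));
        Map<String, String> patch = new HashMap<>();
        patch.put("foo2", null);      // delete (Map.of cannot hold nulls, hence the HashMap)
        patch.put("foo3", "added");   // add
        System.out.println(applyPatch(original, patch)); // foo2 removed, foo3 added, foo1 untouched
    }
}
// ---------------------------------------------------------------------------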
when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2574,20 +2577,20 @@ public void testKeyRotationWhenWorkerBecomesLeader() { // First rebalance: poll indefinitely as no key has been read yet, so expiration doesn't come into play verify(member).poll(eq(Long.MAX_VALUE), any()); - expectRebalance(2, Collections.emptyList(), Collections.emptyList()); + expectRebalance(2, List.of(), List.of()); SessionKey initialKey = new SessionKey(mock(SecretKey.class), 0); ClusterConfigState snapshotWithKey = new ClusterConfigState( 2, initialKey, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); expectConfigRefreshAndSnapshot(snapshotWithKey); configUpdateListener.onSessionKeyUpdate(initialKey); @@ -2596,8 +2599,8 @@ public void testKeyRotationWhenWorkerBecomesLeader() { // Second rebalance: poll indefinitely as worker is follower, so expiration still doesn't come into play verify(member, times(2)).poll(eq(Long.MAX_VALUE), any()); - expectRebalance(2, Collections.emptyList(), Collections.emptyList(), "member", MEMBER_URL, true); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + expectRebalance(2, List.of(), List.of(), "member", MEMBER_URL, true); + when(statusBackingStore.connectors()).thenReturn(Set.of()); ArgumentCaptor updatedKey = ArgumentCaptor.forClass(SessionKey.class); doAnswer(invocation -> { configUpdateListener.onSessionKeyUpdate(updatedKey.getValue()); @@ -2613,12 +2616,12 @@ public void testKeyRotationWhenWorkerBecomesLeader() { @Test public void testKeyRotationDisabledWhenWorkerBecomesFollower() { - long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_MS_DEFAULT; + long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_DEFAULT; when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), "member", MEMBER_URL, true); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + expectRebalance(1, List.of(), List.of(), "member", MEMBER_URL, true); + when(statusBackingStore.connectors()).thenReturn(Set.of()); SecretKey initialSecretKey = mock(SecretKey.class); when(initialSecretKey.getAlgorithm()).thenReturn(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT); when(initialSecretKey.getEncoded()).thenReturn(new byte[32]); @@ -2626,15 +2629,15 @@ public void testKeyRotationDisabledWhenWorkerBecomesFollower() { ClusterConfigState snapshotWithKey = new ClusterConfigState( 1, initialKey, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - 
Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); expectConfigRefreshAndSnapshot(snapshotWithKey); expectMemberPoll(); @@ -2644,7 +2647,7 @@ public void testKeyRotationDisabledWhenWorkerBecomesFollower() { // First rebalance: poll for a limited time as worker is leader and must wake up for key expiration verify(member).poll(leq(rotationTtlDelay), any()); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); herder.tick(); // Second rebalance: poll indefinitely as worker is no longer leader, so key expiration doesn't come into play @@ -2664,7 +2667,7 @@ public void testPutTaskConfigsSignatureNotRequiredV0() { verify(member).wakeup(); verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - singletonList("awaiting startup"), + List.of("awaiting startup"), stages ); } @@ -2681,7 +2684,7 @@ public void testPutTaskConfigsSignatureNotRequiredV1() { verify(member).wakeup(); verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - singletonList("awaiting startup"), + List.of("awaiting startup"), stages ); } @@ -2787,7 +2790,7 @@ public void testPutTaskConfigsValidRequiredSignature() { verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - singletonList("awaiting startup"), + List.of("awaiting startup"), stages ); } @@ -2798,7 +2801,7 @@ public void testFailedToWriteSessionKey() { // session key to the config topic, and fail when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); doThrow(new ConnectException("Oh no!")).when(configBackingStore).putSessionKey(any(SessionKey.class)); @@ -2828,15 +2831,15 @@ public void testFailedToReadBackNewlyWrittenSessionKey() throws Exception { ClusterConfigState snapshotWithSessionKey = new ClusterConfigState( 1, sessionKey, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(), + Map.of(), + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); // First tick -- after joining the group, we try to write a new session key to // the config topic, and fail (in this case, we're trying to simulate that we've @@ -2845,7 +2848,7 @@ public void testFailedToReadBackNewlyWrittenSessionKey() throws Exception { // to write the key) when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); doThrow(new ConnectException("Oh no!")).when(configBackingStore).putSessionKey(any(SessionKey.class)); @@ -2918,7 +2921,7 @@ private void testTaskRequestedZombieFencingForwardingToLeader(boolean succeed) t 
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); expectConfigRefreshAndSnapshot(SNAPSHOT); - expectRebalance(1, Collections.emptyList(), Collections.emptyList()); + expectRebalance(1, List.of(), List.of()); expectMemberPoll(); doAnswer(invocation -> { @@ -2965,9 +2968,9 @@ public void testExternalZombieFencingRequestForAlreadyFencedConnector() throws E ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), TASK_CONFIGS_MAP, - Collections.singletonMap(CONN1, 12), - Collections.singletonMap(CONN1, 5), - Collections.emptySet() + Map.of(CONN1, 12), + Map.of(CONN1, 5), + Set.of() ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, false); } @@ -2977,10 +2980,10 @@ public void testExternalZombieFencingRequestForSingleTaskConnector() throws Exce when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), - Collections.singletonMap(TASK1, TASK_CONFIG), - Collections.singletonMap(CONN1, 1), - Collections.singletonMap(CONN1, 5), - Collections.singleton(CONN1) + Map.of(TASK1, TASK_CONFIG), + Map.of(CONN1, 1), + Map.of(CONN1, 5), + Set.of(CONN1) ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, true); } @@ -2991,9 +2994,9 @@ public void testExternalZombieFencingRequestForFreshConnector() throws Exception ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), TASK_CONFIGS_MAP, - Collections.emptyMap(), - Collections.singletonMap(CONN1, 5), - Collections.singleton(CONN1) + Map.of(), + Map.of(CONN1, 5), + Set.of(CONN1) ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, true); } @@ -3007,9 +3010,9 @@ private void testExternalZombieFencingRequestThatRequiresNoPhysicalFencing( when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); expectConfigRefreshAndSnapshot(configState); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); if (expectTaskCountRecord) { @@ -3042,19 +3045,19 @@ public void testExternalZombieFencingRequestImmediateCompletion() throws Excepti when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Collections.singletonMap(CONN1, 2), - Collections.singletonMap(CONN1, 5), - Collections.singleton(CONN1) + Map.of(CONN1, 2), + Map.of(CONN1, 5), + Set.of(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // The future returned by Worker::fenceZombies @@ -3102,19 +3105,19 @@ public void testExternalZombieFencingRequestSynchronousFailure() throws Exceptio when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); 
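// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the key-rotation tests above check
// how long the worker polls by matching on the timeout argument, e.g.
// verify(member).poll(eq(Long.MAX_VALUE), any()) versus poll(leq(rotationTtlDelay), any()).
// A minimal sketch of that verification style with a hypothetical Member interface;
// it only assumes stock Mockito (eq/any from ArgumentMatchers, leq from AdditionalMatchers).
import static org.mockito.AdditionalMatchers.leq;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class PollTimeoutVerifySketch {
    interface Member {
        void poll(long timeoutMs, Runnable onPoll);
    }

    public static void main(String[] args) {
        Member member = mock(Member.class);

        member.poll(Long.MAX_VALUE, () -> { });  // follower: no deadline, poll "forever"
        member.poll(1_000L, () -> { });          // leader: must wake up before a key-rotation TTL of, say, 3600000 ms

        verify(member).poll(eq(Long.MAX_VALUE), any());  // exactly one unbounded poll
        verify(member).poll(leq(3_600_000L), any());     // exactly one poll bounded by the TTL (1000 <= 3600000)
    }
}
// ---------------------------------------------------------------------------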
SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Collections.singletonMap(CONN1, 2), - Collections.singletonMap(CONN1, 5), - Collections.singleton(CONN1) + Map.of(CONN1, 2), + Map.of(CONN1, 5), + Set.of(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); Exception fencingException = new KafkaException("whoops!"); @@ -3146,19 +3149,19 @@ public void testExternalZombieFencingRequestAsynchronousFailure() throws Excepti when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Collections.singletonMap(CONN1, 2), - Collections.singletonMap(CONN1, 5), - Collections.singleton(CONN1) + Map.of(CONN1, 2), + Map.of(CONN1, 5), + Set.of(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // The future returned by Worker::fenceZombies @@ -3216,7 +3219,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); SessionKey sessionKey = expectNewSessionKey(); Map taskCountRecords = new HashMap<>(); @@ -3227,7 +3230,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception taskConfigGenerations.put(CONN1, 3); taskConfigGenerations.put(CONN2, 4); taskConfigGenerations.put(conn3, 2); - Set pendingFencing = new HashSet<>(Arrays.asList(CONN1, CONN2, conn3)); + Set pendingFencing = Set.of(CONN1, CONN2, conn3); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, @@ -3238,7 +3241,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); // The callbacks that the herder has accrued for outstanding fencing futures, which will be completed after @@ -3284,7 +3287,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception tasksPerConnector.forEach((connector, numStackedRequests) -> { List> connectorFencingRequests = IntStream.range(0, numStackedRequests) .mapToObj(i -> new FutureCallback()) - .collect(Collectors.toList()); + .toList(); connectorFencingRequests.forEach(fencing -> herder.fenceZombieSourceTasks(connector, fencing) @@ -3320,22 +3323,22 @@ public void testVerifyTaskGeneration() { herder.configState = new ClusterConfigState( 1, null, - Collections.singletonMap(CONN1, 3), - Collections.singletonMap(CONN1, CONN1_CONFIG), - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, 3), + Map.of(CONN1, CONN1_CONFIG), + Map.of(CONN1, TargetState.STARTED), 
TASK_CONFIGS_MAP, - Collections.emptyMap(), + Map.of(), taskConfigGenerations, - Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Collections.emptySet(), - Collections.emptySet()); + Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Set.of(), + Set.of()); Callback verifyCallback = mock(Callback.class); herder.assignment = new ExtendedAssignment( (short) 2, (short) 0, "leader", "leaderUrl", 0, - Collections.emptySet(), Collections.singleton(TASK1), - Collections.emptySet(), Collections.emptySet(), 0); + Set.of(), Set.of(TASK1), + Set.of(), Set.of(), 0); assertThrows(ConnectException.class, () -> herder.verifyTaskGenerationAndOwnership(TASK1, 0, verifyCallback)); assertThrows(ConnectException.class, () -> herder.verifyTaskGenerationAndOwnership(TASK1, 1, verifyCallback)); @@ -3428,7 +3431,7 @@ public void testPollDurationOnSlowConnectorOperations() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // Assign the connector to this worker, and have it start - expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onFirstStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -3441,7 +3444,7 @@ public void testPollDurationOnSlowConnectorOperations() { herder.tick(); // Rebalance again due to config update - expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT_UPDATED_CONN1_CONFIG); doNothing().when(worker).stopAndAwaitConnector(CONN1); @@ -3457,7 +3460,7 @@ public void testPollDurationOnSlowConnectorOperations() { herder.tick(); // Third tick should resolve all outstanding requests - expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); // which includes querying the connector task configs after the update expectExecuteTaskReconfiguration(true, conn1SinkConfigUpdated, invocation -> { time.sleep(operationDelayMs); @@ -3476,7 +3479,7 @@ public void testPollDurationOnSlowConnectorOperations() { public void shouldThrowWhenStartAndStopExecutorThrowsRejectedExecutionExceptionAndHerderNotStopping() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); + expectRebalance(1, List.of(CONN1), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.startAndStopExecutor.shutdown(); @@ -3488,7 +3491,7 @@ public void testTaskReconfigurationRetriesWithConnectorTaskConfigsException() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, 
List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3510,7 +3513,7 @@ public void testTaskReconfigurationNoRetryWithTooManyTasks() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3550,14 +3553,14 @@ public void testTaskReconfigurationNoRetryWithTooManyTasks() { public void testTaskReconfigurationRetriesWithLeaderRequestForwardingException() { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, Collections.emptyList(), new MockSynchronousExecutor(), new AutoCloseable[]{})); + noneConnectorClientConfigOverridePolicy, List.of(), new MockSynchronousExecutor(), new AutoCloseable[]{})); verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), false); + expectRebalance(1, List.of(), List.of(), false); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3673,7 +3676,7 @@ public void testExactlyOnceSourceSupportValidation() { connectorMock, SourceConnectorConfig.configDef(), config); List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); - assertEquals(Collections.emptyList(), errors); + assertEquals(List.of(), errors); } @Test @@ -3690,7 +3693,7 @@ public void testExactlyOnceSourceSupportValidationOnUnsupportedConnector() { List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); assertEquals( - Collections.singletonList("The connector does not support exactly-once semantics with the provided configuration."), + List.of("The connector does not support exactly-once semantics with the provided configuration."), errors); } @@ -3748,7 +3751,7 @@ public void testExactlyOnceSourceSupportValidationWhenExactlyOnceNotEnabledOnWor List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); assertEquals( - Collections.singletonList("This worker does not have exactly-once source support enabled."), + List.of("This worker does not have exactly-once source support enabled."), errors); } @@ -3785,7 +3788,7 @@ public void testConnectorTransactionBoundaryValidation() { connectorMock, SourceConnectorConfig.configDef(), config); List errors = validatedConfigs.get(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG).errorMessages(); - assertEquals(Collections.emptyList(), errors); + assertEquals(List.of(), errors); } @Test @@ -3853,18 +3856,18 @@ public void testConnectorTransactionBoundaryValidationHandlesInvalidValuesGracef public void testConnectorOffsets() throws Exception { when(member.memberId()).thenReturn("leader"); 
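// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): a few hunks above, a
// .collect(Collectors.toList()) on the stacked fencing callbacks is replaced with
// Stream.toList(). The two are not strictly interchangeable: toList() (JDK 16+)
// returns an unmodifiable list, while Collectors.toList() has historically returned
// a mutable ArrayList. A small self-contained sketch of the difference; the swap is
// safe above because that list is only iterated, never mutated.
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class StreamToListSketch {
    public static void main(String[] args) {
        List<Integer> mutable = IntStream.range(0, 3).boxed().collect(Collectors.toList());
        mutable.add(3); // fine: Collectors.toList() yields a modifiable list in practice

        List<Integer> unmodifiable = IntStream.range(0, 3).boxed().toList();
        try {
            unmodifiable.add(3);
        } catch (UnsupportedOperationException e) {
            System.out.println("Stream.toList() results cannot be modified");
        }
    }
}
// ---------------------------------------------------------------------------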
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); + when(statusBackingStore.connectors()).thenReturn(Set.of()); expectMemberPoll(); herder.tick(); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT); - ConnectorOffsets offsets = new ConnectorOffsets(Collections.singletonList(new ConnectorOffset( - Collections.singletonMap("partitionKey", "partitionValue"), - Collections.singletonMap("offsetKey", "offsetValue")))); + ConnectorOffsets offsets = new ConnectorOffsets(List.of(new ConnectorOffset( + Map.of("partitionKey", "partitionValue"), + Map.of("offsetKey", "offsetValue")))); ArgumentCaptor> callbackCapture = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -3885,7 +3888,7 @@ public void testModifyConnectorOffsetsUnknownConnector() { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.tick(); @@ -3903,7 +3906,7 @@ public void testModifyOffsetsConnectorNotInStoppedState() { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.tick(); @@ -3921,7 +3924,7 @@ public void testModifyOffsetsNotLeader() { // Get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), false); + expectRebalance(1, List.of(), List.of(), false); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); @@ -3940,15 +3943,15 @@ public void testModifyOffsetsSinkConnector() throws Exception { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); // Now handle the alter connector offsets request - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap("partitionKey", "partitionValue"), - Collections.singletonMap("offsetKey", "offsetValue")); + Map, Map> offsets = Map.of( + Map.of("partitionKey", "partitionValue"), + Map.of("offsetKey", "offsetValue")); ArgumentCaptor> workerCallbackCapture = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); @@ -3970,7 +3973,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceDisabled() throws Excepti when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); 
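// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): testConnectorOffsets above stubs an
// asynchronous worker call by capturing the Callback argument and completing it
// inside a doAnswer. A minimal sketch of that pattern with hypothetical
// OffsetReader/Callback types; only stock Mockito APIs (ArgumentCaptor, doAnswer)
// are assumed.
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.atomic.AtomicReference;

import org.mockito.ArgumentCaptor;

public class CallbackCaptureSketch {
    interface Callback<T> {
        void onCompletion(Throwable error, T result);
    }

    interface OffsetReader {
        void connectorOffsets(String connector, Callback<String> cb);
    }

    public static void main(String[] args) {
        OffsetReader reader = mock(OffsetReader.class);

        @SuppressWarnings("unchecked")
        ArgumentCaptor<Callback<String>> callbackCapture = ArgumentCaptor.forClass(Callback.class);

        // When the mock is invoked, immediately complete whatever callback was passed in.
        doAnswer(invocation -> {
            callbackCapture.getValue().onCompletion(null, "offsets-for-conn1");
            return null;
        }).when(reader).connectorOffsets(eq("conn1"), callbackCapture.capture());

        AtomicReference<String> observed = new AtomicReference<>();
        reader.connectorOffsets("conn1", (error, result) -> observed.set(result));
        System.out.println(observed.get()); // offsets-for-conn1
    }
}
// ---------------------------------------------------------------------------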
expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); @@ -3998,7 +4001,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); expectMemberPoll(); @@ -4023,9 +4026,9 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio ArgumentCaptor> workerCallbackCapture = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); - Map, Map> offsets = Collections.singletonMap( - Collections.singletonMap("partitionKey", "partitionValue"), - Collections.singletonMap("offsetKey", "offsetValue")); + Map, Map> offsets = Map.of( + Map.of("partitionKey", "partitionValue"), + Map.of("offsetKey", "offsetValue")); doAnswer(invocation -> { workerCallbackCapture.getValue().onCompletion(null, msg); return null; @@ -4065,7 +4068,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabledZombieFencingFailu // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); + expectRebalance(1, List.of(), List.of(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); expectMemberPoll(); @@ -4130,14 +4133,14 @@ private void expectRebalance(final long offset, final List assignedTasks, final boolean isLeader) { - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, offset, "leader", "leaderUrl", assignedConnectors, assignedTasks, 0, isLeader); } private void expectRebalance(final long offset, final List assignedConnectors, final List assignedTasks, String leader, String leaderUrl, boolean isLeader) { - expectRebalance(Collections.emptyList(), Collections.emptyList(), + expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, offset, leader, leaderUrl, assignedConnectors, assignedTasks, 0, isLeader); } @@ -4183,12 +4186,12 @@ private void expectRebalance(final Collection revokedConnectors, if (connectProtocolVersion == CONNECT_PROTOCOL_V0) { assignment = new ExtendedAssignment( connectProtocolVersion, error, leader, leaderUrl, offset, - assignedConnectors, assignedTasks, - Collections.emptyList(), Collections.emptyList(), 0); + new ArrayList<>(assignedConnectors), new ArrayList<>(assignedTasks), + new ArrayList<>(), new ArrayList<>(), 0); } else { assignment = new ExtendedAssignment( connectProtocolVersion, error, leader, leaderUrl, offset, - assignedConnectors, assignedTasks, + new ArrayList<>(assignedConnectors), new ArrayList<>(assignedTasks), new ArrayList<>(revokedConnectors), new ArrayList<>(revokedTasks), delay); } rebalanceListener.onAssigned(assignment, 3); @@ -4265,13 +4268,13 @@ private ClusterConfigState exactlyOnceSnapshot( sessionKey, taskCounts, connectorConfigs, - Collections.singletonMap(CONN1, TargetState.STARTED), + Map.of(CONN1, TargetState.STARTED), taskConfigs, taskCountRecords, taskConfigGenerations, appliedConnectorConfigs, pendingFencing, - Collections.emptySet()); + Set.of()); } private void expectExecuteTaskReconfiguration(boolean running, 
ConnectorConfig connectorConfig, Answer>> answer) { @@ -4420,7 +4423,7 @@ private DistributedHerder exactlyOnceHerder() { config.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); return mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(config), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[0])); + noneConnectorClientConfigOverridePolicy, List.of(), null, new AutoCloseable[0])); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java index 86bc897fafe23..84bb8b145ad0a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java @@ -37,9 +37,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -662,8 +660,8 @@ public void testAssignConnectorsWhenBalanced() { List expectedAssignment = existingAssignment.stream() .map(wl -> new WorkerLoad.Builder(wl.worker()).withCopies(wl.connectors(), wl.tasks()).build()) .collect(Collectors.toList()); - expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9")); - expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10")); + expectedAssignment.get(0).connectors().addAll(List.of("connector6", "connector9")); + expectedAssignment.get(1).connectors().addAll(List.of("connector7", "connector10")); expectedAssignment.get(2).connectors().add("connector8"); List newConnectors = newConnectors(6, 11); @@ -682,12 +680,12 @@ public void testAssignTasksWhenBalanced() { .map(wl -> new WorkerLoad.Builder(wl.worker()).withCopies(wl.connectors(), wl.tasks()).build()) .collect(Collectors.toList()); - expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9")); - expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10")); + expectedAssignment.get(0).connectors().addAll(List.of("connector6", "connector9")); + expectedAssignment.get(1).connectors().addAll(List.of("connector7", "connector10")); expectedAssignment.get(2).connectors().add("connector8"); - expectedAssignment.get(0).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 6), new ConnectorTaskId("task", 9))); - expectedAssignment.get(1).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 7), new ConnectorTaskId("task", 10))); + expectedAssignment.get(0).tasks().addAll(List.of(new ConnectorTaskId("task", 6), new ConnectorTaskId("task", 9))); + expectedAssignment.get(1).tasks().addAll(List.of(new ConnectorTaskId("task", 7), new ConnectorTaskId("task", 10))); expectedAssignment.get(2).tasks().add(new ConnectorTaskId("task", 8)); List newConnectors = newConnectors(6, 11); @@ -734,7 +732,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), 
assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -751,7 +749,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -766,7 +764,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.singleton(flakyWorker), + assertEquals(Set.of(flakyWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -787,7 +785,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { .tasks() .containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -814,7 +812,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -831,7 +829,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -845,7 +843,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -861,7 +859,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { "Wrong assignment of lost connectors"); assertTrue(lostAssignmentsToReassign.build().tasks().containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -888,7 +886,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -908,7 +906,7 @@ public void 
testLostAssignmentHandlingWithMoreThanOneCandidates() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.singleton(newWorker), + assertEquals(Set.of(newWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -923,7 +921,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - Set expectedWorkers = new HashSet<>(Arrays.asList(newWorker, flakyWorker)); + Set expectedWorkers = Set.of(newWorker, flakyWorker); assertEquals(expectedWorkers, assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); @@ -956,7 +954,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { "Wrong assignment of lost connectors"); assertTrue(listOfTasksInLast2Workers.containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -983,7 +981,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1000,7 +998,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -1015,7 +1013,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.singleton(veryFlakyWorker), + assertEquals(Set.of(veryFlakyWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -1034,7 +1032,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { "Wrong assignment of lost connectors"); assertTrue(lostAssignmentsToReassign.build().tasks().containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1062,7 +1060,7 @@ public void testLostAssignmentHandlingWhenScheduledDelayIsDisabled() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1080,7 +1078,7 @@ public void 
testLostAssignmentHandlingWhenScheduledDelayIsDisabled() { assignor.handleLostAssignments(lostAssignments, lostAssignmentsToReassign, new ArrayList<>(configuredAssignment.values())); - assertEquals(Collections.emptySet(), + assertEquals(Set.of(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1221,10 +1219,10 @@ public void testProtocolV1() { leader, "followMe:618", CONFIG_OFFSET, - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), + Set.of(), + Set.of(), + Set.of(), + Set.of(), 0 ); ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment); @@ -1236,7 +1234,7 @@ public void testProtocolV1() { when(coordinator.configSnapshot()).thenReturn(configState()); Map serializedAssignments = assignor.performAssignment( leader, - ConnectProtocolCompatibility.COMPATIBLE.protocol(), + ConnectProtocolCompatibility.COMPATIBLE, memberMetadata, coordinator ); @@ -1262,10 +1260,10 @@ public void testProtocolV2() { leader, "followMe:618", CONFIG_OFFSET, - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), - Collections.emptySet(), + Set.of(), + Set.of(), + Set.of(), + Set.of(), 0 ); ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment); @@ -1277,7 +1275,7 @@ public void testProtocolV2() { when(coordinator.configSnapshot()).thenReturn(configState()); Map serializedAssignments = assignor.performAssignment( leader, - ConnectProtocolCompatibility.SESSIONED.protocol(), + ConnectProtocolCompatibility.SESSIONED, memberMetadata, coordinator ); @@ -1328,7 +1326,7 @@ private void performRebalance(boolean assignmentFailure, boolean generationMisma private void addNewEmptyWorkers(String... 
workers) { for (String worker : workers) { - addNewWorker(worker, Collections.emptyList(), Collections.emptyList()); + addNewWorker(worker, List.of(), List.of()); } } @@ -1392,13 +1390,13 @@ private void removeConnector(String connector) { private ClusterConfigState configState() { Map taskCounts = new HashMap<>(connectors); - Map> connectorConfigs = transformValues(taskCounts, c -> Collections.emptyMap()); + Map> connectorConfigs = transformValues(taskCounts, c -> Map.of()); Map targetStates = transformValues(taskCounts, c -> TargetState.STARTED); Map> taskConfigs = taskCounts.entrySet().stream() .flatMap(e -> IntStream.range(0, e.getValue()).mapToObj(i -> new ConnectorTaskId(e.getKey(), i))) .collect(Collectors.toMap( Function.identity(), - connectorTaskId -> Collections.emptyMap() + connectorTaskId -> Map.of() )); Map appliedConnectorConfigs = connectorConfigs.entrySet().stream() .collect(Collectors.toMap( @@ -1412,11 +1410,11 @@ private ClusterConfigState configState() { connectorConfigs, targetStates, taskConfigs, - Collections.emptyMap(), - Collections.emptyMap(), + Map.of(), + Map.of(), appliedConnectorConfigs, - Collections.emptySet(), - Collections.emptySet()); + Set.of(), + Set.of()); } private void applyAssignments() { @@ -1440,22 +1438,22 @@ private void applyAssignments() { } private void assertEmptyAssignment() { - assertEquals(Collections.emptyList(), + assertEquals(List.of(), ConnectUtils.combineCollections(returnedAssignments.newlyAssignedConnectors().values()), "No connectors should have been newly assigned during this round"); - assertEquals(Collections.emptyList(), + assertEquals(List.of(), ConnectUtils.combineCollections(returnedAssignments.newlyAssignedTasks().values()), "No tasks should have been newly assigned during this round"); - assertEquals(Collections.emptyList(), + assertEquals(List.of(), ConnectUtils.combineCollections(returnedAssignments.newlyRevokedConnectors().values()), "No connectors should have been revoked during this round"); - assertEquals(Collections.emptyList(), + assertEquals(List.of(), ConnectUtils.combineCollections(returnedAssignments.newlyRevokedTasks().values()), "No tasks should have been revoked during this round"); } private void assertWorkers(String... 
workers) { - assertEquals(new HashSet<>(Arrays.asList(workers)), returnedAssignments.allWorkers(), "Wrong set of workers"); + assertEquals(Set.of(workers), returnedAssignments.allWorkers(), "Wrong set of workers"); } /** @@ -1500,14 +1498,14 @@ private List allocations(Function assertEquals( - Collections.emptySet(), + Set.of(), new HashSet<>(revocations), "Expected no revocations to take place during this round, but connector revocations were issued for worker " + worker ) ); returnedAssignments.newlyRevokedTasks().forEach((worker, revocations) -> assertEquals( - Collections.emptySet(), + Set.of(), new HashSet<>(revocations), "Expected no revocations to take place during this round, but task revocations were issued for worker " + worker ) @@ -1542,11 +1540,11 @@ private void assertNoRedundantAssignments() { ); existingConnectors.retainAll(newConnectors); - assertEquals(Collections.emptyList(), + assertEquals(List.of(), existingConnectors, "Found connectors in new assignment that already exist in current assignment"); existingTasks.retainAll(newTasks); - assertEquals(Collections.emptyList(), + assertEquals(List.of(), existingConnectors, "Found tasks in new assignment that already exist in current assignment"); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java index a8e7cd465529f..047a3b5d4e94c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java @@ -42,9 +42,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -126,7 +124,7 @@ public void init(ConnectProtocolCompatibility compatibility) { this.time = new MockTime(); this.metadata = new Metadata(0, 0, Long.MAX_VALUE, loggerFactory, new ClusterResourceListeners()); this.client = new MockClient(time, metadata); - this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1))); + this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Map.of("topic", 1))); this.node = metadata.fetch().nodes().get(0); this.consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs); @@ -149,9 +147,9 @@ public void init(ConnectProtocolCompatibility compatibility) { heartbeatIntervalMs, groupId, Optional.empty(), + null, retryBackoffMs, - retryBackoffMaxMs, - true); + retryBackoffMaxMs); this.coordinator = new WorkerCoordinator(rebalanceConfig, loggerFactory, consumerClient, @@ -209,8 +207,8 @@ public void testMetadataWithExistingAssignment(ConnectProtocolCompatibility comp ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ExtendedAssignment.NO_ERROR, leaderId, leaderUrl, configState1.offset(), - Collections.singletonList(connectorId1), Arrays.asList(taskId1x0, taskId2x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId1), List.of(taskId1x0, taskId2x0), + List.of(), List.of(), 0); ByteBuffer buf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); // Using onJoinComplete to register the protocol selection 
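The swaps above from Collections.emptyList()/singleton()/singletonList() and Arrays.asList(...) to the Java 9+ List.of()/Set.of()/Map.of() factories are equality-preserving for these assertEquals calls. The following standalone, JDK-only sketch (illustrative only, not code from this patch) shows the equivalences plus the two caveats worth remembering: the of() factories reject nulls, and Set.of rejects duplicates that a HashSet would silently collapse, which matters for varargs helpers such as assertWorkers(String... workers).

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OfFactorySemanticsSketch {
    public static void main(String[] args) {
        // Collection equality is content-based, so swapping the factory does not
        // change what the rewritten assertions compare:
        System.out.println(List.of().equals(Collections.emptyList()));                // true
        System.out.println(Set.of("w1").equals(Collections.singleton("w1")));         // true
        System.out.println(Set.of("w1", "w2")
                .equals(new HashSet<>(Arrays.asList("w2", "w1"))));                   // true (sets ignore order)

        // The of() factories are unmodifiable and null-hostile...
        try {
            Set.of("w1", (String) null);
        } catch (NullPointerException e) {
            System.out.println("null elements are rejected");
        }

        // ...and Set.of rejects duplicate elements outright, whereas
        // new HashSet<>(Arrays.asList(...)) would have deduplicated them silently.
        try {
            Set.of("w1", "w1");
        } catch (IllegalArgumentException e) {
            System.out.println("duplicate elements are rejected");
        }
    }
}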
decided by the broker // coordinator as well as an existing previous assignment that the call to metadata will @@ -228,8 +226,8 @@ public void testMetadataWithExistingAssignment(ConnectProtocolCompatibility comp .deserializeMetadata(ByteBuffer.wrap(selectedMetadata.metadata())); assertEquals(offset, state.offset()); assertNotEquals(ExtendedAssignment.empty(), state.assignment()); - assertEquals(Collections.singletonList(connectorId1), state.assignment().connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId2x0), state.assignment().tasks()); + assertEquals(List.of(connectorId1), state.assignment().connectors()); + assertEquals(List.of(taskId1x0, taskId2x0), state.assignment().tasks()); verify(configStorage, times(1)).snapshot(); } @@ -242,8 +240,8 @@ public void testMetadataWithExistingAssignmentButOlderProtocolSelection(ConnectP ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ExtendedAssignment.NO_ERROR, leaderId, leaderUrl, configState1.offset(), - Collections.singletonList(connectorId1), Arrays.asList(taskId1x0, taskId2x0), - Collections.emptyList(), Collections.emptyList(), 0); + List.of(connectorId1), List.of(taskId1x0, taskId2x0), + List.of(), List.of(), 0); ByteBuffer buf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); // Using onJoinComplete to register the protocol selection decided by the broker // coordinator as well as an existing previous assignment that the call to metadata will @@ -282,14 +280,14 @@ public void testTaskAssignmentWhenWorkerJoins(ConnectProtocolCompatibility compa ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId1), 4, - Collections.emptyList(), 0, + List.of(connectorId1), 4, + List.of(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId2), 4, - Collections.emptyList(), 0, + List.of(connectorId2), 4, + List.of(), 0, memberAssignment); coordinator.metadata(); @@ -305,20 +303,20 @@ public void testTaskAssignmentWhenWorkerJoins(ConnectProtocolCompatibility compa //Equally distributing tasks across member leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 1, + List.of(), 0, + List.of(), 1, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 1, + List.of(), 0, + List.of(), 1, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, anotherMemberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -343,20 +341,20 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId1), 3, - Collections.emptyList(), 0, + List.of(connectorId1), 3, + List.of(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId2), 3, - Collections.emptyList(), 0, + List.of(connectorId2), 3, 
+ List.of(), 0, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 2, - Collections.emptyList(), 0, + List.of(), 2, + List.of(), 0, anotherMemberAssignment); // Second rebalance detects a worker is missing @@ -372,15 +370,15 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, memberAssignment); @@ -392,15 +390,15 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, memberAssignment); @@ -411,14 +409,14 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 1, - Collections.emptyList(), 0, + List.of(), 1, + List.of(), 0, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 1, - Collections.emptyList(), 0, + List.of(), 1, + List.of(), 0, memberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -443,20 +441,20 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId1), 3, - Collections.emptyList(), 0, + List.of(connectorId1), 3, + List.of(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.singletonList(connectorId2), 3, - Collections.emptyList(), 0, + List.of(connectorId2), 3, + List.of(), 0, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 2, - Collections.emptyList(), 0, + List.of(), 2, + List.of(), 0, anotherMemberAssignment); // Second rebalance detects a worker is missing @@ -471,15 +469,15 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, memberAssignment); @@ -493,22 +491,22 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com 
leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, memberAssignment); anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, rebalanceDelay, anotherMemberAssignment); @@ -519,20 +517,20 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com // A rebalance after the delay expires re-assigns the lost tasks to the returning member leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 0, - Collections.emptyList(), 0, + List.of(), 0, + List.of(), 0, memberAssignment); anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - Collections.emptyList(), 2, - Collections.emptyList(), 0, + List.of(), 2, + List.of(), 0, anotherMemberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -542,8 +540,8 @@ private static class MockRebalanceListener implements WorkerRebalanceListener { public ExtendedAssignment assignment = null; public String revokedLeader; - public Collection revokedConnectors = Collections.emptyList(); - public Collection revokedTasks = Collections.emptyList(); + public Collection revokedConnectors = List.of(); + public Collection revokedTasks = List.of(); public int revokedCount = 0; public int assignedCount = 0; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java index 4122578266aaf..46a5b076a3398 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java @@ -54,14 +54,13 @@ import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -128,7 +127,7 @@ public void setup(ConnectProtocolCompatibility compatibility) { this.time = new MockTime(); this.metadata = new Metadata(0, 0, Long.MAX_VALUE, logContext, new ClusterResourceListeners()); this.client = new MockClient(time, metadata); - this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1))); + this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Map.of("topic", 1))); this.node = metadata.fetch().nodes().get(0); this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, heartbeatIntervalMs); this.metrics = new Metrics(time); @@ -139,9 +138,9 
@@ public void setup(ConnectProtocolCompatibility compatibility) { heartbeatIntervalMs, groupId, Optional.empty(), + null, retryBackoffMs, - retryBackoffMaxMs, - true); + retryBackoffMaxMs); this.coordinator = new WorkerCoordinator(rebalanceConfig, logContext, consumerClient, @@ -157,15 +156,15 @@ public void setup(ConnectProtocolCompatibility compatibility) { configState1 = new ClusterConfigState( 4L, null, - Collections.singletonMap(connectorId1, 1), - Collections.singletonMap(connectorId1, new HashMap<>()), - Collections.singletonMap(connectorId1, TargetState.STARTED), - Collections.singletonMap(taskId1x0, new HashMap<>()), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet() + Map.of(connectorId1, 1), + Map.of(connectorId1, new HashMap<>()), + Map.of(connectorId1, TargetState.STARTED), + Map.of(taskId1x0, new HashMap<>()), + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of() ); Map configState2ConnectorTaskCounts = new HashMap<>(); @@ -188,11 +187,11 @@ public void setup(ConnectProtocolCompatibility compatibility) { configState2ConnectorConfigs, configState2TargetStates, configState2TaskConfigs, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptySet(), - Collections.emptySet() + Map.of(), + Map.of(), + Map.of(), + Set.of(), + Set.of() ); Map configStateSingleTaskConnectorsConnectorTaskCounts = new HashMap<>(); @@ -223,11 +222,11 @@ public void setup(ConnectProtocolCompatibility compatibility) { configStateSingleTaskConnectorsConnectorConfigs, configStateSingleTaskConnectorsTargetStates, configStateSingleTaskConnectorsTaskConfigs, - Collections.emptyMap(), - Collections.emptyMap(), + Map.of(), + Map.of(), appliedConnectorConfigs, - Collections.emptySet(), - Collections.emptySet() + Set.of(), + Set.of() ); } @@ -280,8 +279,8 @@ public void testNormalJoinGroupLeader(ConnectProtocolCompatibility compatibility return sync.data().memberId().equals(memberId) && sync.data().generationId() == 1 && sync.groupAssignments().containsKey(memberId); - }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.singletonList(connectorId1), - Collections.emptyList(), Errors.NONE)); + }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(connectorId1), + List.of(), Errors.NONE)); coordinator.ensureActiveGroup(); assertFalse(coordinator.rejoinNeededOrPending()); @@ -290,8 +289,8 @@ public void testNormalJoinGroupLeader(ConnectProtocolCompatibility compatibility assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); assertEquals("leader", rebalanceListener.assignment.leader()); - assertEquals(Collections.singletonList(connectorId1), rebalanceListener.assignment.connectors()); - assertEquals(Collections.emptyList(), rebalanceListener.assignment.tasks()); + assertEquals(List.of(connectorId1), rebalanceListener.assignment.connectors()); + assertEquals(List.of(), rebalanceListener.assignment.tasks()); verify(configStorage).snapshot(); } @@ -314,8 +313,8 @@ public void testNormalJoinGroupFollower(ConnectProtocolCompatibility compatibili return sync.data().memberId().equals(memberId) && sync.data().generationId() == 1 && sync.data().assignments().isEmpty(); - }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.emptyList(), - Collections.singletonList(taskId1x0), Errors.NONE)); 
+ }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(), + List.of(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); assertFalse(coordinator.rejoinNeededOrPending()); @@ -323,8 +322,8 @@ public void testNormalJoinGroupFollower(ConnectProtocolCompatibility compatibili assertEquals(1, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(Collections.emptyList(), rebalanceListener.assignment.connectors()); - assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.assignment.tasks()); + assertEquals(List.of(), rebalanceListener.assignment.connectors()); + assertEquals(List.of(taskId1x0), rebalanceListener.assignment.tasks()); verify(configStorage).snapshot(); } @@ -351,14 +350,14 @@ public void testJoinLeaderCannotAssign(ConnectProtocolCompatibility compatibilit sync.data().assignments().isEmpty(); }; client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.CONFIG_MISMATCH, "leader", configState2.offset(), - Collections.emptyList(), Collections.emptyList(), Errors.NONE)); + List.of(), List.of(), Errors.NONE)); // When the first round fails, we'll take an updated config snapshot when(configStorage.snapshot()).thenReturn(configState2); client.prepareResponse(joinGroupFollowerResponse(1, memberId, "leader", Errors.NONE)); client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState2.offset(), - Collections.emptyList(), Collections.singletonList(taskId1x0), Errors.NONE)); + List.of(), List.of(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); verify(configStorage, times(2)).snapshot(); @@ -375,32 +374,32 @@ public void testRejoinGroup(ConnectProtocolCompatibility compatibility) { // join the group once client.prepareResponse(joinGroupFollowerResponse(1, "member", "leader", Errors.NONE)); - client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.emptyList(), - Collections.singletonList(taskId1x0), Errors.NONE)); + client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(), + List.of(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(0, rebalanceListener.revokedCount); assertEquals(1, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(Collections.emptyList(), rebalanceListener.assignment.connectors()); - assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.assignment.tasks()); + assertEquals(List.of(), rebalanceListener.assignment.connectors()); + assertEquals(List.of(taskId1x0), rebalanceListener.assignment.tasks()); // and join the group again coordinator.requestRejoin("test"); client.prepareResponse(joinGroupFollowerResponse(1, "member", "leader", Errors.NONE)); - client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.singletonList(connectorId1), - Collections.emptyList(), Errors.NONE)); + client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(connectorId1), + List.of(), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(1, rebalanceListener.revokedCount); - assertEquals(Collections.emptyList(), 
rebalanceListener.revokedConnectors); - assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.revokedTasks); + assertEquals(List.of(), rebalanceListener.revokedConnectors); + assertEquals(List.of(taskId1x0), rebalanceListener.revokedTasks); assertEquals(2, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(Collections.singletonList(connectorId1), rebalanceListener.assignment.connectors()); - assertEquals(Collections.emptyList(), rebalanceListener.assignment.tasks()); + assertEquals(List.of(connectorId1), rebalanceListener.assignment.connectors()); + assertEquals(List.of(), rebalanceListener.assignment.tasks()); verify(configStorage, times(2)).snapshot(); } @@ -434,15 +433,15 @@ public void testLeaderPerformAssignment1(ConnectProtocolCompatibility compatibil assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configState1.offset(), leaderAssignment.offset()); - assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); - assertEquals(Collections.emptyList(), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1), leaderAssignment.connectors()); + assertEquals(List.of(), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configState1.offset(), memberAssignment.offset()); - assertEquals(Collections.emptyList(), memberAssignment.connectors()); - assertEquals(Collections.singletonList(taskId1x0), memberAssignment.tasks()); + assertEquals(List.of(), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -477,15 +476,15 @@ public void testLeaderPerformAssignment2(ConnectProtocolCompatibility compatibil assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configState2.offset(), leaderAssignment.offset()); - assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId2x0), leaderAssignment.tasks()); + assertEquals(List.of(connectorId1), leaderAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configState2.offset(), memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); - assertEquals(Collections.singletonList(taskId1x1), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x1), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -521,15 +520,15 @@ public void testLeaderPerformAssignmentSingleTaskConnectors(ConnectProtocolCompa assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configStateSingleTaskConnectors.offset(), leaderAssignment.offset()); - assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); + 
assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configStateSingleTaskConnectors.offset(), memberAssignment.offset()); - assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); - assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(List.of(connectorId2), memberAssignment.connectors()); + assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -546,7 +545,7 @@ public void testSkippingAssignmentFails(ConnectProtocolCompatibility compatibili coordinator.metadata(); assertThrows(IllegalStateException.class, - () -> coordinator.onLeaderElected("leader", EAGER.protocol(), Collections.emptyList(), true)); + () -> coordinator.onLeaderElected("leader", EAGER.protocol(), List.of(), true)); verify(configStorage).snapshot(); } @@ -582,7 +581,7 @@ private JoinGroupResponse joinGroupFollowerResponse(int generationId, String mem .setProtocolName(EAGER.protocol()) .setLeader(leaderId) .setMemberId(memberId) - .setMembers(Collections.emptyList()), + .setMembers(List.of()), ApiKeys.JOIN_GROUP.latestVersion() ); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java index fad39d84129dc..4886431869c1e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java @@ -59,6 +59,7 @@ public class WorkerGroupMemberTest { public void testMetrics() throws Exception { WorkerGroupMember member; Map workerProps = new HashMap<>(); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -102,6 +103,7 @@ public void testMetrics() throws Exception { public void testDisableJmxReporter() { WorkerGroupMember member; Map workerProps = new HashMap<>(); + workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("group.id", "group-1"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java index 8dc1800034d61..ce0188ecf8aa0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java @@ -46,8 +46,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static 
org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_CONNECTOR_NAME; import static org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_EXCEPTION; import static org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_EXCEPTION_MESSAGE; @@ -105,13 +103,13 @@ public void tearDown() { @Test public void initializeDLQWithNullMetrics() { - assertThrows(NullPointerException.class, () -> new DeadLetterQueueReporter(producer, config(emptyMap()), TASK_ID, null)); + assertThrows(NullPointerException.class, () -> new DeadLetterQueueReporter(producer, config(Map.of()), TASK_ID, null)); } @Test public void testDLQConfigWithEmptyTopicName() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(emptyMap()), TASK_ID, errorHandlingMetrics); + producer, config(Map.of()), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -124,7 +122,7 @@ public void testDLQConfigWithEmptyTopicName() { @Test public void testDLQConfigWithValidTopicName() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -138,7 +136,7 @@ public void testDLQConfigWithValidTopicName() { @Test public void testReportDLQTwice() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -153,7 +151,7 @@ public void testReportDLQTwice() { @Test public void testCloseDLQ() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); deadLetterQueueReporter.close(); verify(producer).close(); @@ -161,7 +159,7 @@ public void testCloseDLQ() { @Test public void testLogOnDisabledLogReporter() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(emptyMap()), errorHandlingMetrics); + LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of()), errorHandlingMetrics); ProcessingContext> context = processingContext(); context.error(new RuntimeException()); @@ -173,7 +171,7 @@ public void testLogOnDisabledLogReporter() { @Test public void testLogOnEnabledLogReporter() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(singletonMap(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); + LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); ProcessingContext> context = processingContext(); context.error(new RuntimeException()); @@ -185,7 +183,7 @@ public void testLogOnEnabledLogReporter() { @Test public void testLogMessageWithNoRecords() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(singletonMap(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); + 
LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -231,10 +229,10 @@ public void testLogReportAndReturnFuture() { @Test public void testSetDLQConfigs() { - SinkConnectorConfig configuration = config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)); + SinkConnectorConfig configuration = config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)); assertEquals(DLQ_TOPIC, configuration.dlqTopicName()); - configuration = config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "7")); + configuration = config(Map.of(SinkConnectorConfig.DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "7")); assertEquals(7, configuration.dlqTopicReplicationFactor()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java index ec3ecefea96da..f9e47afd0b31f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java @@ -44,8 +44,6 @@ import org.mockito.quality.Strictness; import org.mockito.stubbing.OngoingStubbing; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,11 +52,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import java.util.stream.IntStream; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.apache.kafka.common.utils.Time.SYSTEM; import static org.apache.kafka.connect.runtime.ConnectorConfig.ERRORS_RETRY_MAX_DELAY_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.ERRORS_RETRY_MAX_DELAY_DEFAULT; @@ -93,6 +88,7 @@ public class RetryWithToleranceOperatorTest { put(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.INFO.toString()); // define required properties + put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); }}; @@ -232,22 +228,22 @@ private RetryWithToleranceOperator setupExecutor(ToleranceType toleranceT @Test public void testExecAndHandleRetriableErrorOnce() throws Exception { - execAndHandleRetriableError(6000, 1, Collections.singletonList(300L), new RetriableException("Test"), true); + execAndHandleRetriableError(6000, 1, List.of(300L), new RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorThrice() throws Exception { - execAndHandleRetriableError(6000, 3, Arrays.asList(300L, 600L, 1200L), new RetriableException("Test"), true); + execAndHandleRetriableError(6000, 3, List.of(300L, 600L, 1200L), new RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorWithInfiniteRetries() throws Exception { - execAndHandleRetriableError(-1, 8, Arrays.asList(300L, 600L, 1200L, 2400L, 4800L, 9600L, 19200L, 38400L), new RetriableException("Test"), true); + execAndHandleRetriableError(-1, 8, List.of(300L, 600L, 1200L, 2400L, 4800L, 9600L, 19200L, 38400L), new 
RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorWithMaxRetriesExceeded() throws Exception { - execAndHandleRetriableError(6000, 6, Arrays.asList(300L, 600L, 1200L, 2400L, 1500L), new RetriableException("Test"), false); + execAndHandleRetriableError(6000, 6, List.of(300L, 600L, 1200L, 2400L, 1500L), new RetriableException("Test"), false); } public void execAndHandleRetriableError(long errorRetryTimeout, int numRetriableExceptionsThrown, List expectedWaits, Exception e, boolean successExpected) throws Exception { @@ -396,7 +392,7 @@ public void testToleranceLimit() { @Test public void testDefaultConfigs() { - ConnectorConfig configuration = config(emptyMap()); + ConnectorConfig configuration = config(Map.of()); assertEquals(ERRORS_RETRY_TIMEOUT_DEFAULT, configuration.errorRetryTimeout()); assertEquals(ERRORS_RETRY_MAX_DELAY_DEFAULT, configuration.errorMaxDelayInMillis()); assertEquals(ERRORS_TOLERANCE_DEFAULT, configuration.errorToleranceType()); @@ -413,13 +409,13 @@ ConnectorConfig config(Map connProps) { @Test public void testSetConfigs() { ConnectorConfig configuration; - configuration = config(singletonMap(ERRORS_RETRY_TIMEOUT_CONFIG, "100")); + configuration = config(Map.of(ERRORS_RETRY_TIMEOUT_CONFIG, "100")); assertEquals(100, configuration.errorRetryTimeout()); - configuration = config(singletonMap(ERRORS_RETRY_MAX_DELAY_CONFIG, "100")); + configuration = config(Map.of(ERRORS_RETRY_MAX_DELAY_CONFIG, "100")); assertEquals(100, configuration.errorMaxDelayInMillis()); - configuration = config(singletonMap(ERRORS_TOLERANCE_CONFIG, "none")); + configuration = config(Map.of(ERRORS_TOLERANCE_CONFIG, "none")); assertEquals(ToleranceType.NONE, configuration.errorToleranceType()); } @@ -439,7 +435,7 @@ private void testReport(int numberOfReports) { RetryWithToleranceOperator> retryWithToleranceOperator = new RetryWithToleranceOperator<>(-1, ERRORS_RETRY_MAX_DELAY_DEFAULT, ALL, time, errorHandlingMetrics, exitLatch); ConsumerRecord consumerRecord = new ConsumerRecord<>("t", 0, 0, null, null); List> fs = IntStream.range(0, numberOfReports).mapToObj(i -> new CompletableFuture()).toList(); - List>> reporters = IntStream.range(0, numberOfReports).mapToObj(i -> (ErrorReporter>) c -> fs.get(i)).collect(Collectors.toList()); + List>> reporters = IntStream.range(0, numberOfReports).mapToObj(i -> (ErrorReporter>) c -> fs.get(i)).toList(); retryWithToleranceOperator.reporters(reporters); ProcessingContext> context = new ProcessingContext<>(consumerRecord); Future result = retryWithToleranceOperator.report(context); @@ -458,7 +454,7 @@ public void testCloseErrorReporters() { RetryWithToleranceOperator retryWithToleranceOperator = allOperator(); - retryWithToleranceOperator.reporters(Arrays.asList(reporterA, reporterB)); + retryWithToleranceOperator.reporters(List.of(reporterA, reporterB)); // Even though the reporters throw exceptions, they should both still be closed. @@ -475,7 +471,7 @@ public void testCloseErrorReportersExceptionPropagation() { RetryWithToleranceOperator retryWithToleranceOperator = allOperator(); - retryWithToleranceOperator.reporters(Arrays.asList(reporterA, reporterB)); + retryWithToleranceOperator.reporters(List.of(reporterA, reporterB)); // Even though the reporters throw exceptions, they should both still be closed. 
doThrow(new RuntimeException()).when(reporterA).close(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java index 7783f267dfea0..10b715fe18d24 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java @@ -35,7 +35,7 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.concurrent.CompletableFuture; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -66,7 +66,7 @@ public void testGetFutures() { for (int i = 0; i < 4; i++) { TopicPartition topicPartition = new TopicPartition("topic", i); topicPartitions.add(topicPartition); - reporter.futures.put(topicPartition, Collections.singletonList(CompletableFuture.completedFuture(null))); + reporter.futures.put(topicPartition, List.of(CompletableFuture.completedFuture(null))); } assertFalse(reporter.futures.isEmpty()); reporter.awaitFutures(topicPartitions); @@ -105,7 +105,7 @@ private void initializeReporter(boolean errorsTolerated) { Time.SYSTEM, errorHandlingMetrics ); - retryWithToleranceOperator.reporters(Collections.singletonList(errorReporter)); + retryWithToleranceOperator.reporters(List.of(errorReporter)); reporter = new WorkerErrantRecordReporter( retryWithToleranceOperator, converter, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java index 79d5788ff436e..65e5b15128d93 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java @@ -29,9 +29,8 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -55,7 +54,7 @@ public class ConnectClusterStateImplTest { @BeforeEach public void setUp() { - expectedConnectors = Arrays.asList("sink1", "source1", "source2"); + expectedConnectors = List.of("sink1", "source1", "source2"); connectClusterState = new ConnectClusterStateImpl( herderRequestTimeoutMs, new ConnectClusterDetailsImpl(KAFKA_CLUSTER_ID), @@ -78,7 +77,7 @@ public void connectors() { @Test public void connectorConfig() { final String connName = "sink6"; - final Map expectedConfig = Collections.singletonMap("key", "value"); + final Map expectedConfig = Map.of("key", "value"); @SuppressWarnings("unchecked") ArgumentCaptor>> callback = ArgumentCaptor.forClass(Callback.class); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java index 749acb3e5b0b2..fd97935933a07 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java @@ -27,7 +27,6 @@ import java.net.MalformedURLException; import java.net.URL; -import java.util.Collections; import java.util.SortedSet; import java.util.TreeSet; @@ -73,15 +72,15 @@ public void setUp() { assertTrue(PluginUtils.shouldLoadInIsolation(pluginDesc.className())); sinkConnectors.add(pluginDesc); scanResult = new PluginScanResult( - sinkConnectors, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + sinkConnectors, + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/MultiVersionTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/MultiVersionTest.java new file mode 100644 index 0000000000000..c4a60df57e143 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/MultiVersionTest.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.connect.runtime.isolation; + +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.connect.components.Versioned; +import org.apache.kafka.connect.runtime.WorkerConfig; +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.connect.storage.HeaderConverter; + +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + + +public class MultiVersionTest { + + private static Plugins setUpPlugins(Map> artifacts, PluginDiscoveryMode mode) { + String pluginPath = artifacts.keySet().stream().map(Path::toString).collect(Collectors.joining(",")); + Map configs = new HashMap<>(); + configs.put(WorkerConfig.PLUGIN_PATH_CONFIG, pluginPath); + configs.put(WorkerConfig.PLUGIN_DISCOVERY_CONFIG, mode.name()); + return new Plugins(configs); + } + + private void assertPluginLoad(Map> artifacts, PluginDiscoveryMode mode) + throws InvalidVersionSpecificationException, ClassNotFoundException { + + Plugins plugins = setUpPlugins(artifacts, mode); + + for (Map.Entry> entry : artifacts.entrySet()) { + String pluginLocation = entry.getKey().toAbsolutePath().toString(); + + for (VersionedPluginBuilder.BuildInfo buildInfo : entry.getValue()) { + ClassLoader pluginLoader = plugins.pluginLoader(buildInfo.plugin().className(), PluginUtils.connectorVersionRequirement(buildInfo.version())); + Assertions.assertInstanceOf(PluginClassLoader.class, pluginLoader); + Assertions.assertTrue(((PluginClassLoader) pluginLoader).location().contains(pluginLocation)); + Object p = plugins.newPlugin(buildInfo.plugin().className(), PluginUtils.connectorVersionRequirement(buildInfo.version())); + Assertions.assertInstanceOf(Versioned.class, p); + Assertions.assertEquals(buildInfo.version(), ((Versioned) p).version()); + } + } + } + + private void assertCorrectLatestPluginVersion( + Map> artifacts, + PluginDiscoveryMode mode, + String latestVersion + ) { + Plugins plugins = setUpPlugins(artifacts, mode); + List classes = artifacts.values().stream() + .flatMap(List::stream) + .map(VersionedPluginBuilder.BuildInfo::plugin) + .map(VersionedPluginBuilder.VersionedTestPlugin::className) + .distinct() + .toList(); + for (String className : classes) { + String version = plugins.latestVersion(className, PluginType.values()); + Assertions.assertEquals(latestVersion, version); + } + } + + private static Map> buildIsolatedArtifacts( + String[] versions, + VersionedPluginBuilder.VersionedTestPlugin[] pluginTypes + ) throws IOException { + Map> artifacts = new HashMap<>(); + for (String v : versions) { + for (VersionedPluginBuilder.VersionedTestPlugin pluginType: pluginTypes) { + VersionedPluginBuilder builder = new VersionedPluginBuilder(); + builder.include(pluginType, v); + artifacts.put(builder.build(pluginType + "-" + v), builder.buildInfos()); + } + } + return artifacts; + } + + public static final String DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION; + public static final Map> DEFAULT_ISOLATED_ARTIFACTS; + public static final Map> DEFAULT_COMBINED_ARTIFACT; + public static final Plugins MULTI_VERSION_PLUGINS; + public static final Map DEFAULT_COMBINED_ARTIFACT_VERSIONS; + + static { + + String[] 
defaultIsolatedArtifactsVersions = new String[]{"1.1.0", "2.3.0", "4.3.0"}; + try { + DEFAULT_ISOLATED_ARTIFACTS = buildIsolatedArtifacts( + defaultIsolatedArtifactsVersions, VersionedPluginBuilder.VersionedTestPlugin.values() + ); + DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION = "4.3.0"; + DEFAULT_COMBINED_ARTIFACT_VERSIONS = new HashMap<>(); + + VersionedPluginBuilder builder = new VersionedPluginBuilder(); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR, k -> "0.0.0")); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR, k -> "0.1.0")); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER, k -> "0.2.0")); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER, k -> "0.3.0")); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION, k -> "0.4.0")); + builder.include(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE, + DEFAULT_COMBINED_ARTIFACT_VERSIONS.computeIfAbsent(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE, k -> "0.5.0")); + DEFAULT_COMBINED_ARTIFACT = Map.of(builder.build("all_versioned_artifact"), builder.buildInfos()); + + Map> artifacts = new HashMap<>(); + artifacts.putAll(DEFAULT_COMBINED_ARTIFACT); + artifacts.putAll(DEFAULT_ISOLATED_ARTIFACTS); + MULTI_VERSION_PLUGINS = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Test + public void testVersionedPluginLoaded() throws InvalidVersionSpecificationException, ClassNotFoundException { + assertPluginLoad(DEFAULT_COMBINED_ARTIFACT, PluginDiscoveryMode.SERVICE_LOAD); + assertPluginLoad(DEFAULT_COMBINED_ARTIFACT, PluginDiscoveryMode.ONLY_SCAN); + } + + @Test + public void testMultipleIsolatedVersionedPluginLoading() throws InvalidVersionSpecificationException, ClassNotFoundException { + assertPluginLoad(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD); + assertPluginLoad(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.ONLY_SCAN); + } + + @Test + public void testLatestVersion() { + assertCorrectLatestPluginVersion(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD, DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION); + assertCorrectLatestPluginVersion(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.ONLY_SCAN, DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION); + } + + @Test + public void testBundledPluginLoading() throws InvalidVersionSpecificationException, ClassNotFoundException { + + Plugins plugins = MULTI_VERSION_PLUGINS; + // get the connector loader of the combined artifact which includes all plugin types + ClassLoader connectorLoader = plugins.pluginLoader( + VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR.className(), + PluginUtils.connectorVersionRequirement("0.1.0") + ); + Assertions.assertInstanceOf(PluginClassLoader.class, connectorLoader); + + List pluginTypes = List.of( + VersionedPluginBuilder.VersionedTestPlugin.CONVERTER, + 
VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER, + VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION, + VersionedPluginBuilder.VersionedTestPlugin.PREDICATE + ); + // should match the version used in setUp for creating the combined artifact + List versions = pluginTypes.stream().map(DEFAULT_COMBINED_ARTIFACT_VERSIONS::get).toList(); + for (int i = 0; i < 4; i++) { + String className = pluginTypes.get(i).className(); + // when using the connector loader, the version and plugin returned should be from the ones in the combined artifact + String version = plugins.pluginVersion(className, connectorLoader, PluginType.values()); + Assertions.assertEquals(versions.get(i), version); + Object p = plugins.newPlugin(className, null, connectorLoader); + Assertions.assertInstanceOf(Versioned.class, p); + Assertions.assertEquals(versions.get(i), ((Versioned) p).version()); + + String latestVersion = plugins.latestVersion(className, PluginType.values()); + Assertions.assertEquals(DEFAULT_ISOLATED_ARTIFACTS_LATEST_VERSION, latestVersion); + } + } + + @Test + public void testCorrectVersionRange() throws IOException, InvalidVersionSpecificationException, ClassNotFoundException { + Map> artifacts = buildIsolatedArtifacts( + new String[]{"1.0.0", "1.1.0", "1.1.2", "2.0.0", "2.0.2", "3.0.0", "4.0.0"}, + VersionedPluginBuilder.VersionedTestPlugin.values() + ); + + Plugins plugins = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD); + Map requiredVersions = new HashMap<>(); + requiredVersions.put(PluginUtils.connectorVersionRequirement("latest"), "4.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement(null), "4.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("1.0.0"), "1.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("[2.0.2]"), "2.0.2"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,3.0.1]"), "3.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(,2.0.0)"), "1.1.2"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.0.0]"), "1.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("[2.0.0,)"), "4.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(,2.0.0],[2.0.3, 2.0.4)"), "2.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(2.0.0,3.0.0)"), "2.0.2"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.1.0),[4.1.1,)"), "1.0.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,1.1.0]"), "1.1.0"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("(,1.1.0),(2.0.0, 2.0.2]"), "2.0.2"); + requiredVersions.put(PluginUtils.connectorVersionRequirement("[1.1.0,1.1.3)"), "1.1.2"); + + for (Map.Entry entry : requiredVersions.entrySet()) { + for (VersionedPluginBuilder.VersionedTestPlugin pluginType: VersionedPluginBuilder.VersionedTestPlugin.values()) { + Object p = plugins.newPlugin(pluginType.className(), entry.getKey()); + Assertions.assertInstanceOf(Versioned.class, p); + Assertions.assertEquals(entry.getValue(), ((Versioned) p).version(), + String.format("Provided Version Range %s for class %s should return plugin version %s instead of %s", + entry.getKey(), pluginType.className(), entry.getValue(), ((Versioned) p).version())); + } + } + } + + @Test + public void testInvalidVersionRange() throws IOException, InvalidVersionSpecificationException { + String[] validVersions = new String[]{"1.0.0", "1.1.0", "1.1.2", "2.0.0", "2.0.2", "3.0.0", "4.0.0"}; + Map> 
+
+    @Test
+    public void testInvalidVersionRange() throws IOException, InvalidVersionSpecificationException {
+        String[] validVersions = new String[]{"1.0.0", "1.1.0", "1.1.2", "2.0.0", "2.0.2", "3.0.0", "4.0.0"};
+        Map<Path, List<VersionedPluginBuilder.BuildInfo>> artifacts = buildIsolatedArtifacts(
+            validVersions,
+            VersionedPluginBuilder.VersionedTestPlugin.values()
+        );
+
+        Plugins plugins = setUpPlugins(artifacts, PluginDiscoveryMode.SERVICE_LOAD);
+        Set<VersionRange> invalidVersions = new HashSet<>();
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("0.9.0"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("[4.0.1,)"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("(4.0.0,)"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("[4.0.1]"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("(2.0.0, 2.0.1)"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("(,1.0.0)"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("(1.1.0, 1.1.2)"));
+        invalidVersions.add(PluginUtils.connectorVersionRequirement("(1.1.0, 1.1.2),[1.1.3, 2.0.0)"));
+
+        for (VersionRange versionRange : invalidVersions) {
+            for (VersionedPluginBuilder.VersionedTestPlugin pluginType: VersionedPluginBuilder.VersionedTestPlugin.values()) {
+                VersionedPluginLoadingException e = Assertions.assertThrows(VersionedPluginLoadingException.class, () -> {
+                    plugins.newPlugin(pluginType.className(), versionRange);
+                }, String.format("Provided Version Range %s for class %s should throw VersionedPluginLoadingException", versionRange, pluginType.className()));
+                Assertions.assertEquals(e.availableVersions(), List.of(validVersions));
+            }
+        }
+    }
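+
+    // Key, value and header converters can each be pinned to a different version of the same converter
+    // class through the worker's KEY_CONVERTER_VERSION, VALUE_CONVERTER_VERSION and HEADER_CONVERTER_VERSION
+    // configs. The test below pins them to 1.1.0, 2.3.0 and 4.3.0 respectively and checks that each
+    // instantiated converter reports the requested version.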
+
+    @Test
+    public void testVersionedConverter() {
+        Plugins plugins = setUpPlugins(DEFAULT_ISOLATED_ARTIFACTS, PluginDiscoveryMode.SERVICE_LOAD);
+        Map<String, String> converterConfig = new HashMap<>();
+        converterConfig.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        converterConfig.put(WorkerConfig.KEY_CONVERTER_VERSION, "1.1.0");
+        converterConfig.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        converterConfig.put(WorkerConfig.VALUE_CONVERTER_VERSION, "2.3.0");
+        converterConfig.put(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className());
+        converterConfig.put(WorkerConfig.HEADER_CONVERTER_VERSION, "4.3.0");
+        converterConfig.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+
+        AbstractConfig config;
+        try (LoaderSwap swap = plugins.safeLoaderSwapper().apply(plugins.delegatingLoader())) {
+            config = new PluginsTest.TestableWorkerConfig(converterConfig);
+        }
+
+        Converter keyConverter = plugins.newConverter(config, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION);
+        Assertions.assertEquals(keyConverter.getClass().getName(), VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        Assertions.assertInstanceOf(Versioned.class, keyConverter);
+        Assertions.assertEquals("1.1.0", ((Versioned) keyConverter).version());
+
+        Converter valueConverter = plugins.newConverter(config, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION);
+        Assertions.assertEquals(valueConverter.getClass().getName(), VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        Assertions.assertInstanceOf(Versioned.class, valueConverter);
+        Assertions.assertEquals("2.3.0", ((Versioned) valueConverter).version());
+
+        HeaderConverter headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION);
+        Assertions.assertEquals(headerConverter.getClass().getName(), VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className());
+        Assertions.assertInstanceOf(Versioned.class, headerConverter);
+        Assertions.assertEquals("4.3.0", ((Versioned) headerConverter).version());
+    }
+}
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginRecommenderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginRecommenderTest.java
new file mode 100644
index 0000000000000..a840c36a8f60f
--- /dev/null
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginRecommenderTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.connect.runtime.isolation;
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.runtime.ConnectorConfig;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.kafka.connect.runtime.isolation.MultiVersionTest.DEFAULT_COMBINED_ARTIFACT_VERSIONS;
+import static org.apache.kafka.connect.runtime.isolation.MultiVersionTest.DEFAULT_ISOLATED_ARTIFACTS;
+import static org.apache.kafka.connect.runtime.isolation.MultiVersionTest.MULTI_VERSION_PLUGINS;
+
+public class PluginRecommenderTest {
+
+    private Set<String> allVersionsOf(String classOrAlias) {
+        Set<String> versions = DEFAULT_ISOLATED_ARTIFACTS.values().stream()
+            .flatMap(List::stream)
+            .filter(b -> b.plugin().className().equals(classOrAlias))
+            .map(VersionedPluginBuilder.BuildInfo::version)
+            .collect(Collectors.toSet());
+        Arrays.stream(VersionedPluginBuilder.VersionedTestPlugin.values()).filter(p -> p.className().equals(classOrAlias))
+            .forEach(r -> versions.add(DEFAULT_COMBINED_ARTIFACT_VERSIONS.get(r)));
+        return versions;
+    }
+
+    @Test
+    public void testConnectorVersionRecommenders() {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        for (String connectorClass : List.of(
+            VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR.className(),
+            VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR.className())
+        ) {
+            Set<String> versions = recommender.connectorPluginVersionRecommender().validValues(
+                ConnectorConfig.CONNECTOR_CLASS_CONFIG, Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass)
+            ).stream().map(Object::toString).collect(Collectors.toSet());
+            Set<String> allVersions = allVersionsOf(connectorClass);
+            Assertions.assertEquals(allVersions.size(), versions.size());
+            allVersions.forEach(v -> Assertions.assertTrue(versions.contains(v), "Missing version " + v + " for connector " + connectorClass));
+        }
+    }
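+
+    // Each of the recommenders below is expected to offer every installed version of the corresponding
+    // plugin type: the three isolated-artifact versions plus the one bundled in the combined artifact,
+    // which is exactly the set computed by allVersionsOf().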
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testConverterVersionRecommenders() throws ClassNotFoundException {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Map<String, Object> config = new HashMap<>();
+        Class converterClass = MULTI_VERSION_PLUGINS.pluginClass(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        config.put(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, converterClass);
+        config.put(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, converterClass);
+        Set<String> allVersions = allVersionsOf(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className());
+        for (ConfigDef.Recommender r : List.of(recommender.keyConverterPluginVersionRecommender(), recommender.valueConverterPluginVersionRecommender())) {
+            Set<String> versions = r.validValues(null, config).stream().map(Object::toString).collect(Collectors.toSet());
+            Assertions.assertEquals(allVersions.size(), versions.size());
+            allVersions.forEach(v -> Assertions.assertTrue(versions.contains(v), "Missing version " + v + " for converter"));
+        }
+    }
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testHeaderConverterVersionRecommenders() throws ClassNotFoundException {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Map<String, Object> config = new HashMap<>();
+        Class headerConverterClass = MULTI_VERSION_PLUGINS.pluginClass(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className());
+        config.put(ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG, headerConverterClass);
+        Set<String> versions = recommender.headerConverterPluginVersionRecommender().validValues(null, config).stream().map(Object::toString).collect(Collectors.toSet());
+        Set<String> allVersions = allVersionsOf(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className());
+        Assertions.assertEquals(allVersions.size(), versions.size());
+        allVersions.forEach(v -> Assertions.assertTrue(versions.contains(v), "Missing version " + v + " for header converter"));
+    }
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testTransformationVersionRecommenders() throws ClassNotFoundException {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Class transformationClass = MULTI_VERSION_PLUGINS.pluginClass(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION.className());
+        Set<String> versions = recommender.transformationPluginRecommender("transforms.t1.type")
+            .validValues("transforms.t1.type", Map.of("transforms.t1.type", transformationClass))
+            .stream().map(Object::toString).collect(Collectors.toSet());
+        Set<String> allVersions = allVersionsOf(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION.className());
+        Assertions.assertEquals(allVersions.size(), versions.size());
+        allVersions.forEach(v -> Assertions.assertTrue(versions.contains(v), "Missing version " + v + " for transformation"));
+    }
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testPredicateVersionRecommenders() throws ClassNotFoundException {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Class predicateClass = MULTI_VERSION_PLUGINS.pluginClass(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE.className());
+        Set<String> versions = recommender.predicatePluginRecommender("predicates.p1.type")
+            .validValues("predicates.p1.type", Map.of("predicates.p1.type", predicateClass))
+            .stream().map(Object::toString).collect(Collectors.toSet());
+        Set<String> allVersions = allVersionsOf(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE.className());
+        Assertions.assertEquals(allVersions.size(), versions.size());
+        allVersions.forEach(v -> Assertions.assertTrue(versions.contains(v), "Missing version " + v + " for predicate"));
+    }
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testConverterPluginRecommender() {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Set<String> converters = recommender.converterPluginRecommender().validValues(null, null)
+            .stream().map(c -> ((Class) c).getName()).collect(Collectors.toSet());
+        Assertions.assertTrue(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className()));
+        // some sanity checks to ensure that other plugin types are not included
+        Assertions.assertFalse(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR.className()));
+        Assertions.assertFalse(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR.className()));
+        Assertions.assertFalse(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className()));
+        Assertions.assertFalse(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION.className()));
+        Assertions.assertFalse(converters.contains(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE.className()));
+    }
+
+    @Test
+    @SuppressWarnings("rawtypes")
+    public void testHeaderConverterPluginRecommender() {
+        PluginsRecommenders recommender = new PluginsRecommenders(MULTI_VERSION_PLUGINS);
+        Set<String> headerConverters = recommender.headerConverterPluginRecommender().validValues(null, null)
+            .stream().map(c -> ((Class) c).getName()).collect(Collectors.toSet());
+        Assertions.assertTrue(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.HEADER_CONVERTER.className()));
+        // some sanity checks to ensure that other plugin types are not included
+        Assertions.assertFalse(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.SINK_CONNECTOR.className()));
+        Assertions.assertFalse(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.SOURCE_CONNECTOR.className()));
+        Assertions.assertFalse(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.CONVERTER.className()));
+        Assertions.assertFalse(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.TRANSFORMATION.className()));
+        Assertions.assertFalse(headerConverters.contains(VersionedPluginBuilder.VersionedTestPlugin.PREDICATE.className()));
+    }
+}
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java
index ca099976444e9..b253405204d2f 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java
@@ -26,7 +26,6 @@
 import java.io.File;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Optional;
 import java.util.Set;
@@ -50,7 +49,7 @@ static Stream parameters() {
     @ParameterizedTest
     @MethodSource("parameters")
     public void testScanningEmptyPluginPath(PluginScanner scanner) {
-        PluginScanResult result = scan(scanner, Collections.emptySet());
+        PluginScanResult result = scan(scanner, Set.of());
         assertTrue(result.isEmpty());
     }
 
@@ -69,7 +68,7 @@ public void testScanningPluginClasses(PluginScanner scanner) {
     public void testScanningInvalidUberJar(PluginScanner scanner) throws Exception {
         File
newFile = new File(pluginDir, "invalid.jar"); newFile.createNewFile(); - PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -81,14 +80,14 @@ public void testScanningPluginDirContainsInvalidJarsOnly(PluginScanner scanner) newFile = new File(newFile, "invalid.jar"); newFile.createNewFile(); - PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); assertTrue(result.isEmpty()); } @ParameterizedTest @MethodSource("parameters") public void testScanningNoPlugins(PluginScanner scanner) { - PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -98,7 +97,7 @@ public void testScanningPluginDirEmpty(PluginScanner scanner) { File newFile = new File(pluginDir, "my-plugin"); newFile.mkdir(); - PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -116,7 +115,7 @@ public void testScanningMixOfValidAndInvalidPlugins(PluginScanner scanner) throw Files.copy(source, pluginPath.resolve(source.getFileName())); } - PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); Set classes = new HashSet<>(); result.forEach(pluginDesc -> classes.add(pluginDesc.className())); Set expectedClasses = new HashSet<>(TestPlugins.pluginClasses()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java index 23041f9c31937..24ef0d535b8a6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java @@ -113,7 +113,7 @@ public void testKafkaDependencyClasses() { @Test public void testConnectApiClasses() { - List apiClasses = Arrays.asList( + List apiClasses = List.of( // Enumerate all packages and classes "org.apache.kafka.connect.", "org.apache.kafka.connect.components.", @@ -201,7 +201,7 @@ public void testConnectApiClasses() { @Test public void testConnectRuntimeClasses() { // Only list packages, because there are too many classes. 
- List runtimeClasses = Arrays.asList( + List runtimeClasses = List.of( "org.apache.kafka.connect.cli.", //"org.apache.kafka.connect.connector.policy.", isolated by default //"org.apache.kafka.connect.converters.", isolated by default @@ -229,7 +229,7 @@ public void testConnectRuntimeClasses() { @Test public void testAllowedRuntimeClasses() { - List jsonConverterClasses = Arrays.asList( + List jsonConverterClasses = List.of( "org.apache.kafka.connect.connector.policy.", "org.apache.kafka.connect.connector.policy.AbstractConnectorClientConfigOverridePolicy", "org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy", @@ -256,7 +256,7 @@ public void testAllowedRuntimeClasses() { @Test public void testTransformsClasses() { - List transformsClasses = Arrays.asList( + List transformsClasses = List.of( "org.apache.kafka.connect.transforms.", "org.apache.kafka.connect.transforms.util.", "org.apache.kafka.connect.transforms.util.NonEmptyListValidator", @@ -309,7 +309,7 @@ public void testTransformsClasses() { @Test public void testAllowedJsonConverterClasses() { - List jsonConverterClasses = Arrays.asList( + List jsonConverterClasses = List.of( "org.apache.kafka.connect.json.", "org.apache.kafka.connect.json.DecimalFormat", "org.apache.kafka.connect.json.JsonConverter", @@ -326,7 +326,7 @@ public void testAllowedJsonConverterClasses() { @Test public void testAllowedFileConnectors() { - List jsonConverterClasses = Arrays.asList( + List jsonConverterClasses = List.of( "org.apache.kafka.connect.file.", "org.apache.kafka.connect.file.FileStreamSinkConnector", "org.apache.kafka.connect.file.FileStreamSinkTask", @@ -341,7 +341,7 @@ public void testAllowedFileConnectors() { @Test public void testAllowedBasicAuthExtensionClasses() { - List basicAuthExtensionClasses = Arrays.asList( + List basicAuthExtensionClasses = List.of( "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension" //"org.apache.kafka.connect.rest.basic.auth.extension.JaasBasicAuthFilter", TODO fix? //"org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule" TODO fix? 
@@ -377,13 +377,13 @@ public void testClientConfigProvider() { @Test public void testEmptyPluginUrls() throws Exception { - assertEquals(Collections.emptyList(), PluginUtils.pluginUrls(pluginPath)); + assertEquals(List.of(), PluginUtils.pluginUrls(pluginPath)); } @Test public void testEmptyStructurePluginUrls() throws Exception { createBasicDirectoryLayout(); - assertEquals(Collections.emptyList(), PluginUtils.pluginUrls(pluginPath)); + assertEquals(List.of(), PluginUtils.pluginUrls(pluginPath)); } @Test @@ -511,12 +511,12 @@ public void testNonCollidingAliases() { sinkConnectors, sourceConnectors, converters, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -538,14 +538,14 @@ public void testMultiVersionAlias() { assertEquals(2, sinkConnectors.size()); PluginScanResult result = new PluginScanResult( sinkConnectors, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -561,15 +561,15 @@ public void testCollidingPrunedAlias() { SortedSet> headerConverters = new TreeSet<>(); headerConverters.add(new PluginDesc<>(CollidingHeaderConverter.class, null, PluginType.HEADER_CONVERTER, CollidingHeaderConverter.class.getClassLoader())); PluginScanResult result = new PluginScanResult( - Collections.emptySortedSet(), - Collections.emptySortedSet(), + new TreeSet<>(), + new TreeSet<>(), converters, headerConverters, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -586,15 +586,15 @@ public void testCollidingSimpleAlias() { SortedSet>> transformations = new TreeSet<>(); transformations.add(new PluginDesc<>((Class>) (Class) Colliding.class, null, PluginType.TRANSFORMATION, Colliding.class.getClassLoader())); PluginScanResult result = new PluginScanResult( - Collections.emptySortedSet(), - Collections.emptySortedSet(), + new TreeSet<>(), + new TreeSet<>(), converters, - Collections.emptySortedSet(), + new TreeSet<>(), transformations, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java index ca4c29931d088..9492f9f7ea22e 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java @@ -55,14 +55,13 @@ import java.net.URL; import java.net.URLClassLoader; import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; +import java.util.TreeSet; import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -93,6 +92,7 @@ public void setup() { pluginProps.put(WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined()); plugins = new Plugins(pluginProps); props = new HashMap<>(pluginProps); + props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); props.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); props.put("key.converter." + JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "true"); @@ -106,17 +106,17 @@ public void setup() { SortedSet> sinkConnectors = (SortedSet>) plugins.sinkConnectors(); missingPluginClass = sinkConnectors.first().className(); nonEmpty = new PluginScanResult( - sinkConnectors, - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet(), - Collections.emptySortedSet() + sinkConnectors, + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>(), + new TreeSet<>() ); - empty = new PluginScanResult(Collections.emptyList()); + empty = new PluginScanResult(List.of()); createConfig(); } @@ -140,7 +140,7 @@ public void shouldInstantiateAndConfigureConverters() { @Test public void shouldInstantiateAndConfigureInternalConverters() { - instantiateAndConfigureInternalConverter(true, Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false")); + instantiateAndConfigureInternalConverter(true, Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false")); // Validate schemas.enable is set to false assertEquals("false", internalConverter.configs.get(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG)); } @@ -207,7 +207,7 @@ public void shouldInstantiateAndConfigureDefaultHeaderConverter() { public void shouldThrowIfPluginThrows() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.ALWAYS_THROW_EXCEPTION.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class )); } @@ -216,7 +216,7 @@ public void shouldThrowIfPluginThrows() { public void shouldFindCoLocatedPluginIfBadPackaging() { Converter converter = plugins.newPlugin( TestPlugin.BAD_PACKAGING_CO_LOCATED.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); assertNotNull(converter); @@ -226,7 +226,7 @@ public void shouldFindCoLocatedPluginIfBadPackaging() { public void shouldThrowIfPluginMissingSuperclass() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.BAD_PACKAGING_MISSING_SUPERCLASS.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class )); } @@ -242,7 
+242,7 @@ public void shouldThrowIfStaticInitializerThrows() { public void shouldThrowIfStaticInitializerThrowsServiceLoader() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.BAD_PACKAGING_STATIC_INITIALIZER_THROWS_REST_EXTENSION.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), ConnectRestExtension.class )); } @@ -300,7 +300,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { // Plugins are not isolated from other instances of their own class. Converter firstPlugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); @@ -308,7 +308,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { Converter secondPlugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); @@ -323,7 +323,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { public void newPluginShouldServiceLoadWithPluginClassLoader() { Converter plugin = plugins.newPlugin( TestPlugin.SERVICE_LOADER.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); @@ -339,7 +339,7 @@ public void newPluginShouldServiceLoadWithPluginClassLoader() { public void newPluginShouldInstantiateWithPluginClassLoader() { Converter plugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); @@ -428,7 +428,7 @@ public void newConnectorShouldInstantiateWithPluginClassLoader() { @Test public void newPluginsShouldConfigureWithPluginClassLoader() { List configurables = plugins.newPlugins( - Collections.singletonList(TestPlugin.SAMPLING_CONFIGURABLE.className()), + List.of(TestPlugin.SAMPLING_CONFIGURABLE.className()), config, Configurable.class ); @@ -596,7 +596,7 @@ public void testAliasesInConverters() throws ClassNotFoundException { String alias = "SamplingConverter"; assertTrue(TestPlugin.SAMPLING_CONVERTER.className().contains(alias)); ConfigDef def = new ConfigDef().define(configKey, ConfigDef.Type.CLASS, ConfigDef.Importance.HIGH, "docstring"); - AbstractConfig config = new AbstractConfig(def, Collections.singletonMap(configKey, alias)); + AbstractConfig config = new AbstractConfig(def, Map.of(configKey, alias)); assertNotNull(config.getClass(configKey)); assertNotNull(config.getConfiguredInstance(configKey, Converter.class)); @@ -625,7 +625,7 @@ private void assertClassLoaderReadsVersionFromResource( // Initialize Plugins object with parent class loader in the class loader tree. This is // to simulate the situation where jars exist on both system classpath and plugin path. 
- Map pluginProps = Collections.singletonMap( + Map pluginProps = Map.of( WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined(childResource) ); @@ -638,14 +638,14 @@ private void assertClassLoaderReadsVersionFromResource( Converter converter = plugins.newPlugin( className, - new AbstractConfig(new ConfigDef(), Collections.emptyMap()), + new AbstractConfig(new ConfigDef(), Map.of()), Converter.class ); // Verify the version was read from the correct resource assertEquals(expectedVersions[0], new String(converter.fromConnectData(null, null, null))); // When requesting multiple resources, they should be listed in the correct order - assertEquals(Arrays.asList(expectedVersions), + assertEquals(List.of(expectedVersions), converter.toConnectData(null, null).value()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java index a99235a1d1404..b4e5c578e836b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.runtime.isolation; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -42,7 +41,7 @@ public interface SamplingTestPlugin { * @return All known instances of this class, including this instance. */ default List allInstances() { - return Collections.singletonList(this); + return List.of(this); } /** @@ -50,7 +49,7 @@ default List allInstances() { * This should only return direct children, and not reference this instance directly */ default Map otherSamples() { - return Collections.emptyMap(); + return Map.of(); } /** diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java index cf2e53f1846c0..bc326c02ee37a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java @@ -43,7 +43,6 @@ import java.lang.management.ThreadInfo; import java.net.URL; import java.util.Arrays; -import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.BrokenBarrierException; @@ -71,7 +70,7 @@ public class SynchronizationTest { @BeforeEach public void setup(TestInfo testInfo) { - Map pluginProps = Collections.singletonMap( + Map pluginProps = Map.of( WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined() ); @@ -241,7 +240,7 @@ public void testSimultaneousUpwardAndDownwardDelegating() throws Exception { // 4. Load the isolated plugin class and return new AbstractConfig( new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""), - Collections.singletonMap("a.class", t1Class)); + Map.of("a.class", t1Class)); } }; @@ -259,7 +258,7 @@ public void testSimultaneousUpwardAndDownwardDelegating() throws Exception { // 3. Enter the DelegatingClassLoader // 4. 
Load the non-isolated class and return new AbstractConfig(new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""), - Collections.singletonMap("a.class", t2Class)); + Map.of("a.class", t2Class)); } }; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java index d4da3987a646b..5a86ddd7b5f35 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java @@ -298,7 +298,7 @@ public boolean includeByDefault() { if (pluginJars.containsKey(testPackage)) { log.debug("Skipping recompilation of {}", testPackage.resourceDir()); } - pluginJars.put(testPackage, createPluginJar(testPackage.resourceDir(), testPackage.removeRuntimeClasses())); + pluginJars.put(testPackage, createPluginJar(testPackage.resourceDir(), testPackage.removeRuntimeClasses(), Map.of())); } } catch (Throwable e) { log.error("Could not set up plugin test jars", e); @@ -372,7 +372,7 @@ public static List pluginClasses(TestPlugin... plugins) { .filter(Objects::nonNull) .map(TestPlugin::className) .distinct() - .collect(Collectors.toList()); + .toList(); } public static Function noOpLoaderSwap() { @@ -385,10 +385,11 @@ private static TestPlugin[] defaultPlugins() { .toArray(TestPlugin[]::new); } - private static Path createPluginJar(String resourceDir, Predicate removeRuntimeClasses) throws IOException { + + static Path createPluginJar(String resourceDir, Predicate removeRuntimeClasses, Map replacements) throws IOException { Path inputDir = resourceDirectoryPath("test-plugins/" + resourceDir); Path binDir = Files.createTempDirectory(resourceDir + ".bin."); - compileJavaSources(inputDir, binDir); + compileJavaSources(inputDir, binDir, replacements); Path jarFile = Files.createTempFile(resourceDir + ".", ".jar"); try (JarOutputStream jar = openJarFile(jarFile)) { writeJar(jar, inputDir, removeRuntimeClasses); @@ -448,7 +449,7 @@ private static void removeDirectory(Path binDir) throws IOException { * @param sourceDir Directory containing java source files * @throws IOException if the files cannot be compiled */ - private static void compileJavaSources(Path sourceDir, Path binDir) throws IOException { + private static void compileJavaSources(Path sourceDir, Path binDir, Map replacements) throws IOException { JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); List sourceFiles; try (Stream stream = Files.walk(sourceDir)) { @@ -456,13 +457,14 @@ private static void compileJavaSources(Path sourceDir, Path binDir) throws IOExc .filter(Files::isRegularFile) .map(Path::toFile) .filter(file -> file.getName().endsWith(".java")) - .collect(Collectors.toList()); + .map(file -> replacements.isEmpty() ? file : copyAndReplace(file, replacements)) + .toList(); } + StringWriter writer = new StringWriter(); - List options = Arrays.asList( + List options = List.of( "-d", binDir.toString() // Write class output to a different directory. 
); - try (StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null)) { boolean success = compiler.getTask( writer, @@ -478,6 +480,21 @@ private static void compileJavaSources(Path sourceDir, Path binDir) throws IOExc } } + private static File copyAndReplace(File source, Map replacements) throws RuntimeException { + try { + String content = Files.readString(source.toPath()); + for (Map.Entry entry : replacements.entrySet()) { + content = content.replace(entry.getKey(), entry.getValue()); + } + File tmpFile = new File(System.getProperty("java.io.tmpdir") + File.separator + source.getName()); + Files.writeString(tmpFile.toPath(), content); + tmpFile.deleteOnExit(); + return tmpFile; + } catch (IOException e) { + throw new RuntimeException("Could not copy and replace file: " + source, e); + } + } + private static void writeJar(JarOutputStream jar, Path inputDir, Predicate removeRuntimeClasses) throws IOException { List paths; try (Stream stream = Files.walk(inputDir)) { @@ -503,5 +520,4 @@ private static void writeJar(JarOutputStream jar, Path inputDir, Predicate pluginBuilds; + + public VersionedPluginBuilder() { + pluginBuilds = new ArrayList<>(); + } + + public VersionedPluginBuilder include(VersionedTestPlugin plugin, String version) { + pluginBuilds.add(new BuildInfo(plugin, version)); + return this; + } + + public synchronized Path build(String pluginDir) throws IOException { + Path pluginDirPath = Files.createTempDirectory(pluginDir); + pluginDirPath.toFile().deleteOnExit(); + Path subDir = Files.createDirectory(pluginDirPath.resolve("lib")); + subDir.toFile().deleteOnExit(); + for (BuildInfo buildInfo : pluginBuilds) { + Path jarFile = TestPlugins.createPluginJar(buildInfo.plugin.resourceDir(), ignored -> false, Map.of(VERSION_PLACEHOLDER, buildInfo.version)); + Path targetJar = subDir.resolve(jarFile.getFileName()).toAbsolutePath(); + buildInfo.setLocation(targetJar.toString()); + targetJar.toFile().deleteOnExit(); + Files.move(jarFile, targetJar); + } + return pluginDirPath.toAbsolutePath(); + } + + public List buildInfos() { + return pluginBuilds; + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java index 9eedcd7c60a9e..a4368955504c6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java @@ -60,10 +60,9 @@ import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -212,7 +211,7 @@ public void checkCORSRequest(String corsDomain, String origin, String expectedHe doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(Arrays.asList("a", "b")).when(herder).connectors(); + doReturn(List.of("a", "b")).when(herder).connectors(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -255,7 +254,7 @@ public void testStandaloneConfig() throws IOException { doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(Arrays.asList("a", 
"b")).when(herder).connectors(); + doReturn(List.of("a", "b")).when(herder).connectors(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -277,8 +276,8 @@ public void testLoggerEndpointWithDefaults() throws IOException { doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(Collections.emptyList()).when(herder).setWorkerLoggerLevel(logger, loggingLevel); - doReturn(Collections.singletonMap(logger, new LoggerLevel(loggingLevel, lastModified))).when(herder).allLoggerLevels(); + doReturn(List.of()).when(herder).setWorkerLoggerLevel(logger, loggingLevel); + doReturn(Map.of(logger, new LoggerLevel(loggingLevel, lastModified))).when(herder).allLoggerLevels(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -295,7 +294,7 @@ public void testLoggerEndpointWithDefaults() throws IOException { Map expectedLogger = new HashMap<>(); expectedLogger.put("level", loggingLevel); expectedLogger.put("last_modified", lastModified); - Map> expectedLoggers = Collections.singletonMap(logger, expectedLogger); + Map> expectedLoggers = Map.of(logger, expectedLogger); Map> actualLoggers = mapper.readValue(responseStr, new TypeReference<>() { }); assertEquals(expectedLoggers, actualLoggers); } @@ -402,7 +401,7 @@ public void register(ConnectRestExtensionContext restPluginContext) { @Override public void withPluginMetrics(PluginMetrics metrics) { - metricName = metrics.metricName("name", "description", Map.of()); + metricName = metrics.metricName("name", "description", new LinkedHashMap<>()); metrics.addMetric(metricName, (Gauge) (config, now) -> called); } } @@ -437,7 +436,7 @@ private void checkCustomizedHttpResponseHeaders(String headerConfig, Map VALID_HEADER_CONFIGS = Arrays.asList( + private static final List VALID_HEADER_CONFIGS = List.of( "add \t Cache-Control: no-cache, no-store, must-revalidate", "add \r X-XSS-Protection: 1; mode=block", "\n add Strict-Transport-Security: max-age=31536000; includeSubDomains", @@ -48,7 +46,7 @@ public class RestServerConfigTest { "adDdate \n Last-Modified: \t 0" ); - private static final List INVALID_HEADER_CONFIGS = Arrays.asList( + private static final List INVALID_HEADER_CONFIGS = List.of( "set \t", "badaction \t X-Frame-Options:DENY", "set add X-XSS-Protection:1", @@ -70,11 +68,11 @@ public void testListenersConfigAllowedValues() { props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999"); config = RestServerConfig.forPublic(null, props); - assertEquals(Collections.singletonList("http://a.b:9999"), config.listeners()); + assertEquals(List.of("http://a.b:9999"), config.listeners()); props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812"); config = RestServerConfig.forPublic(null, props); - assertEquals(Arrays.asList("http://a.b:9999", "https://a.b:7812"), config.listeners()); + assertEquals(List.of("http://a.b:9999", "https://a.b:7812"), config.listeners()); } @Test @@ -113,7 +111,7 @@ public void testAdminListenersConfigAllowedValues() { props.put(RestServerConfig.ADMIN_LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812"); config = RestServerConfig.forPublic(null, props); - assertEquals(Arrays.asList("http://a.b:9999", "https://a.b:7812"), config.adminListeners()); + assertEquals(List.of("http://a.b:9999", "https://a.b:7812"), config.adminListeners()); RestServerConfig.forPublic(null, props); } @@ -124,7 +122,7 @@ public void testAdminListenersNotAllowingEmptyStrings() { 
props.put(RestServerConfig.ADMIN_LISTENERS_CONFIG, "http://a.b:9999,"); ConfigException ce = assertThrows(ConfigException.class, () -> RestServerConfig.forPublic(null, props)); - assertTrue(ce.getMessage().contains(" admin.listeners")); + assertTrue(ce.getMessage().contains("admin.listeners")); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java index 9731dd6969713..96ff0fc9a6240 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java @@ -18,8 +18,8 @@ import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,7 +43,7 @@ public void testConnectorOffsetsToMap() { offset2.put("offset", new byte[]{0x00, 0x1A}); ConnectorOffset connectorOffset2 = new ConnectorOffset(partition2, offset2); - ConnectorOffsets connectorOffsets = new ConnectorOffsets(Arrays.asList(connectorOffset1, connectorOffset2)); + ConnectorOffsets connectorOffsets = new ConnectorOffsets(List.of(connectorOffset1, connectorOffset2)); Map, Map> connectorOffsetsMap = connectorOffsets.toMap(); assertEquals(2, connectorOffsetsMap.size()); assertEquals(offset1, connectorOffsetsMap.get(partition1)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java index 9c01f1d92a6f5..3d8241e378b9b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java @@ -20,7 +20,7 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -33,7 +33,7 @@ public void testToTargetState() { assertEquals(TargetState.PAUSED, CreateConnectorRequest.InitialState.PAUSED.toTargetState()); assertEquals(TargetState.STOPPED, CreateConnectorRequest.InitialState.STOPPED.toTargetState()); - CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest("test-name", Collections.emptyMap(), null); + CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest("test-name", Map.of(), null); assertNull(createConnectorRequest.initialTargetState()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java index 1b8376db635d4..d510c3c475d1b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java @@ -71,7 +71,6 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -86,7 
+85,6 @@ import jakarta.ws.rs.BadRequestException; -import static java.util.Arrays.asList; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -167,37 +165,37 @@ public class ConnectorPluginsResourceTest { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List connectorConfigValues = connectorConfigDef.validate(PROPS); List partialConnectorConfigValues = connectorConfigDef.validate(PARTIAL_PROPS); - ConfigInfos result = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), connectorConfigValues, Collections.emptyList()); - ConfigInfos partialResult = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), partialConnectorConfigValues, Collections.emptyList()); - List configs = new LinkedList<>(result.values()); - List partialConfigs = new LinkedList<>(partialResult.values()); + ConfigInfos result = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), connectorConfigValues, List.of()); + ConfigInfos partialResult = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), partialConnectorConfigValues, List.of()); + List configs = new LinkedList<>(result.configs()); + List partialConfigs = new LinkedList<>(partialResult.configs()); - ConfigKeyInfo configKeyInfo = new ConfigKeyInfo("test.string.config", "STRING", true, null, "HIGH", "Test configuration for string type.", null, -1, "NONE", "test.string.config", Collections.emptyList()); - ConfigValueInfo configValueInfo = new ConfigValueInfo("test.string.config", "testString", Collections.emptyList(), Collections.emptyList(), true); + ConfigKeyInfo configKeyInfo = new ConfigKeyInfo("test.string.config", "STRING", true, null, "HIGH", "Test configuration for string type.", null, -1, "NONE", "test.string.config", List.of()); + ConfigValueInfo configValueInfo = new ConfigValueInfo("test.string.config", "testString", List.of(), List.of(), true); ConfigInfo configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.int.config", "INT", true, null, "MEDIUM", "Test configuration for integer type.", "Test", 1, "MEDIUM", "test.int.config", Collections.emptyList()); - configValueInfo = new ConfigValueInfo("test.int.config", "1", asList("1", "2", "3"), Collections.emptyList(), true); + configKeyInfo = new ConfigKeyInfo("test.int.config", "INT", true, null, "MEDIUM", "Test configuration for integer type.", "Test", 1, "MEDIUM", "test.int.config", List.of()); + configValueInfo = new ConfigValueInfo("test.int.config", "1", List.of("1", "2", "3"), List.of(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.string.config.default", "STRING", false, "", "LOW", "Test configuration with default value.", null, -1, "NONE", "test.string.config.default", Collections.emptyList()); - configValueInfo = new ConfigValueInfo("test.string.config.default", "", Collections.emptyList(), Collections.emptyList(), true); + configKeyInfo = new ConfigKeyInfo("test.string.config.default", "STRING", false, "", 
"LOW", "Test configuration with default value.", null, -1, "NONE", "test.string.config.default", List.of()); + configValueInfo = new ConfigValueInfo("test.string.config.default", "", List.of(), List.of(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.list.config", "LIST", true, null, "HIGH", "Test configuration for list type.", "Test", 2, "LONG", "test.list.config", Collections.emptyList()); - configValueInfo = new ConfigValueInfo("test.list.config", "a,b", asList("a", "b", "c"), Collections.emptyList(), true); + configKeyInfo = new ConfigKeyInfo("test.list.config", "LIST", true, null, "HIGH", "Test configuration for list type.", "Test", 2, "LONG", "test.list.config", List.of()); + configValueInfo = new ConfigValueInfo("test.list.config", "a,b", List.of("a", "b", "c"), List.of(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), ERROR_COUNT, Collections.singletonList("Test"), configs); - PARTIAL_CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), PARTIAL_CONFIG_ERROR_COUNT, Collections.singletonList("Test"), partialConfigs); + CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), ERROR_COUNT, List.of("Test"), configs); + PARTIAL_CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), PARTIAL_CONFIG_ERROR_COUNT, List.of("Test"), partialConfigs); } private final Herder herder = mock(DistributedHerder.class); @@ -242,7 +240,7 @@ public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() th ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - Collections.singletonList("Test") + List.of("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -258,8 +256,8 @@ public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() th assertEquals(PARTIAL_CONFIG_INFOS.errorCount(), configInfos.errorCount()); assertEquals(PARTIAL_CONFIG_INFOS.groups(), configInfos.groups()); assertEquals( - new HashSet<>(PARTIAL_CONFIG_INFOS.values()), - new HashSet<>(configInfos.values()) + new HashSet<>(PARTIAL_CONFIG_INFOS.configs()), + new HashSet<>(configInfos.configs()) ); verify(herder).validateConnectorConfig(eq(PARTIAL_PROPS), any(), anyBoolean()); } @@ -286,7 +284,7 @@ public void testValidateConfigWithSimpleName() throws Throwable { ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - Collections.singletonList("Test") + List.of("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -300,7 +298,7 @@ public void testValidateConfigWithSimpleName() throws Throwable { assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); - assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); + assertEquals(new HashSet<>(CONFIG_INFOS.configs()), new HashSet<>(configInfos.configs())); verify(herder).validateConnectorConfig(eq(PROPS), any(), anyBoolean()); } @@ -326,7 +324,7 @@ public void testValidateConfigWithAlias() throws Throwable { ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - 
Collections.singletonList("Test") + List.of("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -340,7 +338,7 @@ public void testValidateConfigWithAlias() throws Throwable { assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); - assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); + assertEquals(new HashSet<>(CONFIG_INFOS.configs()), new HashSet<>(configInfos.configs())); verify(herder).validateConnectorConfig(eq(PROPS), any(), anyBoolean()); } @@ -374,8 +372,8 @@ public void testConnectorPluginsIncludesClassTypeAndVersionInformation() throws ClassLoader classLoader = ConnectorPluginsResourceTest.class.getClassLoader(); PluginInfo sinkInfo = new PluginInfo(new PluginDesc<>(SampleSinkConnector.class, SampleSinkConnector.VERSION, PluginType.SINK, classLoader)); PluginInfo sourceInfo = new PluginInfo(new PluginDesc<>(SampleSourceConnector.class, SampleSourceConnector.VERSION, PluginType.SOURCE, classLoader)); - assertEquals(PluginType.SINK.toString(), sinkInfo.type()); - assertEquals(PluginType.SOURCE.toString(), sourceInfo.type()); + assertEquals(PluginType.SINK.toString(), sinkInfo.type().toString()); + assertEquals(PluginType.SOURCE.toString(), sourceInfo.type().toString()); assertEquals(SampleSinkConnector.VERSION, sinkInfo.version()); assertEquals(SampleSourceConnector.VERSION, sourceInfo.version()); assertEquals(SampleSinkConnector.class.getName(), sinkInfo.className()); @@ -493,7 +491,7 @@ private static class IntegerRecommender implements Recommender { @Override public List validValues(String name, Map parsedConfig) { - return asList(1, 2, 3); + return List.of(1, 2, 3); } @Override @@ -505,7 +503,7 @@ public boolean visible(String name, Map parsedConfig) { private static class ListRecommender implements Recommender { @Override public List validValues(String name, Map parsedConfig) { - return asList("a", "b", "c"); + return List.of("a", "b", "c"); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java index 9dfead77220f6..26a4665824802 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java @@ -55,9 +55,7 @@ import java.net.URI; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -142,14 +140,14 @@ public class ConnectorsResourceTest { CONNECTOR_CONFIG_WITH_EMPTY_NAME.put(ConnectorConfig.NAME_CONFIG, ""); CONNECTOR_CONFIG_WITH_EMPTY_NAME.put("sample_config", "test_config"); } - private static final List CONNECTOR_TASK_NAMES = Arrays.asList( + private static final List CONNECTOR_TASK_NAMES = List.of( new ConnectorTaskId(CONNECTOR_NAME, 0), new ConnectorTaskId(CONNECTOR_NAME, 1) ); private static final List> TASK_CONFIGS = new ArrayList<>(); static { - TASK_CONFIGS.add(Collections.singletonMap("config", "value")); - TASK_CONFIGS.add(Collections.singletonMap("config", "other_value")); + TASK_CONFIGS.add(Map.of("config", "value")); + TASK_CONFIGS.add(Map.of("config", "other_value")); } private static final List TASK_INFOS = 
new ArrayList<>(); static { @@ -158,7 +156,7 @@ public class ConnectorsResourceTest { } private static final Set CONNECTOR_ACTIVE_TOPICS = new HashSet<>( - Arrays.asList("foo_topic", "bar_topic")); + List.of("foo_topic", "bar_topic")); private static final RestRequestTimeout REQUEST_TIMEOUT = RestRequestTimeout.constant( DEFAULT_REST_REQUEST_TIMEOUT_MS, @@ -196,16 +194,16 @@ public void testListConnectors() { MultivaluedMap queryParams = new MultivaluedHashMap<>(); queryParams.putSingle("forward", "true"); when(forward.getQueryParameters()).thenReturn(queryParams); - when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); Collection connectors = (Collection) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), new HashSet<>(connectors)); + assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), new HashSet<>(connectors)); } @Test public void testExpandConnectorsStatus() { - when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorStateInfo connector = mock(ConnectorStateInfo.class); ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class); when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2); @@ -218,14 +216,14 @@ public void testExpandConnectorsStatus() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); + assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); assertEquals(connector, expanded.get(CONNECTOR_NAME).get("status")); } @Test public void testExpandConnectorsInfo() { - when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorInfo connector = mock(ConnectorInfo.class); ConnectorInfo connector2 = mock(ConnectorInfo.class); when(herder.connectorInfo(CONNECTOR2_NAME)).thenReturn(connector2); @@ -238,14 +236,14 @@ public void testExpandConnectorsInfo() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); + assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("info")); assertEquals(connector, expanded.get(CONNECTOR_NAME).get("info")); } @Test public void testFullExpandConnectors() { - when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorInfo connectorInfo = mock(ConnectorInfo.class); ConnectorInfo connectorInfo2 = mock(ConnectorInfo.class); when(herder.connectorInfo(CONNECTOR2_NAME)).thenReturn(connectorInfo2); @@ -257,12 +255,12 @@ public void testFullExpandConnectors() { forward = mock(UriInfo.class); MultivaluedMap queryParams = new MultivaluedHashMap<>(); - queryParams.put("expand", Arrays.asList("info", "status")); + 
queryParams.put("expand", List.of("info", "status")); when(forward.getQueryParameters()).thenReturn(queryParams); Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); + assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); assertEquals(connectorInfo2, expanded.get(CONNECTOR2_NAME).get("info")); assertEquals(connectorInfo, expanded.get(CONNECTOR_NAME).get("info")); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); @@ -271,7 +269,7 @@ public void testFullExpandConnectors() { @Test public void testExpandConnectorsWithConnectorNotFound() { - when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class); when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2); doThrow(mock(NotFoundException.class)).when(herder).connectorStatus(CONNECTOR_NAME); @@ -283,7 +281,7 @@ public void testExpandConnectorsWithConnectorNotFound() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Collections.singleton(CONNECTOR2_NAME), expanded.keySet()); + assertEquals(Set.of(CONNECTOR2_NAME), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); } @@ -291,7 +289,7 @@ public void testExpandConnectorsWithConnectorNotFound() { @Test public void testCreateConnector() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -304,7 +302,7 @@ public void testCreateConnector() throws Throwable { @Test public void testCreateConnectorWithPausedInitialState() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.PAUSED); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.PAUSED); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -317,7 +315,7 @@ public void testCreateConnectorWithPausedInitialState() throws Throwable { @Test public void testCreateConnectorWithStoppedInitialState() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.STOPPED); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.STOPPED); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -330,7 +328,7 @@ public void testCreateConnectorWithStoppedInitialState() throws Throwable { @Test public void testCreateConnectorWithRunningInitialState() throws Throwable { 
CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.RUNNING); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.RUNNING); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -343,7 +341,7 @@ public void testCreateConnectorWithRunningInitialState() throws Throwable { @Test public void testCreateConnectorNotLeader() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackNotLeaderException(cb).when(herder) @@ -357,7 +355,7 @@ public void testCreateConnectorNotLeader() throws Throwable { @Test public void testCreateConnectorWithHeaders() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); HttpHeaders httpHeaders = mock(HttpHeaders.class); expectAndCallbackNotLeaderException(cb) @@ -371,7 +369,7 @@ public void testCreateConnectorWithHeaders() throws Throwable { @Test public void testCreateConnectorExists() { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new AlreadyExistsException("already exists")) @@ -496,7 +494,7 @@ public void testGetTaskConfigs() throws Throwable { connectorTask0Configs.put("connector-task1-config0", "321"); connectorTask0Configs.put("connector-task1-config1", "654"); final ConnectorTaskId connector2Task0 = new ConnectorTaskId(CONNECTOR2_NAME, 0); - final Map connector2Task0Configs = Collections.singletonMap("connector2-task0-config0", "789"); + final Map connector2Task0Configs = Map.of("connector2-task0-config0", "789"); final List expectedTasksConnector = new ArrayList<>(); expectedTasksConnector.add(new TaskInfo(connectorTask0, connectorTask0Configs)); @@ -529,7 +527,7 @@ public void testPutConnectorConfig() throws Throwable { @Test public void testCreateConnectorWithSpecialCharsInName() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME_SPECIAL_CHARS, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_SPECIAL_CHARS), null); + Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_SPECIAL_CHARS), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME_SPECIAL_CHARS, CONNECTOR_CONFIG, @@ -544,7 +542,7 @@ public void testCreateConnectorWithSpecialCharsInName() throws Throwable { @Test public void testCreateConnectorWithControlSequenceInName() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME_CONTROL_SEQUENCES1, - Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_CONTROL_SEQUENCES1), null); + 
Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_CONTROL_SEQUENCES1), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME_CONTROL_SEQUENCES1, CONNECTOR_CONFIG, @@ -688,11 +686,9 @@ public void testRestartConnectorAndTasksRebalanceNeeded() { @Test public void testRestartConnectorAndTasksRequestAccepted() throws Throwable { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), - "foo", - null + AbstractStatus.State.RESTARTING.name(), "foo", null, null ); - ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, Collections.emptyList(), ConnectorType.SOURCE); + ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, List.of(), ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, false); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); @@ -861,9 +857,9 @@ public void testGetOffsetsConnectorNotFound() { @Test public void testGetOffsets() throws Throwable { final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); - ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList( - new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), - new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")) + ConnectorOffsets offsets = new ConnectorOffsets(List.of( + new ConnectorOffset(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + new ConnectorOffset(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")) )); expectAndCallbackResult(cb, offsets).when(herder).connectorOffsets(eq(CONNECTOR_NAME), cb.capture()); @@ -873,7 +869,7 @@ public void testGetOffsets() throws Throwable { @Test public void testAlterOffsetsEmptyOffsets() { assertThrows(BadRequestException.class, () -> connectorsResource.alterConnectorOffsets( - false, NULL_HEADERS, CONNECTOR_NAME, new ConnectorOffsets(Collections.emptyList()))); + false, NULL_HEADERS, CONNECTOR_NAME, new ConnectorOffsets(List.of()))); } @Test @@ -881,7 +877,7 @@ public void testAlterOffsetsNotLeader() throws Throwable { Map partition = new HashMap<>(); Map offset = new HashMap<>(); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackNotLeaderException(cb).when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture()); @@ -896,7 +892,7 @@ public void testAlterOffsetsConnectorNotFound() { Map partition = new HashMap<>(); Map offset = new HashMap<>(); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new NotFoundException("Connector not found")) .when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture()); @@ -906,10 +902,10 @@ public void 
testAlterOffsetsConnectorNotFound() { @Test public void testAlterOffsets() throws Throwable { - Map partition = Collections.singletonMap("partitionKey", "partitionValue"); - Map offset = Collections.singletonMap("offsetKey", "offsetValue"); + Map partition = Map.of("partitionKey", "partitionValue"); + Map offset = Map.of("offsetKey", "offsetValue"); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java index aee85a86c2ab2..6cbe164e26213 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java @@ -39,7 +39,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Base64; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -66,8 +65,8 @@ public class InternalConnectResourceTest { private static final HttpHeaders NULL_HEADERS = null; private static final List> TASK_CONFIGS = new ArrayList<>(); static { - TASK_CONFIGS.add(Collections.singletonMap("config", "value")); - TASK_CONFIGS.add(Collections.singletonMap("config", "other_value")); + TASK_CONFIGS.add(Map.of("config", "value")); + TASK_CONFIGS.add(Map.of("config", "other_value")); } private static final String FENCE_PATH = "/connectors/" + CONNECTOR_NAME + "/fence"; private static final String TASK_CONFIGS_PATH = "/connectors/" + CONNECTOR_NAME + "/tasks"; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java index c73bba8c84368..67ccb519d05ef 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java @@ -32,8 +32,8 @@ import org.slf4j.event.Level; import java.util.Arrays; -import java.util.Collections; import java.util.List; +import java.util.Map; import jakarta.ws.rs.core.Response; @@ -90,7 +90,7 @@ public void setLevelWithEmptyArgTest() { BadRequestException.class, () -> loggingResource.setLevel( "@root", - Collections.emptyMap(), + Map.of(), scope ) ); @@ -104,7 +104,7 @@ public void setLevelWithInvalidArgTest() { NotFoundException.class, () -> loggingResource.setLevel( "@root", - Collections.singletonMap("level", "HIGH"), + Map.of("level", "HIGH"), scope ) ); @@ -130,7 +130,7 @@ public void testSetLevelWorkerScope() { private void testSetLevelWorkerScope(String scope, boolean expectWarning) { final String logger = "org.apache.kafka.connect"; final String level = "TRACE"; - final List expectedLoggers = Arrays.asList( + final List expectedLoggers = List.of( "org.apache.kafka.connect", "org.apache.kafka.connect.runtime.distributed.DistributedHerder" ); @@ -138,7 +138,7 @@ private void 
testSetLevelWorkerScope(String scope, boolean expectWarning) { List actualLoggers; try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(LoggingResource.class)) { - Response response = loggingResource.setLevel(logger, Collections.singletonMap("level", level), scope); + Response response = loggingResource.setLevel(logger, Map.of("level", level), scope); assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); actualLoggers = (List) response.getEntity(); long warningMessages = logCaptureAppender.getEvents().stream() @@ -159,7 +159,7 @@ public void testSetLevelClusterScope() { final String logger = "org.apache.kafka.connect"; final String level = "TRACE"; - Response response = loggingResource.setLevel(logger, Collections.singletonMap("level", level), "cluster"); + Response response = loggingResource.setLevel(logger, Map.of("level", level), "cluster"); assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); assertNull(response.getEntity()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java index c8c746ffe71cc..c9eedccdc64ee 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java @@ -25,8 +25,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -156,7 +156,7 @@ public void testCreateServerSideSslContextFactoryDefaultValues() { assertEquals(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ssl.getKeyStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ssl.getTrustStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, ssl.getProtocol()); - assertArrayEquals(Arrays.asList(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); + assertArrayEquals(List.of(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); assertEquals(SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ssl.getKeyManagerFactoryAlgorithm()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ssl.getTrustManagerFactoryAlgorithm()); assertFalse(ssl.getNeedClientAuth()); @@ -181,7 +181,7 @@ public void testCreateClientSideSslContextFactoryDefaultValues() { assertEquals(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ssl.getKeyStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ssl.getTrustStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, ssl.getProtocol()); - assertArrayEquals(Arrays.asList(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); + assertArrayEquals(List.of(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); assertEquals(SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ssl.getKeyManagerFactoryAlgorithm()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ssl.getTrustManagerFactoryAlgorithm()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java index 4d8c25932fe42..c4d52fe4b4e5b 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java @@ -48,6 +48,7 @@ private Map sslProps() { private Map baseWorkerProps() { return new HashMap<>() { { + put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, "/tmp/foo"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java index 9e893e79eba0e..dc6325bfa36bc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java @@ -75,20 +75,16 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static java.util.Collections.emptyList; -import static java.util.Collections.singleton; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.runtime.TopicCreationConfig.DEFAULT_TOPIC_CREATION_PREFIX; import static org.apache.kafka.connect.runtime.TopicCreationConfig.PARTITIONS_CONFIG; import static org.apache.kafka.connect.runtime.TopicCreationConfig.REPLICATION_FACTOR_CONFIG; @@ -202,7 +198,7 @@ public void testCreateConnectorFailedValidation() { when(connectorMock.config()).thenReturn(new ConfigDef()); ConfigValue validatedValue = new ConfigValue("foo.bar"); - when(connectorMock.validate(config)).thenReturn(new Config(singletonList(validatedValue))); + when(connectorMock.validate(config)).thenReturn(new Config(List.of(validatedValue))); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); @@ -265,7 +261,7 @@ public void testCreateConnectorWithStoppedInitialState() throws Exception { herder.putConnectorConfig(CONNECTOR_NAME, config, TargetState.STOPPED, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); assertEquals( - new ConnectorInfo(CONNECTOR_NAME, connectorConfig(SourceSink.SINK), Collections.emptyList(), ConnectorType.SINK), + new ConnectorInfo(CONNECTOR_NAME, connectorConfig(SourceSink.SINK), List.of(), ConnectorType.SINK), connectorInfo.result() ); verify(loaderSwap).close(); @@ -279,7 +275,8 @@ public void testDestroyConnector() throws Exception { Map config = connectorConfig(SourceSink.SOURCE); expectConfigValidation(SourceSink.SOURCE, config); - when(statusBackingStore.getAll(CONNECTOR_NAME)).thenReturn(Collections.emptyList()); + when(statusBackingStore.getAll(CONNECTOR_NAME)).thenReturn(List.of()); + when(worker.connectorVersion(CONNECTOR_NAME)).thenReturn(null); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); @@ -314,11 +311,11 
@@ public void testRestartConnectorSameTaskConfigs() throws Exception { mockStartConnector(config, TargetState.STARTED, TargetState.STARTED, null); - when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); when(worker.getPlugins()).thenReturn(plugins); // same task configs as earlier, so don't expect a new set of tasks to be brought up when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, config, true))) - .thenReturn(Collections.singletonList(taskConfig(SourceSink.SOURCE))); + .thenReturn(List.of(taskConfig(SourceSink.SOURCE))); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); @@ -345,13 +342,13 @@ public void testRestartConnectorNewTaskConfigs() throws Exception { mockStartConnector(config, TargetState.STARTED, TargetState.STARTED, null); - when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); when(worker.getPlugins()).thenReturn(plugins); // changed task configs, expect a new set of tasks to be brought up (and the old ones to be stopped) Map taskConfigs = taskConfig(SourceSink.SOURCE); taskConfigs.put("k", "v"); when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, config, true))) - .thenReturn(Collections.singletonList(taskConfigs)); + .thenReturn(List.of(taskConfigs)); when(worker.startSourceTask(eq(new ConnectorTaskId(CONNECTOR_NAME, 0)), any(), eq(connectorConfig(SourceSink.SOURCE)), eq(taskConfigs), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -400,13 +397,13 @@ public void testRestartTask() throws Exception { ClusterConfigState configState = new ClusterConfigState( -1, null, - Collections.singletonMap(CONNECTOR_NAME, 1), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), - Collections.singletonMap(taskId, taskConfig(SourceSink.SOURCE)), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Map.of(CONNECTOR_NAME, 1), + Map.of(CONNECTOR_NAME, connectorConfig), + Map.of(CONNECTOR_NAME, TargetState.STARTED), + Map.of(taskId, taskConfig(SourceSink.SOURCE)), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -435,13 +432,13 @@ public void testRestartTaskFailureOnStart() throws Exception { ClusterConfigState configState = new ClusterConfigState( -1, null, - Collections.singletonMap(CONNECTOR_NAME, 1), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), - Collections.singletonMap(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Map.of(CONNECTOR_NAME, 1), + Map.of(CONNECTOR_NAME, connectorConfig), + Map.of(CONNECTOR_NAME, TargetState.STARTED), + Map.of(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -533,6 +530,7 @@ public void 
testRestartConnectorAndTasksOnlyConnector() throws Exception { expectConfigValidation(SourceSink.SINK, connectorConfig); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); + when(worker.connectorVersion(CONNECTOR_NAME)).thenReturn(null); mockStartConnector(connectorConfig, null, TargetState.STARTED, null); @@ -558,27 +556,28 @@ public void testRestartConnectorAndTasksOnlyTasks() throws Exception { when(restartPlan.shouldRestartTasks()).thenReturn(true); when(restartPlan.restartTaskCount()).thenReturn(1); when(restartPlan.totalTaskCount()).thenReturn(1); - when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); when(restartPlan.restartConnectorStateInfo()).thenReturn(connectorStateInfo); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); expectAdd(SourceSink.SINK); + when(worker.taskVersion(any())).thenReturn(null); Map connectorConfig = connectorConfig(SourceSink.SINK); expectConfigValidation(SourceSink.SINK, connectorConfig); - doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); + doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); ClusterConfigState configState = new ClusterConfigState( -1, null, - Collections.singletonMap(CONNECTOR_NAME, 1), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), - Collections.singletonMap(taskId, taskConfig(SourceSink.SINK)), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Map.of(CONNECTOR_NAME, 1), + Map.of(CONNECTOR_NAME, connectorConfig), + Map.of(CONNECTOR_NAME, TargetState.STARTED), + Map.of(taskId, taskConfig(SourceSink.SINK)), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -609,32 +608,34 @@ public void testRestartConnectorAndTasksBoth() throws Exception { when(restartPlan.shouldRestartTasks()).thenReturn(true); when(restartPlan.restartTaskCount()).thenReturn(1); when(restartPlan.totalTaskCount()).thenReturn(1); - when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); when(restartPlan.restartConnectorStateInfo()).thenReturn(connectorStateInfo); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); ArgumentCaptor taskStatus = ArgumentCaptor.forClass(TaskStatus.class); expectAdd(SourceSink.SINK, false); + when(worker.connectorVersion(any())).thenReturn(null); + when(worker.taskVersion(any())).thenReturn(null); Map connectorConfig = connectorConfig(SourceSink.SINK); expectConfigValidation(SourceSink.SINK, connectorConfig); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); - doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); + doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); mockStartConnector(connectorConfig, null, TargetState.STARTED, null); ClusterConfigState configState = new ClusterConfigState( -1, null, - Collections.singletonMap(CONNECTOR_NAME, 1), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), - Collections.singletonMap(taskId, taskConfig(SourceSink.SINK)), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new 
AppliedConnectorConfig(connectorConfig)), + Map.of(CONNECTOR_NAME, 1), + Map.of(CONNECTOR_NAME, connectorConfig), + Map.of(CONNECTOR_NAME, TargetState.STARTED), + Map.of(taskId, taskConfig(SourceSink.SINK)), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -689,7 +690,7 @@ public void testAccessors() throws Exception { Callback> taskConfigsCb = mock(Callback.class); // Check accessors with empty worker - doNothing().when(listConnectorsCb).onCompletion(null, Collections.EMPTY_SET); + doNothing().when(listConnectorsCb).onCompletion(null, Set.of()); doNothing().when(connectorInfoCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(taskConfigsCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(connectorConfigCb).onCompletion(any(NotFoundException.class), isNull()); @@ -698,13 +699,13 @@ public void testAccessors() throws Exception { expectConfigValidation(SourceSink.SOURCE, connConfig); // Validate accessors with 1 connector - doNothing().when(listConnectorsCb).onCompletion(null, singleton(CONNECTOR_NAME)); - ConnectorInfo connInfo = new ConnectorInfo(CONNECTOR_NAME, connConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), + doNothing().when(listConnectorsCb).onCompletion(null, Set.of(CONNECTOR_NAME)); + ConnectorInfo connInfo = new ConnectorInfo(CONNECTOR_NAME, connConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); doNothing().when(connectorInfoCb).onCompletion(null, connInfo); TaskInfo taskInfo = new TaskInfo(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)); - doNothing().when(taskConfigsCb).onCompletion(null, singletonList(taskInfo)); + doNothing().when(taskConfigsCb).onCompletion(null, List.of(taskInfo)); // All operations are synchronous for StandaloneHerder, so we don't need to actually wait after making each call herder.connectors(listConnectorsCb); @@ -753,8 +754,8 @@ public void testPutConnectorConfig() throws Exception { // Generate same task config, but from different connector config, resulting // in task restarts when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, newConnConfig, true))) - .thenReturn(singletonList(taskConfig(SourceSink.SOURCE))); - doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); + .thenReturn(List.of(taskConfig(SourceSink.SOURCE))); + doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); doNothing().when(statusBackingStore).put(new TaskStatus(taskId, TaskStatus.State.DESTROYED, WORKER_ID, 0)); when(worker.startSourceTask(eq(taskId), any(), eq(newConnConfig), eq(taskConfig(SourceSink.SOURCE)), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -768,7 +769,7 @@ public void testPutConnectorConfig() throws Exception { doNothing().when(connectorConfigCb).onCompletion(null, newConnConfig); herder.putConnectorConfig(CONNECTOR_NAME, newConnConfig, true, reconfigureCallback); Herder.Created newConnectorInfo = reconfigureCallback.get(1000L, TimeUnit.SECONDS); - ConnectorInfo newConnInfo = new ConnectorInfo(CONNECTOR_NAME, newConnConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), + ConnectorInfo newConnInfo = new ConnectorInfo(CONNECTOR_NAME, newConnConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); assertEquals(newConnInfo, newConnectorInfo.result()); @@ -847,7 +848,7 @@ private void expectConnectorStartingWithoutTasks(Map config, boo eq(herder), 
eq(TargetState.STARTED), onStart.capture()); ConnectorConfig connConfig = new SourceConnectorConfig(plugins, config, true); when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)) - .thenReturn(emptyList()); + .thenReturn(List.of()); } @Test @@ -856,7 +857,7 @@ public void testPutTaskConfigs() { Callback cb = mock(Callback.class); assertThrows(UnsupportedOperationException.class, () -> herder.putTaskConfigs(CONNECTOR_NAME, - singletonList(singletonMap("config", "value")), cb, null)); + List.of(Map.of("config", "value")), cb, null)); } @Test @@ -869,11 +870,11 @@ public void testCorruptConfig() { config.put(SinkConnectorConfig.TOPICS_CONFIG, TOPICS_LIST_STR); Connector connectorMock = mock(SinkConnector.class); String error = "This is an error in your config!"; - List errors = new ArrayList<>(singletonList(error)); + List errors = new ArrayList<>(List.of(error)); String key = "foo.invalid.key"; when(connectorMock.validate(config)).thenReturn( new Config( - singletonList(new ConfigValue(key, null, Collections.emptyList(), errors)) + List.of(new ConfigValue(key, null, List.of(), errors)) ) ); ConfigDef configDef = new ConfigDef(); @@ -933,7 +934,7 @@ public void testTargetStates() throws Exception { verify(statusBackingStore).put(new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), AbstractStatus.State.DESTROYED, WORKER_ID, 0)); stopCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); herder.taskConfigs(CONNECTOR_NAME, taskConfigsCallback); - assertEquals(Collections.emptyList(), taskConfigsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); + assertEquals(List.of(), taskConfigsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); // herder.stop() should stop any running connectors and tasks even if destroyConnector was not invoked herder.stop(); @@ -948,7 +949,7 @@ public void testModifyConnectorOffsetsUnknownConnector() { initialize(false); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets("unknown-connector", - Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), alterOffsetsCallback); ExecutionException e = assertThrows(ExecutionException.class, () -> alterOffsetsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); assertInstanceOf(NotFoundException.class, e.getCause()); @@ -967,20 +968,20 @@ public void testModifyConnectorOffsetsConnectorNotInStoppedState() { herder.configState = new ClusterConfigState( 10, null, - Collections.singletonMap(CONNECTOR_NAME, 3), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Collections.singletonMap(CONNECTOR_NAME, TargetState.PAUSED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_NAME, 3), + Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Map.of(CONNECTOR_NAME, TargetState.PAUSED), + Map.of(), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Set.of(), + Set.of() ); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets(CONNECTOR_NAME, - Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + Map.of(Map.of("partitionKey", "partitionValue"), 
Map.of("offsetKey", "offsetValue")), alterOffsetsCallback); ExecutionException e = assertThrows(ExecutionException.class, () -> alterOffsetsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); assertInstanceOf(BadRequestException.class, e.getCause()); @@ -1006,19 +1007,19 @@ public void testAlterConnectorOffsets() throws Exception { herder.configState = new ClusterConfigState( 10, null, - Collections.singletonMap(CONNECTOR_NAME, 0), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STOPPED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_NAME, 0), + Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Map.of(CONNECTOR_NAME, TargetState.STOPPED), + Map.of(), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Set.of(), + Set.of() ); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets(CONNECTOR_NAME, - Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), alterOffsetsCallback); assertEquals(msg, alterOffsetsCallback.get(1000, TimeUnit.MILLISECONDS)); } @@ -1039,15 +1040,15 @@ public void testResetConnectorOffsets() throws Exception { herder.configState = new ClusterConfigState( 10, null, - Collections.singletonMap(CONNECTOR_NAME, 0), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STOPPED), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Collections.emptySet(), - Collections.emptySet() + Map.of(CONNECTOR_NAME, 0), + Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Map.of(CONNECTOR_NAME, TargetState.STOPPED), + Map.of(), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Set.of(), + Set.of() ); FutureCallback resetOffsetsCallback = new FutureCallback<>(); herder.resetConnectorOffsets(CONNECTOR_NAME, resetOffsetsCallback); @@ -1073,7 +1074,7 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { assertEquals(createdInfo(SourceSink.SOURCE), connectorInfo.result()); // Prepare for task config update - when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); expectStop(); @@ -1085,8 +1086,8 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { updatedTaskConfig2.put("dummy-task-property", "2"); when(worker.connectorTaskConfigs(eq(CONNECTOR_NAME), any())) .thenReturn( - Collections.singletonList(updatedTaskConfig1), - Collections.singletonList(updatedTaskConfig2)); + List.of(updatedTaskConfig1), + List.of(updatedTaskConfig2)); // Set new config on the connector and tasks FutureCallback> reconfigureCallback = new FutureCallback<>(); @@ -1097,7 +1098,7 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { // Wait on connector update Herder.Created updatedConnectorInfo = reconfigureCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); - 
ConnectorInfo expectedConnectorInfo = new ConnectorInfo(CONNECTOR_NAME, newConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); + ConnectorInfo expectedConnectorInfo = new ConnectorInfo(CONNECTOR_NAME, newConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); assertEquals(expectedConnectorInfo, updatedConnectorInfo.result()); verify(statusBackingStore, times(2)).put(new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), TaskStatus.State.DESTROYED, WORKER_ID, 0)); @@ -1124,6 +1125,7 @@ private void expectAdd(SourceSink sourceSink, } when(worker.isRunning(CONNECTOR_NAME)).thenReturn(true); + if (sourceSink == SourceSink.SOURCE) { when(worker.isTopicCreationEnabled()).thenReturn(true); } @@ -1134,24 +1136,25 @@ private void expectAdd(SourceSink sourceSink, Map generatedTaskProps = taskConfig(sourceSink); if (mockConnectorTaskConfigs) { - when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)).thenReturn(singletonList(generatedTaskProps)); + when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)).thenReturn(List.of(generatedTaskProps)); } ClusterConfigState configState = new ClusterConfigState( -1, null, - Collections.singletonMap(CONNECTOR_NAME, 1), - Collections.singletonMap(CONNECTOR_NAME, connectorConfig), - Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), - Collections.singletonMap(new ConnectorTaskId(CONNECTOR_NAME, 0), generatedTaskProps), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Map.of(CONNECTOR_NAME, 1), + Map.of(CONNECTOR_NAME, connectorConfig), + Map.of(CONNECTOR_NAME, TargetState.STARTED), + Map.of(new ConnectorTaskId(CONNECTOR_NAME, 0), generatedTaskProps), + Map.of(), + Map.of(), + Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); if (sourceSink.equals(SourceSink.SOURCE) && mockStartSourceTask) { + when(worker.taskVersion(any())).thenReturn(null); when(worker.startSourceTask(new ConnectorTaskId(CONNECTOR_NAME, 0), configState, connectorConfig(sourceSink), generatedTaskProps, herder, TargetState.STARTED)).thenReturn(true); } @@ -1185,13 +1188,13 @@ private void expectTargetState(String connector, TargetState state) { private ConnectorInfo createdInfo(SourceSink sourceSink) { return new ConnectorInfo(CONNECTOR_NAME, connectorConfig(sourceSink), - singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), + List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), SourceSink.SOURCE == sourceSink ? 
ConnectorType.SOURCE : ConnectorType.SINK); } private void expectStop() { ConnectorTaskId task = new ConnectorTaskId(CONNECTOR_NAME, 0); - doNothing().when(worker).stopAndAwaitTasks(singletonList(task)); + doNothing().when(worker).stopAndAwaitTasks(List.of(task)); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); } @@ -1246,7 +1249,7 @@ private void expectConfigValidation( // Set up validation for each config for (Map config : configs) { - when(connectorMock.validate(config)).thenReturn(new Config(Collections.emptyList())); + when(connectorMock.validate(config)).thenReturn(new Config(List.of())); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java index f78ab54950f4a..b12658c35e399 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java @@ -40,8 +40,8 @@ import org.mockito.quality.Strictness; import java.nio.ByteBuffer; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -440,7 +440,7 @@ private MockConsumer createMockConsumer(String topic) { MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); Node noNode = Node.noNode(); Node[] nodes = new Node[]{noNode}; - consumer.updatePartitions(topic, Collections.singletonList(new PartitionInfo(topic, 0, noNode, nodes, nodes))); + consumer.updatePartitions(topic, List.of(new PartitionInfo(topic, 0, noNode, nodes, nodes))); consumer.updateBeginningOffsets(mkMap(mkEntry(new TopicPartition(topic, 0), 100L))); return consumer; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java index 204fcc283bd94..139369c0d560a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.json.JsonConverter; import org.apache.kafka.connect.json.JsonConverterConfig; +import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.util.Callback; @@ -34,8 +35,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -79,13 +78,14 @@ public void setup() { converter = mock(Converter.class); // This is only needed for storing deserialized connector partitions, which we don't test in most of the cases here when(converter.toConnectData(anyString(), any(byte[].class))).thenReturn(new SchemaAndValue(null, - Arrays.asList("connector", Collections.singletonMap("partitionKey", "dummy")))); + List.of("connector", Map.of("partitionKey", "dummy")))); store = new FileOffsetBackingStore(converter); tempFile = assertDoesNotThrow(() -> File.createTempFile("fileoffsetbackingstore", null)); Map props = new HashMap<>(); 
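The standalone and offset-store tests in this patch now add bootstrap.servers to the worker properties, which suggests the worker configuration treats that setting as required in this version. A minimal sketch of the property map the updated tests appear to rely on; the class name StandaloneTestProps, the localhost:9092 address, and the /tmp/foo path are placeholders for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;

public class StandaloneTestProps {
    public static Map<String, String> baseWorkerProps() {
        Map<String, String> props = new HashMap<>();
        // Required by the worker config exercised in these tests
        props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
        props.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
        // Standalone mode persists offsets to a local file
        props.put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, "/tmp/foo");
        return props;
    }

    public static void main(String[] args) {
        StandaloneConfig config = new StandaloneConfig(baseWorkerProps());
        System.out.println(config.originalsStrings().keySet());
    }
}

Building a StandaloneConfig from such a map mirrors what StandaloneConfigTest and FileOffsetBackingStoreTest do after this change.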
props.put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, tempFile.getAbsolutePath()); props.put(StandaloneConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); props.put(StandaloneConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); + props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); config = new StandaloneConfig(props); store.configure(config); store.start(); @@ -105,7 +105,7 @@ public void testGetSet() throws Exception { store.set(FIRST_SET, setCallback).get(); - Map values = store.get(Arrays.asList(buffer("key"), buffer("bad"))).get(); + Map values = store.get(List.of(buffer("key"), buffer("bad"))).get(); assertEquals(buffer("value"), values.get(buffer("key"))); assertNull(values.get(buffer("bad"))); verify(setCallback).onCompletion(isNull(), isNull()); @@ -123,7 +123,7 @@ public void testSaveRestore() throws Exception { FileOffsetBackingStore restore = new FileOffsetBackingStore(converter); restore.configure(config); restore.start(); - Map values = restore.get(Collections.singletonList(buffer("key"))).get(); + Map values = restore.get(List.of(buffer("key"))).get(); assertEquals(buffer("value"), values.get(buffer("key"))); verify(setCallback).onCompletion(isNull(), isNull()); } @@ -135,26 +135,26 @@ public void testConnectorPartitions() throws Exception { // This test actually requires the offset store to track deserialized source partitions, so we can't use the member variable mock converter JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + jsonConverter.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); Map serializedPartitionOffsets = new HashMap<>(); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), - serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), + serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) ); store.set(serializedPartitionOffsets, setCallback).get(); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), - serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue2")) + serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), + serialize(jsonConverter, Map.of("offsetKey", "offsetValue2")) ); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue2")), - serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue2")), + serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) ); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector2", Collections.singletonMap("partitionKey", "partitionValue")), - serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector2", Map.of("partitionKey", "partitionValue")), + serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) ); store.set(serializedPartitionOffsets, setCallback).get(); @@ -167,23 +167,23 @@ public void testConnectorPartitions() throws Exception { Set> connectorPartitions1 = 
restore.connectorPartitions("connector1"); Set> expectedConnectorPartition1 = new HashSet<>(); - expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue1")); - expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue2")); + expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue1")); + expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue2")); assertEquals(expectedConnectorPartition1, connectorPartitions1); Set> connectorPartitions2 = restore.connectorPartitions("connector2"); - Set> expectedConnectorPartition2 = Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); + Set> expectedConnectorPartition2 = Set.of(Map.of("partitionKey", "partitionValue")); assertEquals(expectedConnectorPartition2, connectorPartitions2); serializedPartitionOffsets.clear(); // Null valued offset for a partition key should remove that partition for the connector serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), + serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), null ); restore.set(serializedPartitionOffsets, setCallback).get(); connectorPartitions1 = restore.connectorPartitions("connector1"); - assertEquals(Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue2")), connectorPartitions1); + assertEquals(Set.of(Map.of("partitionKey", "partitionValue2")), connectorPartitions1); verify(setCallback, times(3)).onCompletion(isNull(), isNull()); } @@ -193,7 +193,7 @@ private static ByteBuffer buffer(String v) { } private static ByteBuffer serializeKey(Converter converter, String connectorName, Map sourcePartition) { - List nameAndPartition = Arrays.asList(connectorName, sourcePartition); + List nameAndPartition = List.of(connectorName, sourcePartition); return serialize(converter, nameAndPartition); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java index d455976423dac..98eaab7df4f10 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java @@ -61,8 +61,6 @@ import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -126,41 +124,41 @@ public class KafkaConfigBackingStoreTest { DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); } - private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); - private static final List CONNECTOR_CONFIG_KEYS = Arrays.asList("connector-connector1", "connector-connector2"); - private static final List COMMIT_TASKS_CONFIG_KEYS = Arrays.asList("commit-connector1", "commit-connector2"); + private static final List CONNECTOR_IDS = List.of("connector1", "connector2"); + private static final List CONNECTOR_CONFIG_KEYS = List.of("connector-connector1", "connector-connector2"); + private static final List COMMIT_TASKS_CONFIG_KEYS = List.of("commit-connector1", "commit-connector2"); - private static final List TARGET_STATE_KEYS = Arrays.asList("target-state-connector1", 
"target-state-connector2"); - private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = Arrays.asList("tasks-fencing-connector1", "tasks-fencing-connector2"); + private static final List TARGET_STATE_KEYS = List.of("target-state-connector1", "target-state-connector2"); + private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = List.of("tasks-fencing-connector1", "tasks-fencing-connector2"); private static final String CONNECTOR_1_NAME = "connector1"; private static final String CONNECTOR_2_NAME = "connector2"; - private static final List RESTART_CONNECTOR_KEYS = Arrays.asList(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); + private static final List RESTART_CONNECTOR_KEYS = List.of(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); // Need a) connector with multiple tasks and b) multiple connectors - private static final List TASK_IDS = Arrays.asList( + private static final List TASK_IDS = List.of( new ConnectorTaskId("connector1", 0), new ConnectorTaskId("connector1", 1), new ConnectorTaskId("connector2", 0) ); - private static final List TASK_CONFIG_KEYS = Arrays.asList("task-connector1-0", "task-connector1-1", "task-connector2-0"); + private static final List TASK_CONFIG_KEYS = List.of("task-connector1-0", "task-connector1-1", "task-connector2-0"); // Need some placeholders -- the contents don't matter here, just that they are restored properly - private static final List> SAMPLE_CONFIGS = Arrays.asList( - Collections.singletonMap("config-key-one", "config-value-one"), - Collections.singletonMap("config-key-two", "config-value-two"), - Collections.singletonMap("config-key-three", "config-value-three") + private static final List> SAMPLE_CONFIGS = List.of( + Map.of("config-key-one", "config-value-one"), + Map.of("config-key-two", "config-value-two"), + Map.of("config-key-three", "config-value-three") ); - private static final List TASK_CONFIG_STRUCTS = Arrays.asList( + private static final List TASK_CONFIG_STRUCTS = List.of( new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)) ); private static final Struct ONLY_FAILED_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(INCLUDE_TASKS_FIELD_NAME, false); private static final Struct INCLUDE_TASKS_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true); - private static final List RESTART_REQUEST_STRUCTS = Arrays.asList( + private static final List RESTART_REQUEST_STRUCTS = List.of( new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true).put(INCLUDE_TASKS_FIELD_NAME, false), ONLY_FAILED_MISSING_STRUCT, INCLUDE_TASKS_MISSING_STRUCT); - private static final List CONNECTOR_CONFIG_STRUCTS = Arrays.asList( + private static final List CONNECTOR_CONFIG_STRUCTS = List.of( new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) @@ -173,14 +171,14 @@ public class KafkaConfigBackingStoreTest { private static final Struct TARGET_STATE_STOPPED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "STOPPED"); - private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = 
Arrays.asList( + private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = List.of( new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 6), new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9), new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 2) ); // The exact format doesn't matter here since both conversions are mocked - private static final List CONFIGS_SERIALIZED = Arrays.asList( + private static final List CONFIGS_SERIALIZED = List.of( "config-bytes-1".getBytes(), "config-bytes-2".getBytes(), "config-bytes-3".getBytes(), "config-bytes-4".getBytes(), "config-bytes-5".getBytes(), "config-bytes-6".getBytes(), "config-bytes-7".getBytes(), "config-bytes-8".getBytes(), "config-bytes-9".getBytes() @@ -191,7 +189,7 @@ public class KafkaConfigBackingStoreTest { private static final Struct TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 0); - private static final List TARGET_STATES_SERIALIZED = Arrays.asList( + private static final List TARGET_STATES_SERIALIZED = List.of( "started".getBytes(), "paused".getBytes(), "stopped".getBytes() ); @Mock @@ -308,8 +306,8 @@ public void testPutConnectorConfig() throws Exception { String configKey = CONNECTOR_CONFIG_KEYS.get(1); String targetStateKey = TARGET_STATE_KEYS.get(1); - doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) + doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) // Config deletion .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(configKey, null); @@ -426,7 +424,7 @@ public void testPutConnectorConfigProducerError() throws Exception { assertEquals(-1, configState.offset()); assertEquals(0, configState.connectors().size()); - Exception thrownException = new ExecutionException(new TopicAuthorizationException(Collections.singleton("test"))); + Exception thrownException = new ExecutionException(new TopicAuthorizationException(Set.of("test"))); when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenThrow(thrownException); // verify that the producer exception from KafkaBasedLog::send is propagated @@ -510,8 +508,8 @@ public void testWritePrivileges() throws Exception { doReturn(fencableProducer).when(configStorage).createFencableProducer(); // And write the task count record successfully when(fencableProducer.send(any(ProducerRecord.class))).thenReturn(null); - doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) + doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) .when(configLog).readToEnd(); when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(0))) .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)))); @@ -570,7 +568,7 @@ public void testWritePrivileges() throws Exception { @Test public void testRestoreTargetStateUnexpectedDeletion() { - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 
0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -599,7 +597,7 @@ public void testRestoreTargetStateUnexpectedDeletion() { // The target state deletion should reset the state to STARTED ClusterConfigState configState = configStorage.snapshot(); assertEquals(5, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); configStorage.stop(); @@ -608,7 +606,7 @@ public void testRestoreTargetStateUnexpectedDeletion() { @Test public void testRestoreTargetState() { - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -643,7 +641,7 @@ public void testRestoreTargetState() { // Should see a single connector with initial state paused ClusterConfigState configState = configStorage.snapshot(); assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); @@ -657,7 +655,7 @@ public void testRestore() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), @@ -699,18 +697,18 @@ public void testRestore() { // Should see a single connector and its config should be the last one seen anywhere in the log ClusterConfigState configState = configStorage.snapshot(); assertEquals(logOffset, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); // Should see 2 tasks for that connector. 
Only config updates before the root key update should be reflected - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(1))); assertEquals(9, (int) configState.taskCountRecord(CONNECTOR_IDS.get(1))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); - assertEquals(Collections.singleton("connector1"), configState.connectorsPendingFencing); + assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Set.of("connector1"), configState.connectorsPendingFencing); // Shouldn't see any callbacks since this is during startup configStorage.stop(); @@ -723,7 +721,7 @@ public void testRestoreConnectorDeletion() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -769,7 +767,7 @@ public void testRestoreZeroTasks() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -807,13 +805,13 @@ public void testRestoreZeroTasks() { // Should see a single connector and its config should be the last one seen anywhere in the log ClusterConfigState configState = configStorage.snapshot(); assertEquals(8, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); // Should see 0 tasks for that connector. 
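The swaps from `Collections.EMPTY_SET` / `Collections.singletonList(...)` to `Set.of()` / `List.of(...)` in the assertions above only change how the expected values are built, not what the assertions compare: `java.util` collection equality is content-based, and the factory collections are additionally immutable. A minimal standalone sketch (not part of the patch) of why these rewrites are drop-in for the expected values:

```java
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class FactoryEqualitySketch {
    public static void main(String[] args) {
        // Equality is content-based, so assertEquals sees the same result after the swap.
        System.out.println(Set.of().equals(Collections.EMPTY_SET));                                 // true
        System.out.println(List.of("connector1").equals(Collections.singletonList("connector1")));  // true

        // Unlike Arrays.asList (fixed-size but element-mutable), List.of is fully immutable,
        // which is what shared static test fixtures want.
        List<String> fixture = List.of("connector1");
        try {
            fixture.set(0, "connector2");
        } catch (UnsupportedOperationException expected) {
            System.out.println("List.of fixtures cannot be mutated");
        }
    }
}
```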
- assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Set.of(), configState.inconsistentConnectors()); // Shouldn't see any callbacks since this is during startup configStorage.stop(); @@ -1020,7 +1018,7 @@ public void testConsumerPropertiesNotInsertedByDefaultWithoutExactlyOnceSourceEn @Test public void testBackgroundConnectorDeletion() throws Exception { // verify that we handle connector deletions correctly when they come up through the log - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -1072,7 +1070,7 @@ public void testBackgroundConnectorDeletion() throws Exception { assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); // Ensure that the deleted connector's deferred task updates have been cleaned up // in order to prevent unbounded growth of the map - assertEquals(Collections.emptyMap(), configStorage.deferredTaskUpdates); + assertEquals(Map.of(), configStorage.deferredTaskUpdates); configStorage.stop(); verify(configLog).stop(); @@ -1083,7 +1081,7 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // Test a case where a failure and compaction has left us in an inconsistent state when reading the log. // We start out by loading an initial configuration where we started to write a task update, and then // compaction cleaned up the earlier record. - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), // This is the record that has been compacted: @@ -1110,13 +1108,13 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // After reading the log, it should have been in an inconsistent state ClusterConfigState configState = configStorage.snapshot(); assertEquals(6, configState.offset()); // Should always be next to be read, not last committed - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); // Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list - assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] assertNull(configState.taskConfig(TASK_IDS.get(0))); assertNull(configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); + assertEquals(Set.of(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); // Records to be read by consumer as it reads to the end of the log LinkedHashMap serializedConfigs = new LinkedHashMap<>(); @@ -1136,20 +1134,20 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // Next, issue a write that has everything that is needed and it should be accepted. 
Note that in this case // we are going to shrink the number of tasks to 1 - configStorage.putTaskConfigs("connector1", Collections.singletonList(SAMPLE_CONFIGS.get(0))); + configStorage.putTaskConfigs("connector1", List.of(SAMPLE_CONFIGS.get(0))); // Validate updated config configState = configStorage.snapshot(); // This is only two more ahead of the last one because multiple calls fail, and so their configs are not written // to the topic. Only the last call with 1 task config + 1 commit actually gets written. assertEquals(8, configState.offset()); - assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(Collections.singletonList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(List.of(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Set.of(), configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(0))); + verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0))); configStorage.stop(); verify(configLog).stop(); @@ -1168,7 +1166,7 @@ public void testPutRestartRequestOnlyFailedIncludingTasks() { } private void testPutRestartRequest(RestartRequest restartRequest) { - expectStart(Collections.emptyList(), Collections.emptyMap()); + expectStart(List.of(), Map.of()); when(configLog.partitionCount()).thenReturn(1); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); @@ -1204,7 +1202,7 @@ public void testRestoreRestartRequestInconsistentState() { // Restoring data should notify only of the latest values after loading is complete. This also validates // that inconsistent state doesn't prevent startup. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), @@ -1250,7 +1248,7 @@ public void testPutTaskConfigsZeroTasks() { // Records to be read by consumer as it reads to the end of the log doAnswer(expectReadToEnd(new LinkedHashMap<>())). 
- doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + doAnswer(expectReadToEnd(Map.of(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) .when(configLog).readToEnd(); expectConvertWriteRead( @@ -1258,7 +1256,7 @@ public void testPutTaskConfigsZeroTasks() { "tasks", 0); // We have 0 tasks // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); // Null before writing @@ -1267,19 +1265,19 @@ public void testPutTaskConfigsZeroTasks() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = Collections.emptyList(); + List> taskConfigs = List.of(); configStorage.putTaskConfigs("connector1", taskConfigs); // Validate root config by listing all connectors and tasks configState = configStorage.snapshot(); assertEquals(1, configState.offset()); String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Collections.emptyList(), configState.tasks(connectorName)); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(List.of(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(List.of(), configState.tasks(connectorName)); + assertEquals(Set.of(), configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList()); + verify(configUpdateListener).onTaskConfigUpdate(List.of()); configStorage.stop(); verify(configLog).stop(); @@ -1288,7 +1286,7 @@ public void testPutTaskConfigsZeroTasks() { @Test public void testBackgroundUpdateTargetState() throws Exception { // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -1314,7 +1312,7 @@ public void testBackgroundUpdateTargetState() throws Exception { // Should see a single connector with initial state started ClusterConfigState configState = configStorage.snapshot(); - assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); + assertEquals(Set.of(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); @@ -1343,7 +1341,7 @@ public void testBackgroundUpdateTargetState() throws Exception { @Test public void testSameTargetState() { // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 
TASK_CONFIG_KEYS.get(0), @@ -1395,7 +1393,7 @@ public void testPutLogLevel() throws Exception { // Pre-populate the config topic with a couple of logger level records; these should be ignored (i.e., // not reported to the update listener) - List> existingRecords = Arrays.asList( + List> existingRecords = List.of( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger1, CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty() ), @@ -1482,7 +1480,7 @@ public void testTaskCountRecordsAndGenerations() { CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONFIGS_SERIALIZED.get(3), new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 4)); - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); // Before anything is written String connectorName = CONNECTOR_IDS.get(0); @@ -1493,7 +1491,7 @@ public void testTaskCountRecordsAndGenerations() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); configState = configStorage.snapshot(); @@ -1511,7 +1509,7 @@ public void testTaskCountRecordsAndGenerations() { assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); configStorage.stop(); verify(configLog).stop(); @@ -1545,7 +1543,7 @@ public void testPutTaskConfigs() { COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); // Starts with 0 tasks, after update has 2 // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); // Null before writing ClusterConfigState configState = configStorage.snapshot(); @@ -1555,21 +1553,21 @@ public void testPutTaskConfigs() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); // Validate root config by listing all connectors and tasks configState = configStorage.snapshot(); assertEquals(3, configState.offset()); String connectorName = CONNECTOR_IDS.get(0); - assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); + assertEquals(List.of(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); assertEquals(SAMPLE_CONFIGS.get(0), 
configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Set.of(), configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); configStorage.stop(); verify(configLog).stop(); @@ -1616,8 +1614,8 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { assertNull(configState.taskConfig(TASK_IDS.get(1))); // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); - List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); + List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); expectConvertWriteRead2( @@ -1627,8 +1625,8 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { COMMIT_TASKS_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(4), new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 1)); // Starts with 2 tasks, after update has 3 - addConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), Collections.emptyList()); - taskConfigs = Collections.singletonList(SAMPLE_CONFIGS.get(2)); + addConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), List.of()); + taskConfigs = List.of(SAMPLE_CONFIGS.get(2)); configStorage.putTaskConfigs("connector2", taskConfigs); // Validate root config by listing all connectors and tasks @@ -1636,17 +1634,17 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { assertEquals(5, configState.offset()); String connectorName1 = CONNECTOR_IDS.get(0); String connectorName2 = CONNECTOR_IDS.get(1); - assertEquals(Arrays.asList(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); - assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); - assertEquals(Collections.singletonList(TASK_IDS.get(2)), configState.tasks(connectorName2)); + assertEquals(List.of(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); + assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); + assertEquals(List.of(TASK_IDS.get(2)), configState.tasks(connectorName2)); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); assertEquals(SAMPLE_CONFIGS.get(2), configState.taskConfig(TASK_IDS.get(2))); - assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Set.of(), configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); - verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(2))); + verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); + 
verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(2))); configStorage.stop(); verify(configLog).stop(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java index 6a1969fe64fc6..19aafabee7178 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java @@ -45,9 +45,9 @@ import java.nio.ByteBuffer; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -145,7 +145,7 @@ public void setup(Boolean mockKeyConverter) { if (mockKeyConverter) { when(keyConverter.toConnectData(any(), any())).thenReturn(new SchemaAndValue(null, - Arrays.asList("connector", Collections.singletonMap("partitionKey", "dummy")))); + List.of("connector", Map.of("partitionKey", "dummy")))); } store = spy(new KafkaOffsetBackingStore(adminSupplier, clientIdBase, keyConverter)); @@ -233,7 +233,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting from empty store should return nulls - Map offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + Map offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); // Since we didn't read them yet, these will be null assertNull(offsets.get(TP0_KEY)); assertNull(offsets.get(TP1_KEY)); @@ -270,7 +270,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertEquals(TP0_VALUE, offsets.get(TP0_KEY)); assertEquals(TP1_VALUE, offsets.get(TP1_KEY)); @@ -287,7 +287,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertEquals(TP0_VALUE_NEW, offsets.get(TP0_KEY)); assertEquals(TP1_VALUE_NEW, offsets.get(TP1_KEY)); @@ -363,7 +363,7 @@ public void testGetSetNull() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(Collections.singletonList(TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(List.of(TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertNull(offsets.get(TP1_KEY)); // Just verifying that KafkaOffsetBackingStore::get returns null isn't enough, we also need to verify that the mapping for the source partition key is removed. 
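The `testConnectorPartitions` changes in the next hunk keep serializing offset record keys as a two-element list of connector name plus source-partition map, now built with `List.of` / `Map.of`. A rough standalone sketch of that key shape, using the same converter configuration as the test (the topic name and sample values here are made up, and the rendered JSON is indicative only):

```java
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonConverterConfig;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

public class OffsetKeySketch {
    public static void main(String[] args) {
        // Schemaless JSON, configured as a key converter, mirroring the test setup.
        JsonConverter converter = new JsonConverter();
        converter.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true);

        // Offset record keys are a two-element list: [connector name, source partition map].
        byte[] key = converter.fromConnectData("connect-offsets", null,
                List.of("connector1", Map.of("partitionKey", "partitionValue1")));

        // Expected to render roughly as ["connector1",{"partitionKey":"partitionValue1"}]
        System.out.println(new String(key, StandardCharsets.UTF_8));
    }
}
```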
@@ -488,7 +488,7 @@ public void testClientIds() { @Test public void testConnectorPartitions() throws Exception { JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + jsonConverter.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); store = spy(new KafkaOffsetBackingStore(() -> { fail("Should not attempt to instantiate admin in these tests"); return null; @@ -506,57 +506,57 @@ public void testConnectorPartitions() throws Exception { doAnswer(invocation -> { capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, Arrays.asList("connector1", - Collections.singletonMap("partitionKey", "partitionValue1"))), TP0_VALUE.array(), + jsonConverter.fromConnectData("", null, List.of("connector1", + Map.of("partitionKey", "partitionValue1"))), TP0_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, Arrays.asList("connector1", - Collections.singletonMap("partitionKey", "partitionValue1"))), TP1_VALUE.array(), + jsonConverter.fromConnectData("", null, List.of("connector1", + Map.of("partitionKey", "partitionValue1"))), TP1_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, Arrays.asList("connector1", - Collections.singletonMap("partitionKey", "partitionValue2"))), TP2_VALUE.array(), + jsonConverter.fromConnectData("", null, List.of("connector1", + Map.of("partitionKey", "partitionValue2"))), TP2_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, Arrays.asList("connector2", - Collections.singletonMap("partitionKey", "partitionValue"))), TP1_VALUE.array(), + jsonConverter.fromConnectData("", null, List.of("connector2", + Map.of("partitionKey", "partitionValue"))), TP1_VALUE.array(), new RecordHeaders(), Optional.empty())); storeLogCallbackArgumentCaptor.getValue().onCompletion(null, null); return null; }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Trigger a read to the end of the log - store.get(Collections.emptyList()).get(10000, TimeUnit.MILLISECONDS); + store.get(List.of()).get(10000, TimeUnit.MILLISECONDS); Set> connectorPartitions1 = store.connectorPartitions("connector1"); Set> expectedConnectorPartition1 = new HashSet<>(); - expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue1")); - expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue2")); + expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue1")); + expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue2")); assertEquals(expectedConnectorPartition1, connectorPartitions1); Set> connectorPartitions2 = store.connectorPartitions("connector2"); - Set> expectedConnectorPartition2 = Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); + Set> expectedConnectorPartition2 = Set.of(Map.of("partitionKey", "partitionValue")); 
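One caveat that explains why a few fixtures in this patch are not converted: the `List.of` / `Map.of` / `Set.of` factories reject null elements and values as well as duplicate keys or elements, so tombstone-style test data (the `LinkedHashMap` holding a null config value earlier in `KafkaConfigBackingStoreTest`, or the `Arrays.asList("connector-name", null)` partition key kept in `OffsetUtilsTest` further down) stays on the old builders. A small illustration, not taken from the patch:

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class NullAndDuplicateRules {
    public static void main(String[] args) {
        // Null "tombstone" values still need a plain map; Map.of refuses them at creation time.
        Map<String, byte[]> tombstone = new LinkedHashMap<>();
        tombstone.put("connector-config-key", null);                  // fine
        try {
            Map.of("connector-config-key", (byte[]) null);
        } catch (NullPointerException expected) {
            System.out.println("Map.of rejects null values");
        }

        // Same story for null list elements.
        List<String> legacy = Arrays.asList("connector-name", null);  // fine
        try {
            List.of("connector-name", null);
        } catch (NullPointerException expected) {
            System.out.println("List.of rejects null elements");
        }

        // And Set.of refuses duplicates instead of silently deduplicating like HashSet.
        try {
            Set.of("connector1", "connector1");
        } catch (IllegalArgumentException expected) {
            System.out.println("Set.of rejects duplicate elements");
        }

        System.out.println(tombstone + " " + legacy);
    }
}
```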
assertEquals(expectedConnectorPartition2, connectorPartitions2); doAnswer(invocation -> { capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, Arrays.asList("connector1", - Collections.singletonMap("partitionKey", "partitionValue1"))), null, + jsonConverter.fromConnectData("", null, List.of("connector1", + Map.of("partitionKey", "partitionValue1"))), null, new RecordHeaders(), Optional.empty())); storeLogCallbackArgumentCaptor.getValue().onCompletion(null, null); return null; }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Trigger a read to the end of the log - store.get(Collections.emptyList()).get(10000, TimeUnit.MILLISECONDS); + store.get(List.of()).get(10000, TimeUnit.MILLISECONDS); // Null valued offset for a partition key should remove that partition for the connector connectorPartitions1 = store.connectorPartitions("connector1"); - assertEquals(Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue2")), connectorPartitions1); + assertEquals(Set.of(Map.of("partitionKey", "partitionValue2")), connectorPartitions1); store.stop(); verify(storeLog).stop(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java index 83d9e953478e0..dadb3f4242315 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java @@ -35,9 +35,9 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; +import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import static org.apache.kafka.connect.json.JsonConverterConfig.SCHEMAS_ENABLE_CONFIG; @@ -75,7 +75,7 @@ public class KafkaStatusBackingStoreFormatTest { public void setup() { time = new MockTime(); JsonConverter converter = new JsonConverter(); - converter.configure(Collections.singletonMap(SCHEMAS_ENABLE_CONFIG, false), false); + converter.configure(Map.of(SCHEMAS_ENABLE_CONFIG, false), false); store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, () -> null, kafkaBasedLog); } @@ -182,7 +182,7 @@ public void deleteTopicStatus() { store.read(statusRecord); assertTrue(store.topics.containsKey("bar")); assertFalse(store.topics.get("bar").containsKey("foo")); - assertEquals(Collections.emptyMap(), store.topics.get("bar")); + assertEquals(Map.of(), store.topics.get("bar")); } @Test @@ -204,7 +204,7 @@ public void putTopicState() { ConsumerRecord statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, key, valueCaptor.getValue()); store.read(statusRecord); assertEquals(topicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC)); - assertEquals(Collections.singleton(topicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); + assertEquals(Set.of(topicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); } @Test @@ -277,7 +277,7 @@ public void putTopicStateShouldOverridePreviousState() { assertEquals(secondTopicStatus, store.parseTopicStatus(valueCaptor.getValue())); assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC)); assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, 
BAR_TOPIC)); - assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); + assertEquals(Set.of(firstTopicStatus, secondTopicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java index a9ac5f483bef1..108dbbc45c3f1 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java @@ -43,11 +43,11 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.function.Supplier; import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG; @@ -388,8 +388,8 @@ public void deleteConnectorState() { verify(kafkaBasedLog).send(eq("status-connector-" + CONNECTOR), eq(value), any(Callback.class)); verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class)); - assertEquals(new HashSet<>(Collections.singletonList(CONNECTOR)), store.connectors()); - assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR))); + assertEquals(Set.of(CONNECTOR), store.connectors()); + assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR))); store.read(consumerRecord(0, "status-connector-conn", null)); assertTrue(store.connectors().isEmpty()); assertTrue(store.getAll(CONNECTOR).isEmpty()); @@ -412,7 +412,7 @@ public void deleteTaskState() { verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class)); - assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR))); + assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR))); store.read(consumerRecord(0, "status-task-conn-0", null)); assertTrue(store.getAll(CONNECTOR).isEmpty()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java index a06496d112dd2..f0a87695f9c22 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java @@ -28,9 +28,7 @@ import org.mockito.quality.Strictness; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -51,13 +49,13 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class MemoryConfigBackingStoreTest { - private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); + private static final List CONNECTOR_IDS = List.of("connector1", "connector2"); // Actual values are irrelevant here and can be used as either connector or task configurations - private static final List> SAMPLE_CONFIGS = Arrays.asList( - Collections.singletonMap("config-key-one", "config-value-one"), - Collections.singletonMap("config-key-two", "config-value-two"), - 
Collections.singletonMap("config-key-three", "config-value-three") + private static final List> SAMPLE_CONFIGS = List.of( + Map.of("config-key-one", "config-value-one"), + Map.of("config-key-two", "config-value-two"), + Map.of("config-key-three", "config-value-three") ); @Mock @@ -142,10 +140,10 @@ public void testRemoveConnectorConfig() { public void testPutTaskConfigs() { // Can't write task configs for non-existent connector assertThrows(IllegalArgumentException.class, - () -> configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1)))); + () -> configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1)))); configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); - configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1))); + configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1))); ClusterConfigState configState = configStore.snapshot(); ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_IDS.get(0), 0); @@ -153,7 +151,7 @@ public void testPutTaskConfigs() { assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(taskId)); verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0))); - verify(configUpdateListener).onTaskConfigUpdate(eq(Collections.singleton(taskId))); + verify(configUpdateListener).onTaskConfigUpdate(eq(Set.of(taskId))); } @Test @@ -172,18 +170,18 @@ public void testRemoveTaskConfigs() { }).when(configUpdateListener).onTaskConfigUpdate(anySet()); configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); - configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1))); + configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1))); configStore.removeTaskConfigs(CONNECTOR_IDS.get(0)); ClusterConfigState configState = configStore.snapshot(); assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); - assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0))); verify(configUpdateListener, times(2)).onTaskConfigUpdate(anySet()); ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_IDS.get(0), 0); - assertEquals(Arrays.asList(Collections.singleton(taskId), Collections.singleton(taskId)), onTaskConfigUpdateCaptures); + assertEquals(List.of(Set.of(taskId), Set.of(taskId)), onTaskConfigUpdateCaptures); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java index 33d76cbd6a6d5..7e6d072b9a720 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java @@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; +import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -44,7 +44,7 @@ public void putAndGetTaskStatus() { TaskStatus status = new TaskStatus(taskId, ConnectorStatus.State.RUNNING, "localhost:8083", 0); store.put(status); assertEquals(status, store.get(taskId)); - assertEquals(Collections.singleton(status), 
store.getAll("connector")); + assertEquals(Set.of(status), store.getAll("connector")); } @Test @@ -63,5 +63,4 @@ public void deleteTaskStatus() { store.put(new TaskStatus(taskId, ConnectorStatus.State.DESTROYED, "localhost:8083", 0)); assertNull(store.get(taskId)); } - } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java index 23d17cd9970f8..94b5bb0e78c81 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java @@ -54,8 +54,8 @@ public class OffsetStorageWriterTest { private static final String NAMESPACE = "namespace"; // Connect format - any types should be accepted here - private static final Map OFFSET_KEY = Collections.singletonMap("key", "key"); - private static final Map OFFSET_VALUE = Collections.singletonMap("key", 12); + private static final Map OFFSET_KEY = Map.of("key", "key"); + private static final Map OFFSET_VALUE = Map.of("key", 12); // Serialized private static final byte[] OFFSET_KEY_SERIALIZED = "key-serialized".getBytes(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java index d4f0cf45203c0..9cc0f34af8810 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java @@ -26,8 +26,8 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; @@ -40,7 +40,7 @@ public class OffsetUtilsTest { private static final JsonConverter CONVERTER = new JsonConverter(); static { - CONVERTER.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + CONVERTER.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); } @Test @@ -60,18 +60,18 @@ public void testValidateFormatMapWithNonStringKeys() { @Test public void testValidateFormatMapWithNonPrimitiveKeys() { - Map offsetData = Collections.singletonMap("key", new Object()); + Map offsetData = Map.of("key", new Object()); DataException e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData)); assertTrue(e.getMessage().contains("Offsets may only contain primitive types as values")); - Map offsetData2 = Collections.singletonMap("key", new ArrayList<>()); + Map offsetData2 = Map.of("key", new ArrayList<>()); e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData2)); assertTrue(e.getMessage().contains("Offsets may only contain primitive types as values")); } @Test public void testValidateFormatWithValidFormat() { - Map offsetData = Collections.singletonMap("key", 1); + Map offsetData = Map.of("key", 1); // Expect no exception to be thrown OffsetUtils.validateFormat(offsetData); } @@ -99,17 +99,17 @@ public void testProcessPartitionKeyNotList() { @Test public void testProcessPartitionKeyListWithOneElement() { assertInvalidPartitionKey( - serializePartitionKey(Collections.singletonList("")), + serializePartitionKey(List.of("")), "Ignoring offset partition key with an unexpected number of elements"); } @Test public void 
testProcessPartitionKeyListWithElementsOfWrongType() { assertInvalidPartitionKey( - serializePartitionKey(Arrays.asList(1, new HashMap<>())), + serializePartitionKey(List.of(1, new HashMap<>())), "Ignoring offset partition key with an unexpected format for the first element in the partition key list"); assertInvalidPartitionKey( - serializePartitionKey(Arrays.asList("connector-name", new ArrayList<>())), + serializePartitionKey(List.of("connector-name", new ArrayList<>())), "Ignoring offset partition key with an unexpected format for the second element in the partition key list"); } @@ -128,7 +128,7 @@ public void assertInvalidPartitionKey(byte[] key, String message) { public void testProcessPartitionKeyValidList() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(OffsetUtils.class)) { Map>> connectorPartitions = new HashMap<>(); - OffsetUtils.processPartitionKey(serializePartitionKey(Arrays.asList("connector-name", new HashMap<>())), new byte[0], CONVERTER, connectorPartitions); + OffsetUtils.processPartitionKey(serializePartitionKey(List.of("connector-name", new HashMap<>())), new byte[0], CONVERTER, connectorPartitions); assertEquals(1, connectorPartitions.size()); assertEquals(0, logCaptureAppender.getMessages().size()); } @@ -139,7 +139,7 @@ public void testProcessPartitionKeyNullPartition() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(OffsetUtils.class)) { Map>> connectorPartitions = new HashMap<>(); OffsetUtils.processPartitionKey(serializePartitionKey(Arrays.asList("connector-name", null)), new byte[0], CONVERTER, connectorPartitions); - assertEquals(Collections.emptyMap(), connectorPartitions); + assertEquals(Map.of(), connectorPartitions); assertEquals(0, logCaptureAppender.getMessages().size()); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java index 7b2e8d7cfa733..95216af9be696 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java @@ -27,7 +27,6 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -125,7 +124,7 @@ public void testOverrideWarning() { "thanks to newly-introduced federal legislation", false) ); - assertEquals(Collections.singletonMap("\u1984", "big brother"), props); + assertEquals(Map.of("\u1984", "big brother"), props); props.clear(); props.put("\u1984", "BIG BROTHER"); @@ -141,7 +140,7 @@ public void testOverrideWarning() { "thanks to newly-introduced federal legislation", true) ); - assertEquals(Collections.singletonMap("\u1984", "big brother"), props); + assertEquals(Map.of("\u1984", "big brother"), props); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java index aabf894e1ea90..defac44851b08 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java @@ -49,9 +49,7 @@ import org.mockito.quality.Strictness; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; import 
java.util.List; import java.util.Map; import java.util.Optional; @@ -100,7 +98,7 @@ public class KafkaBasedLogTest { CONSUMER_PROPS.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); } - private static final Set CONSUMER_ASSIGNMENT = new HashSet<>(Arrays.asList(TP0, TP1)); + private static final Set CONSUMER_ASSIGNMENT = Set.of(TP0, TP1); private static final Map FIRST_SET = new HashMap<>(); static { FIRST_SET.put("key", "value"); @@ -153,7 +151,7 @@ protected MockConsumer createConsumer() { } }; consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); - consumer.updatePartitions(TOPIC, Arrays.asList(TPINFO0, TPINFO1)); + consumer.updatePartitions(TOPIC, List.of(TPINFO0, TPINFO1)); Map beginningOffsets = new HashMap<>(); beginningOffsets.put(TP0, 0L); beginningOffsets.put(TP1, 0L); @@ -408,7 +406,7 @@ public void testGetOffsetsConsumerErrorOnReadToEnd() throws Exception { @Test public void testOffsetReadFailureWhenWorkThreadFails() throws Exception { RuntimeException exception = new RuntimeException(); - Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); + Set tps = Set.of(TP0, TP1); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); @@ -482,7 +480,7 @@ public void testProducerError() { @Test public void testReadEndOffsetsUsingAdmin() { - Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); + Set tps = Set.of(TP0, TP1); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); @@ -498,7 +496,7 @@ public void testReadEndOffsetsUsingAdmin() { @Test public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() { - Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); + Set tps = Set.of(TP0, TP1); admin = mock(TopicAdmin.class); // Getting end offsets using the admin client should fail with unsupported version when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenThrow(new UnsupportedVersionException("too old")); @@ -516,7 +514,7 @@ public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() { @Test public void testReadEndOffsetsUsingAdminThatFailsWithRetriable() { - Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); + Set tps = Set.of(TP0, TP1); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java index 2fb788a1f495a..22ffd21d5de16 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java @@ -26,7 +26,6 @@ import org.mockito.quality.Strictness; import java.time.Duration; -import java.util.Collections; import java.util.Map; import java.util.function.Function; @@ -43,7 +42,7 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class SharedTopicAdminTest { - private static final Map EMPTY_CONFIG = Collections.emptyMap(); + private static final Map EMPTY_CONFIG = Map.of(); @Mock private TopicAdmin mockTopicAdmin; @Mock private Function, TopicAdmin> factory; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java index 268b27e19d8b2..b22602872f18c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java @@ -23,7 +23,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -44,7 +43,7 @@ public void testConsumerGroupOffsetsToConnectorOffsets() { connectorOffsets = SinkUtils.consumerGroupOffsetsToConnectorOffsets(consumerGroupOffsets); assertEquals(1, connectorOffsets.offsets().size()); - assertEquals(Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 100L), connectorOffsets.offsets().get(0).offset()); + assertEquals(Map.of(SinkUtils.KAFKA_OFFSET_KEY, 100L), connectorOffsets.offsets().get(0).offset()); Map expectedPartition = new HashMap<>(); expectedPartition.put(SinkUtils.KAFKA_TOPIC_KEY, "test-topic"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java index dee4a24106e36..9bcf117e73fa2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java @@ -44,5 +44,4 @@ public void basicOperations() { assertNull(table.get("foo", 6)); assertTrue(table.row("foo").isEmpty()); } - } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java index 1f25dd15f514c..b40683865b454 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java @@ -65,12 +65,10 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic; @@ -162,8 +160,8 @@ public void shouldNotCreateTopicWhenItAlreadyExists() { NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, "myTopic", Collections.singletonList(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, "myTopic", List.of(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); assertFalse(admin.createTopic(newTopic)); assertTrue(admin.createTopics(newTopic).isEmpty()); @@ -308,12 +306,12 @@ public void describeShouldReturnTopicDescriptionWhenTopicExists() { NewTopic newTopic = TopicAdmin.defineTopic(topicName).partitions(1).compacted().build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, 
cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); Map desc = admin.describeTopics(newTopic.name()); assertFalse(desc.isEmpty()); - TopicDescription topicDesc = new TopicDescription(topicName, false, Collections.singletonList(topicPartitionInfo)); + TopicDescription topicDesc = new TopicDescription(topicName, false, List.of(topicPartitionInfo)); assertEquals(desc.get("myTopic"), topicDesc); } } @@ -382,14 +380,14 @@ public void describeTopicConfigShouldReturnMapWithNullValueWhenTopicDoesNotExist public void describeTopicConfigShouldReturnTopicConfigWhenTopicExists() { String topicName = "myTopic"; NewTopic newTopic = TopicAdmin.defineTopic(topicName) - .config(Collections.singletonMap("foo", "bar")) + .config(Map.of("foo", "bar")) .partitions(1) .compacted() .build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); Map result = admin.describeTopicConfigs(newTopic.name()); assertFalse(result.isEmpty()); @@ -439,11 +437,11 @@ public void verifyingTopicCleanupPolicyShouldReturnFalseWhenTopicAuthorizationEr @Test public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy() { String topicName = "myTopic"; - Map topicConfigs = Collections.singletonMap("cleanup.policy", "compact"); + Map topicConfigs = Map.of("cleanup.policy", "compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertTrue(result); @@ -453,11 +451,11 @@ public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy @Test public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() { String topicName = "myTopic"; - Map topicConfigs = Collections.singletonMap("cleanup.policy", "delete"); + Map topicConfigs = Map.of("cleanup.policy", "delete"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), 
cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); @@ -467,11 +465,11 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() { @Test public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPolicy() { String topicName = "myTopic"; - Map topicConfigs = Collections.singletonMap("cleanup.policy", "delete,compact"); + Map topicConfigs = Map.of("cleanup.policy", "delete,compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); @@ -481,11 +479,11 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPol @Test public void verifyingGettingTopicCleanupPolicies() { String topicName = "myTopic"; - Map topicConfigs = Collections.singletonMap("cleanup.policy", "compact"); + Map topicConfigs = Map.of("cleanup.policy", "compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); - mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); + mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); Set policies = admin.topicCleanupPolicy("myTopic"); assertEquals(1, policies.size()); @@ -502,7 +500,7 @@ public void verifyingGettingTopicCleanupPolicies() { public void retryEndOffsetsShouldRethrowUnknownVersionException() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -520,7 +518,7 @@ public void retryEndOffsetsShouldRethrowUnknownVersionException() { public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); @@ -549,7 
+547,7 @@ public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException( public void retryEndOffsetsShouldRetryWhenTopicNotFound() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); @@ -561,7 +559,7 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { TopicAdmin admin = new TopicAdmin(env.adminClient()); Map endoffsets = admin.retryEndOffsets(tps, Duration.ofMillis(100), 1); - assertEquals(Collections.singletonMap(tp1, offset), endoffsets); + assertEquals(Map.of(tp1, offset), endoffsets); } } @@ -569,7 +567,7 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -586,7 +584,7 @@ public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -602,7 +600,7 @@ public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErro public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv( @@ -620,7 +618,7 @@ public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() { public void endOffsetsShouldFailWithNonRetriableWhenUnknownErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -639,7 +637,7 @@ public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() { Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { TopicAdmin admin = new TopicAdmin(env.adminClient()); - Map offsets = admin.endOffsets(Collections.emptySet()); + Map offsets = admin.endOffsets(Set.of()); assertTrue(offsets.isEmpty()); } } @@ -648,7 +646,7 @@ public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() { public void endOffsetsShouldReturnOffsetsForOnePartition() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); long offset = 1000L; Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv 
env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -667,7 +665,7 @@ public void endOffsetsShouldReturnOffsetsForMultiplePartitions() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); TopicPartition tp2 = new TopicPartition(topicName, 1); - Set tps = new HashSet<>(Arrays.asList(tp1, tp2)); + Set tps = Set.of(tp1, tp2); long offset1 = 1001; long offset2 = 1002; Cluster cluster = createCluster(1, topicName, 2); @@ -687,7 +685,7 @@ public void endOffsetsShouldReturnOffsetsForMultiplePartitions() { public void endOffsetsShouldFailWhenAnyTopicPartitionHasError() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Collections.singleton(tp1); + Set tps = Set.of(tp1); Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); @@ -719,8 +717,8 @@ private Cluster createCluster(int numNodes, String topicName, int partitions) { "mockClusterId", nodes.values(), pInfos, - Collections.emptySet(), - Collections.emptySet(), + Set.of(), + Set.of(), leader); } @@ -738,9 +736,9 @@ private MetadataResponse prepareMetadataResponse(Cluster cluster, Errors topicEr .setPartitionIndex(pInfo.partition()) .setLeaderId(pInfo.leader().id()) .setLeaderEpoch(234) - .setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).collect(Collectors.toList())) - .setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toList())) - .setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).collect(Collectors.toList())); + .setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).toList()) + .setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).toList()) + .setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).toList()); pms.add(pm); } MetadataResponseTopic tm = new MetadataResponseTopic() @@ -788,7 +786,7 @@ private ListOffsetsResponse listOffsetsResultWithClusterAuthorizationException(T } private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1) { - return listOffsetsResult(null, Collections.singletonMap(tp1, offset1)); + return listOffsetsResult(null, Map.of(tp1, offset1)); } private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1, TopicPartition tp2, Long offset2) { @@ -890,7 +888,7 @@ protected void assertTopic(MockAdminClient admin, String topicName, int expected protected TopicDescription topicDescription(MockAdminClient admin, String topicName) throws ExecutionException, InterruptedException { - DescribeTopicsResult result = admin.describeTopics(Collections.singleton(topicName)); + DescribeTopicsResult result = admin.describeTopics(Set.of(topicName)); Map> byName = result.topicNameValues(); return byName.get(topicName).get(); } @@ -959,8 +957,8 @@ private DescribeConfigsResponse describeConfigsResponse(ApiError error, NewTopic .map(e -> new DescribeConfigsResponseData.DescribeConfigsResourceResult() .setName(e.getKey()) .setValue(e.getValue())) - .collect(Collectors.toList()))) - .collect(Collectors.toList()); + .toList())) + .toList(); return new DescribeConfigsResponse(new DescribeConfigsResponseData().setThrottleTimeMs(1000).setResults(results)); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java index ca358f18f4360..ecf25761e96fd 
100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java @@ -33,12 +33,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT; @@ -136,7 +134,7 @@ public void testTopicCreationWhenTopicCreationIsEnabled() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(topicCreation.defaultTopicGroup(), groups.get(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -155,7 +153,7 @@ public void testTopicCreationWhenTopicCreationIsDisabled() { assertFalse(topicCreation.isTopicCreationEnabled()); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertNull(topicCreation.defaultTopicGroup()); - assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); + assertEquals(Map.of(), topicCreation.topicGroups()); assertNull(topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -170,7 +168,7 @@ public void testEmptyTopicCreation() { assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertNull(topicCreation.defaultTopicGroup()); assertEquals(0, topicCreation.topicGroups().size()); - assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); + assertEquals(Map.of(), topicCreation.topicGroups()); assertNull(topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -188,14 +186,14 @@ public void withDefaultTopicCreation() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(DEFAULT_PARTITIONS, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(1, groups.size()); - assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); // verify topic creation 
TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -206,7 +204,7 @@ public void withDefaultTopicCreation() { assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name()); assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); - assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); + assertEquals(Map.of(), topicCreation.topicGroups()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -216,7 +214,7 @@ public void withDefaultTopicCreation() { assertEquals(FOO_TOPIC, topicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, topicSpec.replicationFactor()); assertEquals(DEFAULT_PARTITIONS, topicSpec.numPartitions()); - assertEquals(Collections.emptyMap(), topicSpec.configs()); + assertEquals(Map.of(), topicSpec.configs()); } @Test @@ -242,14 +240,14 @@ public void topicCreationWithDefaultGroupAndCustomProps() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(replicas, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(topicProps, sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(1, groups.size()); - assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -260,7 +258,7 @@ public void topicCreationWithDefaultGroupAndCustomProps() { assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name()); assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); - assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); + assertEquals(Map.of(), topicCreation.topicGroups()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -293,14 +291,14 @@ public void topicCreationWithOneGroup() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), 
sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(2, groups.size()); - assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -319,7 +317,7 @@ public void topicCreationWithOneGroup() { assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(1, topicCreation.topicGroups().size()); - assertEquals(Collections.singleton(FOO_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -329,7 +327,7 @@ public void topicCreationWithOneGroup() { assertEquals(BAR_TOPIC, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); + assertEquals(Map.of(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); @@ -358,14 +356,14 @@ public void topicCreationWithOneGroupAndCombinedRegex() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(2, groups.size()); - assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -385,7 +383,7 @@ public void topicCreationWithOneGroupAndCombinedRegex() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(1, topicCreation.topicGroups().size()); - assertEquals(Collections.singleton(FOO_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP), 
topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(fooGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -435,14 +433,14 @@ public void topicCreationWithTwoGroups() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(3, groups.size()); - assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -466,7 +464,7 @@ public void topicCreationWithTwoGroups() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -480,7 +478,7 @@ public void topicCreationWithTwoGroups() { assertEquals(otherTopic, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); + assertEquals(Map.of(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); @@ -514,7 +512,7 @@ public void testTopicCreationWithSingleTransformation() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(groups.get(DEFAULT_TOPIC_CREATION_GROUP), topicCreation.defaultTopicGroup()); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -567,14 +565,14 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertTrue(sourceConfig.usesTopicCreation()); 
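Note: the swaps in these test hunks from the java.util.Collections helpers to the Java 9+ collection factories (Map.of, List.of, Set.of) are behavior-preserving for the data used here, but the factories are stricter: they reject null keys, values, and elements, and Set.of also rejects duplicate arguments, where new HashSet<>(Arrays.asList(...)) silently de-duplicates. A minimal standalone sketch of the difference (illustration only, not part of the patch; the class name and literals are made up):

    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    public class CollectionFactorySemantics {
        public static void main(String[] args) {
            // Equal for ordinary, non-null data: both are immutable single-entry maps.
            Map<String, Long> legacy = Collections.singletonMap("kafka_offset", 100L);
            Map<String, Long> factory = Map.of("kafka_offset", 100L);
            System.out.println(legacy.equals(factory));                  // true

            // Collections.singletonMap tolerates a null value; Map.of does not.
            Map<String, Long> withNull = Collections.singletonMap("kafka_offset", null); // allowed
            try {
                Map.of("kafka_offset", (Long) null);                     // NullPointerException
            } catch (NullPointerException e) {
                System.out.println("Map.of rejects null keys and values");
            }

            // new HashSet<>(Arrays.asList(...)) silently de-duplicates; Set.of throws instead.
            try {
                Set.of("myTopic", "myTopic");                            // IllegalArgumentException
            } catch (IllegalArgumentException e) {
                System.out.println("Set.of rejects duplicate elements");
            }
            System.out.println(withNull.containsKey("kafka_offset"));    // true
        }
    }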
assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(3, groups.size()); - assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet()); + assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -598,7 +596,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); + assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -612,7 +610,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertEquals(otherTopic, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); + assertEquals(Map.of(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java index 8dc22edb86309..c901361cb64b4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java @@ -25,7 +25,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Map; @@ -147,7 +146,7 @@ protected Optional checkBrokersUp(int numBrokers, BiFunction topicNameSet = new HashSet<>(Arrays.asList(topicNames)); + Set topicNameSet = Set.of(topicNames); AtomicReference> existingTopics = new AtomicReference<>(topicNameSet); waitForCondition( () -> checkTopicsExist(topicNameSet, (actual, expected) -> { @@ -164,7 +163,7 @@ public void assertTopicsDoNotExist(String... 
topicNames) throws InterruptedExcep * @param topicNames the names of the topics that are expected to exist */ public void assertTopicsExist(String... topicNames) throws InterruptedException { - Set topicNameSet = new HashSet<>(Arrays.asList(topicNames)); + Set topicNameSet = Set.of(topicNames); AtomicReference> missingTopics = new AtomicReference<>(topicNameSet); waitForCondition( () -> checkTopicsExist(topicNameSet, (actual, expected) -> { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java index 569d1fb0fd093..ddd7eab4e8b3e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java @@ -47,7 +47,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -282,7 +281,7 @@ public String configureConnector(CreateConnectorRequest createConnectorRequest) throw new ConnectException("Failed to serialize connector creation request: " + createConnectorRequest); } - Response response = requestPost(url, requestBody, Collections.emptyMap()); + Response response = requestPost(url, requestBody, Map.of()); if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) { return responseToString(response); } else { @@ -449,7 +448,7 @@ public void resumeConnector(String connName) { */ public void restartConnector(String connName) { String url = endpointForResource(String.format("connectors/%s/restart", connName)); - Response response = requestPost(url, "", Collections.emptyMap()); + Response response = requestPost(url, "", Map.of()); if (response.getStatus() >= Response.Status.BAD_REQUEST.getStatusCode()) { throw new ConnectRestException(response.getStatus(), "Could not execute POST request. Error response: " + responseToString(response)); @@ -466,7 +465,7 @@ public void restartConnector(String connName) { */ public void restartTask(String connName, int taskNum) { String url = endpointForResource(String.format("connectors/%s/tasks/%d/restart", connName, taskNum)); - Response response = requestPost(url, "", Collections.emptyMap()); + Response response = requestPost(url, "", Map.of()); if (response.getStatus() >= Response.Status.BAD_REQUEST.getStatusCode()) { throw new ConnectRestException(response.getStatus(), "Could not execute POST request. 
Error response: " + responseToString(response)); @@ -492,7 +491,7 @@ public ConnectorStateInfo restartConnectorAndTasks(String connName, boolean only } else { restartEndpoint = endpointForResource(restartPath); } - Response response = requestPost(restartEndpoint, "", Collections.emptyMap()); + Response response = requestPost(restartEndpoint, "", Map.of()); try { if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) { //only the 202 status returns a body @@ -577,7 +576,7 @@ public ActiveTopicsInfo connectorTopics(String connectorName) { .readerFor(new TypeReference>>>() { }) .readValue(responseToString(response)); return new ActiveTopicsInfo(connectorName, - activeTopics.get(connectorName).getOrDefault("topics", Collections.emptyList())); + activeTopics.get(connectorName).getOrDefault("topics", List.of())); } } catch (IOException e) { log.error("Could not read connector state from response: {}", @@ -688,7 +687,7 @@ public ConnectorOffsets connectorOffsets(String connectorName) { public String alterSourceConnectorOffset(String connectorName, Map partition, Map offset) { return alterConnectorOffsets( connectorName, - new ConnectorOffsets(Collections.singletonList(new ConnectorOffset(partition, offset))) + new ConnectorOffsets(List.of(new ConnectorOffset(partition, offset))) ); } @@ -705,7 +704,7 @@ public String alterSourceConnectorOffset(String connectorName, Map pa public String alterSinkConnectorOffset(String connectorName, TopicPartition topicPartition, Long offset) { return alterConnectorOffsets( connectorName, - SinkUtils.consumerGroupOffsetsToConnectorOffsets(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset))) + SinkUtils.consumerGroupOffsetsToConnectorOffsets(Map.of(topicPartition, new OffsetAndMetadata(offset))) ); } @@ -929,7 +928,7 @@ public EmbeddedKafkaCluster kafka() { * @throws ConnectException if execution of the GET request fails */ public Response requestGet(String url) { - return requestHttpMethod(url, null, Collections.emptyMap(), "GET"); + return requestHttpMethod(url, null, Map.of(), "GET"); } /** @@ -941,7 +940,7 @@ public Response requestGet(String url) { * @throws ConnectException if execution of the PUT request fails */ public Response requestPut(String url, String body) { - return requestHttpMethod(url, body, Collections.emptyMap(), "PUT"); + return requestHttpMethod(url, body, Map.of(), "PUT"); } /** @@ -966,7 +965,7 @@ public Response requestPost(String url, String body, Map headers * @throws ConnectException if execution of the PATCH request fails */ public Response requestPatch(String url, String body) { - return requestHttpMethod(url, body, Collections.emptyMap(), "PATCH"); + return requestHttpMethod(url, body, Map.of(), "PATCH"); } /** @@ -977,7 +976,7 @@ public Response requestPatch(String url, String body) { * @throws ConnectException if execution of the DELETE request fails */ public Response requestDelete(String url) { - return requestHttpMethod(url, null, Collections.emptyMap(), "DELETE"); + return requestHttpMethod(url, null, Map.of(), "DELETE"); } /** diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java index acf70a9a52033..230d293b51b6d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java @@ 
-31,7 +31,6 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -109,8 +108,8 @@ public String toString() { @Override protected Set workers() { return connectWorker != null - ? Collections.singleton(connectWorker) - : Collections.emptySet(); + ? Set.of(connectWorker) + : Set.of(); } public Response healthCheck() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java index 5d075ab75e0de..7913d60fc2837 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java @@ -63,9 +63,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -112,7 +110,7 @@ public class EmbeddedKafkaCluster { private KafkaProducer producer; public EmbeddedKafkaCluster(final int numBrokers, final Properties brokerConfig) { - this(numBrokers, brokerConfig, Collections.emptyMap()); + this(numBrokers, brokerConfig, Map.of()); } public EmbeddedKafkaCluster(final int numBrokers, @@ -181,7 +179,7 @@ public void start() { */ public void verifyClusterReadiness() { String consumerGroupId = UUID.randomUUID().toString(); - Map consumerConfig = Collections.singletonMap(GROUP_ID_CONFIG, consumerGroupId); + Map consumerConfig = Map.of(GROUP_ID_CONFIG, consumerGroupId); String topic = "consumer-warmup-" + consumerGroupId; try { @@ -205,8 +203,8 @@ public void verifyClusterReadiness() { } try (Admin admin = createAdminClient()) { - admin.deleteConsumerGroups(Collections.singleton(consumerGroupId)).all().get(30, TimeUnit.SECONDS); - admin.deleteTopics(Collections.singleton(topic)).all().get(30, TimeUnit.SECONDS); + admin.deleteConsumerGroups(Set.of(consumerGroupId)).all().get(30, TimeUnit.SECONDS); + admin.deleteTopics(Set.of(topic)).all().get(30, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw new AssertionError("Failed to clean up cluster health check resource(s)", e); } @@ -282,7 +280,7 @@ public boolean sslEnabled() { * @return the map of optional {@link TopicDescription} keyed by the topic name */ public Map> describeTopics(String... topicNames) { - return describeTopics(new HashSet<>(Arrays.asList(topicNames))); + return describeTopics(Set.of(topicNames)); } /** @@ -355,7 +353,7 @@ public void createTopic(String topic) { * @param topic The name of the topic. */ public void createTopic(String topic, int partitions) { - createTopic(topic, partitions, 1, Collections.emptyMap()); + createTopic(topic, partitions, 1, Map.of()); } /** @@ -364,7 +362,7 @@ public void createTopic(String topic, int partitions) { * @param topic The name of the topic. 
*/ public void createTopic(String topic, int partitions, int replication, Map topicConfig) { - createTopic(topic, partitions, replication, topicConfig, Collections.emptyMap()); + createTopic(topic, partitions, replication, topicConfig, Map.of()); } /** @@ -388,7 +386,7 @@ public void createTopic(String topic, int partitions, int replication, Map adminClientConfig) { } public Admin createAdminClient() { - return createAdminClient(Collections.emptyMap()); + return createAdminClient(Map.of()); } /** @@ -449,7 +447,7 @@ public Admin createAdminClient() { * @return a {@link ConsumerRecords} collection containing at least n records. */ public ConsumerRecords consume(int n, long maxDuration, String... topics) { - return consume(n, maxDuration, Collections.emptyMap(), topics); + return consume(n, maxDuration, Map.of(), topics); } /** @@ -525,10 +523,10 @@ public ConsumerRecords consumeAll( long remainingTimeMs; Set topicPartitions; Map endOffsets; - try (Admin admin = createAdminClient(adminProps != null ? adminProps : Collections.emptyMap())) { + try (Admin admin = createAdminClient(adminProps != null ? adminProps : Map.of())) { remainingTimeMs = endTimeMs - System.currentTimeMillis(); - topicPartitions = listPartitions(remainingTimeMs, admin, Arrays.asList(topics)); + topicPartitions = listPartitions(remainingTimeMs, admin, List.of(topics)); remainingTimeMs = endTimeMs - System.currentTimeMillis(); endOffsets = readEndOffsets(remainingTimeMs, admin, topicPartitions); @@ -540,7 +538,7 @@ public ConsumerRecords consumeAll( tp -> new ArrayList<>() )); Map nextOffsets = new HashMap<>(); - try (Consumer consumer = createConsumer(consumerProps != null ? consumerProps : Collections.emptyMap())) { + try (Consumer consumer = createConsumer(consumerProps != null ? consumerProps : Map.of())) { consumer.assign(topicPartitions); while (!endOffsets.isEmpty()) { @@ -556,7 +554,7 @@ public ConsumerRecords consumeAll( } else { remainingTimeMs = endTimeMs - System.currentTimeMillis(); if (remainingTimeMs <= 0) { - throw new AssertionError("failed to read to end of topic(s) " + Arrays.asList(topics) + " within " + maxDurationMs + "ms"); + throw new AssertionError("failed to read to end of topic(s) " + List.of(topics) + " within " + maxDurationMs + "ms"); } // We haven't reached the end offset yet; need to keep polling ConsumerRecords recordBatch = consumer.poll(Duration.ofMillis(remainingTimeMs)); @@ -574,7 +572,7 @@ public ConsumerRecords consumeAll( public long endOffset(TopicPartition topicPartition) throws TimeoutException, InterruptedException, ExecutionException { try (Admin admin = createAdminClient()) { - Map offsets = Collections.singletonMap( + Map offsets = Map.of( topicPartition, OffsetSpec.latest() ); return admin.listOffsets(offsets) @@ -663,9 +661,9 @@ public KafkaConsumer createConsumerAndSubscribeTo(Map createConsumerAndSubscribeTo(Map consumerProps, ConsumerRebalanceListener rebalanceListener, String... 
topics) { KafkaConsumer consumer = createConsumer(consumerProps); if (rebalanceListener != null) { - consumer.subscribe(Arrays.asList(topics), rebalanceListener); + consumer.subscribe(List.of(topics), rebalanceListener); } else { - consumer.subscribe(Arrays.asList(topics)); + consumer.subscribe(List.of(topics)); } return consumer; } diff --git a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java index 9f3de801f16c8..f68b4eb4e581a 100644 --- a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java +++ b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java @@ -49,9 +49,9 @@ public void configure(final Map configs, final boolean isKey) { private String version(InputStream stream) throws IOException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { return reader.lines() - .filter(s -> !s.isEmpty() && !s.startsWith("#")) - .collect(Collectors.toList()) - .get(0); + .filter(s -> !s.isEmpty() && !s.startsWith("#")) + .findFirst() + .get(); } } diff --git a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java index caeb4340d6e60..863ed9fad97dc 100644 --- a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java +++ b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java @@ -49,9 +49,9 @@ public void configure(final Map configs, final boolean isKey) { private String version(InputStream stream) throws IOException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { return reader.lines() - .filter(s -> !s.isEmpty() && !s.startsWith("#")) - .collect(Collectors.toList()) - .get(0); + .filter(s -> !s.isEmpty() && !s.startsWith("#")) + .findFirst() + .get(); } } diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-converter/META-INF/services/org.apache.kafka.connect.storage.Converter b/connect/runtime/src/test/resources/test-plugins/versioned-converter/META-INF/services/org.apache.kafka.connect.storage.Converter new file mode 100644 index 0000000000000..d37bb90859a2a --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-converter/META-INF/services/org.apache.kafka.connect.storage.Converter @@ -0,0 +1,16 @@ + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
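Note: the stream changes in this area, Collectors.toList() becoming Stream.toList() in TopicAdminTest and collect(Collectors.toList()).get(0) becoming findFirst().get() in ReadVersionFromResource, keep the same results for these inputs, with two caveats: Stream.toList() returns an unmodifiable list, and an empty stream now surfaces as a NoSuchElementException from Optional.get() rather than an IndexOutOfBoundsException from get(0). A small sketch of both behaviors (illustration only, not part of the patch):

    import java.util.List;
    import java.util.NoSuchElementException;
    import java.util.stream.Stream;

    public class StreamToListSemantics {
        public static void main(String[] args) {
            // Stream.toList() produces an unmodifiable list, unlike the ArrayList
            // typically returned by Collectors.toList().
            List<String> versions = Stream.of("# comment", "", "1.1.0").toList();
            try {
                versions.add("2.0.0");                                   // UnsupportedOperationException
            } catch (UnsupportedOperationException e) {
                System.out.println("Stream.toList() is unmodifiable");
            }

            // findFirst() stops at the first match and reports "nothing found" via Optional.
            String version = Stream.of("# comment", "", "1.1.0")
                    .filter(s -> !s.isEmpty() && !s.startsWith("#"))
                    .findFirst()
                    .get();                                              // "1.1.0"
            System.out.println(version);

            try {
                Stream.<String>empty().findFirst().get();                // NoSuchElementException
            } catch (NoSuchElementException e) {
                System.out.println("Optional.get() fails on an empty stream");
            }
        }
    }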
+ # See the License for the specific language governing permissions and + # limitations under the License. + +test.plugins.VersionedConverter diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-converter/test/plugins/VersionedConverter.java b/connect/runtime/src/test/resources/test-plugins/versioned-converter/test/plugins/VersionedConverter.java new file mode 100644 index 0000000000000..766f29330ef8b --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-converter/test/plugins/VersionedConverter.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test.plugins; + +import java.util.Map; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.components.Versioned; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaAndValue; +import org.apache.kafka.connect.storage.Converter; + +/** + * Converter to test multiversioning of plugins. + * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation.
+ */ +public class VersionedConverter implements Converter, Versioned { + + public VersionedConverter() { + super(); + } + + @Override + public void configure(final Map<String, ?> configs, final boolean isKey) { + } + + @Override + public byte[] fromConnectData(final String topic, final Schema schema, final Object value) { + return new byte[0]; + } + + @Override + public SchemaAndValue toConnectData(final String topic, final byte[] value) { + return null; + } + + @Override + public ConfigDef config() { + return new ConfigDef() + // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation + // this will help with testing different configdef for different version of converter + .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs") + .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs"); + } + + @Override + public String version() { + return "PLACEHOLDER_FOR_VERSION"; + } + +} diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/META-INF/services/org.apache.kafka.connect.storage.HeaderConverter b/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/META-INF/services/org.apache.kafka.connect.storage.HeaderConverter new file mode 100644 index 0000000000000..25e4b7665be99 --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/META-INF/services/org.apache.kafka.connect.storage.HeaderConverter @@ -0,0 +1,16 @@ + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +test.plugins.VersionedHeaderConverter diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/test/plugins/VersionedHeaderConverter.java b/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/test/plugins/VersionedHeaderConverter.java new file mode 100644 index 0000000000000..c0ef947e6694f --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-header-converter/test/plugins/VersionedHeaderConverter.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test.plugins; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.components.Versioned; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaAndValue; +import org.apache.kafka.connect.storage.HeaderConverter; + +import java.util.Map; + +/** + * Header Converter to test multiversioning of plugins. + * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation. + */ +public class VersionedHeaderConverter implements HeaderConverter, Versioned { + + public VersionedHeaderConverter() { + super(); + } + + @Override + public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) { + return null; + } + + @Override + public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) { + return new byte[0]; + } + + @Override + public void configure(final Map<String, ?> configs) { + } + + @Override + public ConfigDef config() { + return new ConfigDef() + // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation + // this will help with testing different configdef for different version of header converter + .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs") + .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs"); + } + + @Override + public String version() { + return "PLACEHOLDER_FOR_VERSION"; + } + + @Override + public void close() { + } +} diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-predicate/META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate b/connect/runtime/src/test/resources/test-plugins/versioned-predicate/META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate new file mode 100644 index 0000000000000..af841817aba2f --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-predicate/META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate @@ -0,0 +1,16 @@ + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License.
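Note: the versioned converter and header converter plugins above, and the predicate and connector plugins that follow, share one pattern: the literal PLACEHOLDER_FOR_VERSION is substituted with the real plugin version when the test artifact is compiled, so both version() and the default of the "version-specific-config" key identify which artifact was loaded. A hypothetical check of that pattern could look like this (the helper name, the cast, and the idea of comparing both values are assumptions for illustration, not part of the patch):

    import java.util.Map;

    import org.apache.kafka.connect.storage.Converter;

    public class VersionedPluginCheck {
        // Hypothetical helper: verifies that a compiled test plugin reports the version
        // that was substituted for PLACEHOLDER_FOR_VERSION, both via Versioned.version()
        // and via the default value of its version-specific config key. Assumes the
        // converter implements org.apache.kafka.connect.components.Versioned, as the
        // test plugins above do.
        static void assertPluginVersion(Converter converter, String expectedVersion) {
            String reported = ((org.apache.kafka.connect.components.Versioned) converter).version();
            Map<String, Object> defaults = converter.config().defaultValues();
            Object versionDefault = defaults.get("version-specific-config");
            if (!expectedVersion.equals(reported) || !expectedVersion.equals(versionDefault)) {
                throw new AssertionError("Plugin did not report expected version " + expectedVersion);
            }
        }
    }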
+ +test.plugins.VersionedPredicate diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-predicate/test/plugins/VersionedPredicate.java b/connect/runtime/src/test/resources/test-plugins/versioned-predicate/test/plugins/VersionedPredicate.java new file mode 100644 index 0000000000000..2e92c79c3517d --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-predicate/test/plugins/VersionedPredicate.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test.plugins; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.connect.components.Versioned; +import org.apache.kafka.connect.connector.ConnectRecord; +import org.apache.kafka.connect.transforms.predicates.Predicate; + +import java.util.Map; + +/** + * Predicate to test multiversioning of plugins. + * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation. + */ +public class VersionedPredicate<R extends ConnectRecord<R>> implements Predicate<R>, Versioned { + + @Override + public String version() { + return "PLACEHOLDER_FOR_VERSION"; + } + + @Override + public ConfigDef config() { + return new ConfigDef() + // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation + // this will help with testing different configdef for different version of the predicate + .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs") + .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs"); + } + + @Override + public boolean test(R record) { + return false; + } + + @Override + public void close() { + + } + + @Override + public void configure(Map<String, ?> configs) { + + } +} \ No newline at end of file diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/META-INF/services/org.apache.kafka.connect.sink.SinkConnector b/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/META-INF/services/org.apache.kafka.connect.sink.SinkConnector new file mode 100644 index 0000000000000..a5c560853f0ed --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/META-INF/services/org.apache.kafka.connect.sink.SinkConnector @@ -0,0 +1,16 @@ + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership.
+ # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +test.plugins.VersionedSinkConnector diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/test/plugins/VersionedSinkConnector.java b/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/test/plugins/VersionedSinkConnector.java new file mode 100644 index 0000000000000..9710099368bf5 --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/versioned-sink-connector/test/plugins/VersionedSinkConnector.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test.plugins; + +import java.util.Collection; +import java.util.Collections; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.sink.SinkConnector; +import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.kafka.connect.sink.SinkTask; + +/** + * VersionedSinkConnector is a test sink connector used to test multiversioning of plugins. + * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation.
+ */
+public class VersionedSinkConnector extends SinkConnector {
+
+    public VersionedSinkConnector() {
+    }
+
+    @Override
+    public void start(Map<String, String> props) {
+    }
+
+    @Override
+    public Class<? extends Task> taskClass() {
+        return VersionedSinkConnectorTask.class;
+    }
+
+    @Override
+    public List<Map<String, String>> taskConfigs(int maxTasks) {
+        List<Map<String, String>> configs = new ArrayList<>();
+        for (int i = 0; i < maxTasks; i++) {
+            configs.add(Collections.singletonMap("task-config-version", "PLACEHOLDER_FOR_VERSION"));
+        }
+        return configs;
+    }
+
+    @Override
+    public void stop() {
+    }
+
+    @Override
+    public ConfigDef config() {
+        return new ConfigDef()
+                // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation
+                // this will help with testing different ConfigDefs for different versions of the connector
+                .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs")
+                .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs");
+    }
+
+    @Override
+    public String version() {
+        return "PLACEHOLDER_FOR_VERSION";
+    }
+
+    public static class VersionedSinkConnectorTask extends SinkTask {
+
+        @Override
+        public String version() {
+            return "PLACEHOLDER_FOR_VERSION";
+        }
+
+        @Override
+        public void start(Map<String, String> props) {
+        }
+
+        @Override
+        public void put(Collection<SinkRecord> records) {
+        }
+
+        @Override
+        public void stop() {
+        }
+    }
+}
diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/META-INF/services/org.apache.kafka.connect.source.SourceConnector b/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/META-INF/services/org.apache.kafka.connect.source.SourceConnector
new file mode 100644
index 0000000000000..efee272749d59
--- /dev/null
+++ b/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/META-INF/services/org.apache.kafka.connect.source.SourceConnector
@@ -0,0 +1,16 @@
+ # Licensed to the Apache Software Foundation (ASF) under one or more
+ # contributor license agreements. See the NOTICE file distributed with
+ # this work for additional information regarding copyright ownership.
+ # The ASF licenses this file to You under the Apache License, Version 2.0
+ # (the "License"); you may not use this file except in compliance with
+ # the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+test.plugins.VersionedSourceConnector
diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/test/plugins/VersionedSourceConnector.java b/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/test/plugins/VersionedSourceConnector.java
new file mode 100644
index 0000000000000..4ef066b4c8c9b
--- /dev/null
+++ b/connect/runtime/src/test/resources/test-plugins/versioned-source-connector/test/plugins/VersionedSourceConnector.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
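// ------------------------------------------------------------------------------------------------
// Editorial note, not part of the patch: a small, hypothetical sketch of the task fan-out
// implemented by the VersionedSinkConnector above. Every task configuration it generates carries
// the substituted plugin version under the "task-config-version" key, which is what the
// multi-versioning tests can assert on. Class and method names below are illustrative only.
// ------------------------------------------------------------------------------------------------
package test.plugins;

import java.util.List;
import java.util.Map;

public class VersionedSinkConnectorSketch {

    public static void main(String[] args) {
        VersionedSinkConnector connector = new VersionedSinkConnector();
        // One map per task; each map exposes the version the plugin was compiled with.
        List<Map<String, String>> taskConfigs = connector.taskConfigs(3);
        taskConfigs.forEach(config -> System.out.println(config.get("task-config-version")));
    }
}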
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package test.plugins;
+
+import java.util.Collections;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.kafka.connect.source.SourceConnector;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.source.SourceTask;
+
+/**
+ * VersionedSourceConnector is a test connector used to verify multi-versioning of plugins; it overrides the version method.
+ * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation.
+ */
+public class VersionedSourceConnector extends SourceConnector {
+
+    public VersionedSourceConnector() {
+    }
+
+    @Override
+    public void start(Map<String, String> props) {
+    }
+
+    @Override
+    public Class<? extends Task> taskClass() {
+        return VersionedSourceConnectorTask.class;
+    }
+
+    @Override
+    public List<Map<String, String>> taskConfigs(int maxTasks) {
+        List<Map<String, String>> configs = new ArrayList<>();
+        for (int i = 0; i < maxTasks; i++) {
+            configs.add(Collections.singletonMap("task-config-version", "PLACEHOLDER_FOR_VERSION"));
+        }
+        return configs;
+    }
+
+    @Override
+    public void stop() {
+    }
+
+    @Override
+    public ConfigDef config() {
+        return new ConfigDef()
+                // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation
+                // this will help with testing different ConfigDefs for different versions of the connector
+                .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs")
+                .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs");
+    }
+
+    @Override
+    public String version() {
+        return "PLACEHOLDER_FOR_VERSION";
+    }
+
+    public static class VersionedSourceConnectorTask extends SourceTask {
+
+        @Override
+        public String version() {
+            return "PLACEHOLDER_FOR_VERSION";
+        }
+
+        @Override
+        public void start(Map<String, String> props) {
+        }
+
+        @Override
+        public List<SourceRecord> poll() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public void stop() {
+        }
+    }
+}
diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-transformation/META-INF/services/org.apache.kafka.connect.transforms.Transformation b/connect/runtime/src/test/resources/test-plugins/versioned-transformation/META-INF/services/org.apache.kafka.connect.transforms.Transformation
new file mode 100644
index 0000000000000..7fed78370ff5d
--- /dev/null
+++ b/connect/runtime/src/test/resources/test-plugins/versioned-transformation/META-INF/services/org.apache.kafka.connect.transforms.Transformation
@@ -0,0 +1,16 @@
+ # Licensed to the Apache Software Foundation (ASF) under one or more
+ # contributor license agreements. See the NOTICE file distributed with
+ # this work for additional information regarding copyright ownership.
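// ------------------------------------------------------------------------------------------------
// Editorial note, not part of the patch: the META-INF/services files added for these test plugins
// are standard java.util.ServiceLoader provider manifests, and Connect's ServiceLoader-based plugin
// scanning uses such entries to discover connector, transformation and predicate classes inside a
// plugin location. A minimal, hypothetical illustration using ServiceLoader directly, assuming the
// compiled versioned-transformation artifact is on the classpath:
// ------------------------------------------------------------------------------------------------
import java.util.ServiceLoader;

import org.apache.kafka.connect.transforms.Transformation;

public class ServiceManifestSketch {

    public static void main(String[] args) {
        // With the manifest above on the classpath, test.plugins.VersionedTransformation would be
        // listed among the discovered Transformation implementations.
        ServiceLoader.load(Transformation.class)
                .forEach(transformation -> System.out.println(transformation.getClass().getName()));
    }
}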
+ # The ASF licenses this file to You under the Apache License, Version 2.0
+ # (the "License"); you may not use this file except in compliance with
+ # the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+test.plugins.VersionedTransformation
diff --git a/connect/runtime/src/test/resources/test-plugins/versioned-transformation/test/plugins/VersionedTransformation.java b/connect/runtime/src/test/resources/test-plugins/versioned-transformation/test/plugins/VersionedTransformation.java
new file mode 100644
index 0000000000000..0422834d027cb
--- /dev/null
+++ b/connect/runtime/src/test/resources/test-plugins/versioned-transformation/test/plugins/VersionedTransformation.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package test.plugins;
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.utils.AppInfoParser;
+import org.apache.kafka.connect.components.Versioned;
+import org.apache.kafka.connect.connector.ConnectRecord;
+import org.apache.kafka.connect.transforms.Transformation;
+
+import java.util.Map;
+
+/**
+ * Transformation to test multi-versioning of plugins.
+ * Any instance of the string PLACEHOLDER_FOR_VERSION will be replaced with the actual version during plugin compilation.
+ */
+public class VersionedTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
+
+
+    @Override
+    public R apply(R record) {
+        return null;
+    }
+
+    @Override
+    public String version() {
+        return "PLACEHOLDER_FOR_VERSION";
+    }
+
+    @Override
+    public ConfigDef config() {
+        return new ConfigDef()
+                // version specific config will have the default value (PLACEHOLDER_FOR_VERSION) replaced with the actual version during plugin compilation
+                // this will help with testing different ConfigDefs for different versions of the transformation
+                .define("version-specific-config", ConfigDef.Type.STRING, "PLACEHOLDER_FOR_VERSION", ConfigDef.Importance.HIGH, "version specific docs")
+                .define("other-config", ConfigDef.Type.STRING, "defaultVal", ConfigDef.Importance.HIGH, "other docs");
+    }
+
+    @Override
+    public void close() {
+
+    }
+
+    @Override
+    public void configure(Map<String, ?> configs) {
+
+    }
+}
diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java
index 267466a4b0b52..f598feede8c1d 100644
--- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java
+++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java
@@ -24,7 +24,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executors;
@@ -91,7 +90,7 @@ public Class taskClass() {
     @Override
     public List> taskConfigs(int maxTasks) {
         log.debug("Creating single task for MockConnector");
-        return Collections.singletonList(config);
+        return List.of(config);
     }
 
     @Override
diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java
index f69c58b99ab4c..49dc5e8a7e694 100644
--- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java
+++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java
@@ -23,7 +23,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -67,7 +66,7 @@ public List poll() {
                 throw new RuntimeException();
             }
         }
-        return Collections.emptyList();
+        return List.of();
     }
 
     @Override
diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java
index c40e0932e5317..d79c133f67383 100644
--- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java
+++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java
@@ -27,7 +27,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -104,7 +103,7 @@ public void start(Map props) {
         }
 
         throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
-        partition = Collections.singletonMap(ID_FIELD, id);
+        partition = Map.of(ID_FIELD, id);
         Map previousOffset = this.context.offsetStorageReader().offset(partition);
         if (previousOffset != null) {
             seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
@@ -124,7 +123,7 @@ public List poll() {
             throttler.throttle();
         }
 
-        Map ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno);
+        Map ccOffset = Map.of(SEQNO_FIELD, seqno);
         int
partitionVal = (int) (seqno % partitionCount); final Struct data; final SourceRecord srcRecord; @@ -158,10 +157,10 @@ public List poll() { System.out.println("{\"task\": " + id + ", \"seqno\": " + seqno + "}"); seqno++; count++; - return Collections.singletonList(srcRecord); + return List.of(srcRecord); } else { throttler.throttle(); - return Collections.emptyList(); + return List.of(); } } diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java index 49151b40d1ebb..1fe2bd318023c 100644 --- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java +++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java @@ -31,7 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -94,7 +93,7 @@ public void start(Map props) { throw new ConnectException("Invalid VerifiableSourceTask configuration", e); } - partition = Collections.singletonMap(ID_FIELD, id); + partition = Map.of(ID_FIELD, id); Map previousOffset = this.context.offsetStorageReader().offset(partition); if (previousOffset != null) seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1; @@ -129,11 +128,11 @@ public List poll() { } System.out.println(dataJson); - Map ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno); + Map ccOffset = Map.of(SEQNO_FIELD, seqno); Schema valueSchema = completeRecordData ? COMPLETE_VALUE_SCHEMA : Schema.INT64_SCHEMA; Object value = completeRecordData ? completeValue(data) : seqno; SourceRecord srcRecord = new SourceRecord(partition, ccOffset, topic, Schema.INT32_SCHEMA, id, valueSchema, value); - List result = Collections.singletonList(srcRecord); + List result = List.of(srcRecord); seqno++; return result; } diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java index 099e1a64882de..7c13ef4d785de 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java @@ -231,38 +231,26 @@ private Schema getOrBuildSchema(Schema valueSchema) { } private SchemaBuilder convertFieldType(Schema.Type type) { - switch (type) { - case INT8: - return SchemaBuilder.int8(); - case INT16: - return SchemaBuilder.int16(); - case INT32: - return SchemaBuilder.int32(); - case INT64: - return SchemaBuilder.int64(); - case FLOAT32: - return SchemaBuilder.float32(); - case FLOAT64: - return SchemaBuilder.float64(); - case BOOLEAN: - return SchemaBuilder.bool(); - case STRING: - return SchemaBuilder.string(); - default: - throw new DataException("Unexpected type in Cast transformation: " + type); - } + return switch (type) { + case INT8 -> SchemaBuilder.int8(); + case INT16 -> SchemaBuilder.int16(); + case INT32 -> SchemaBuilder.int32(); + case INT64 -> SchemaBuilder.int64(); + case FLOAT32 -> SchemaBuilder.float32(); + case FLOAT64 -> SchemaBuilder.float64(); + case BOOLEAN -> SchemaBuilder.bool(); + case STRING -> SchemaBuilder.string(); + default -> throw new DataException("Unexpected type in Cast transformation: " + type); + }; } private static Object encodeLogicalType(Schema schema, Object value) { - switch (schema.name()) { - case Date.LOGICAL_NAME: - return 
Date.fromLogical(schema, (java.util.Date) value); - case Time.LOGICAL_NAME: - return Time.fromLogical(schema, (java.util.Date) value); - case Timestamp.LOGICAL_NAME: - return Timestamp.fromLogical(schema, (java.util.Date) value); - } - return value; + return switch (schema.name()) { + case Date.LOGICAL_NAME -> Date.fromLogical(schema, (java.util.Date) value); + case Time.LOGICAL_NAME -> Time.fromLogical(schema, (java.util.Date) value); + case Timestamp.LOGICAL_NAME -> Timestamp.fromLogical(schema, (java.util.Date) value); + default -> value; + }; } private static Object castValueToType(Schema schema, Object value, Schema.Type targetType) { @@ -283,26 +271,17 @@ private static Object castValueToType(Schema schema, Object value, Schema.Type t value = encodeLogicalType(schema, value); } - switch (targetType) { - case INT8: - return castToInt8(value); - case INT16: - return castToInt16(value); - case INT32: - return castToInt32(value); - case INT64: - return castToInt64(value); - case FLOAT32: - return castToFloat32(value); - case FLOAT64: - return castToFloat64(value); - case BOOLEAN: - return castToBoolean(value); - case STRING: - return castToString(value); - default: - throw new DataException(targetType + " is not supported in the Cast transformation."); - } + return switch (targetType) { + case INT8 -> castToInt8(value); + case INT16 -> castToInt16(value); + case INT32 -> castToInt32(value); + case INT64 -> castToInt64(value); + case FLOAT32 -> castToFloat32(value); + case FLOAT64 -> castToFloat64(value); + case BOOLEAN -> castToBoolean(value); + case STRING -> castToString(value); + default -> throw new DataException(targetType + " is not supported in the Cast transformation."); + }; } catch (NumberFormatException e) { throw new DataException("Value (" + value.toString() + ") was out of range for requested data type", e); } diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java index c1d20a48c1d29..cd87c33a5095e 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java @@ -23,7 +23,6 @@ import org.apache.kafka.connect.header.ConnectHeaders; import org.apache.kafka.connect.header.Header; import org.apache.kafka.connect.header.Headers; -import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.util.HashSet; @@ -41,7 +40,8 @@ public class DropHeaders> implements Transformation MOVE; + case COPY_OPERATION -> COPY; + default -> throw new IllegalArgumentException(); + }; } public String toString() { diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java index c3a45d9170e59..7d37d548eb486 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java @@ -25,7 +25,6 @@ import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Values; import org.apache.kafka.connect.errors.DataException; -import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.math.BigDecimal; @@ -55,7 +54,8 @@ public abstract 
class MaskField> implements Transform private static final String REPLACE_NULL_WITH_DEFAULT_CONFIG = "replace.null.with.default"; public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, new NonEmptyListValidator(), + .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, + ConfigDef.ValidList.anyNonDuplicateValues(false, false), ConfigDef.Importance.HIGH, "Names of fields to mask.") .define(REPLACEMENT_CONFIG, ConfigDef.Type.STRING, null, new ConfigDef.NonEmptyString(), ConfigDef.Importance.LOW, "Custom value replacement, that will be applied to all" @@ -65,32 +65,30 @@ public abstract class MaskField> implements Transform private static final String PURPOSE = "mask fields"; - private static final Map, Function> REPLACEMENT_MAPPING_FUNC = new HashMap<>(); - private static final Map, Object> PRIMITIVE_VALUE_MAPPING = new HashMap<>(); - - static { - PRIMITIVE_VALUE_MAPPING.put(Boolean.class, Boolean.FALSE); - PRIMITIVE_VALUE_MAPPING.put(Byte.class, (byte) 0); - PRIMITIVE_VALUE_MAPPING.put(Short.class, (short) 0); - PRIMITIVE_VALUE_MAPPING.put(Integer.class, 0); - PRIMITIVE_VALUE_MAPPING.put(Long.class, 0L); - PRIMITIVE_VALUE_MAPPING.put(Float.class, 0f); - PRIMITIVE_VALUE_MAPPING.put(Double.class, 0d); - PRIMITIVE_VALUE_MAPPING.put(BigInteger.class, BigInteger.ZERO); - PRIMITIVE_VALUE_MAPPING.put(BigDecimal.class, BigDecimal.ZERO); - PRIMITIVE_VALUE_MAPPING.put(Date.class, new Date(0)); - PRIMITIVE_VALUE_MAPPING.put(String.class, ""); - - REPLACEMENT_MAPPING_FUNC.put(Byte.class, v -> Values.convertToByte(null, v)); - REPLACEMENT_MAPPING_FUNC.put(Short.class, v -> Values.convertToShort(null, v)); - REPLACEMENT_MAPPING_FUNC.put(Integer.class, v -> Values.convertToInteger(null, v)); - REPLACEMENT_MAPPING_FUNC.put(Long.class, v -> Values.convertToLong(null, v)); - REPLACEMENT_MAPPING_FUNC.put(Float.class, v -> Values.convertToFloat(null, v)); - REPLACEMENT_MAPPING_FUNC.put(Double.class, v -> Values.convertToDouble(null, v)); - REPLACEMENT_MAPPING_FUNC.put(String.class, Function.identity()); - REPLACEMENT_MAPPING_FUNC.put(BigDecimal.class, BigDecimal::new); - REPLACEMENT_MAPPING_FUNC.put(BigInteger.class, BigInteger::new); - } + private static final Map, Function> REPLACEMENT_MAPPING_FUNC = Map.of( + Byte.class, v -> Values.convertToByte(null, v), + Short.class, v -> Values.convertToShort(null, v), + Integer.class, v -> Values.convertToInteger(null, v), + Long.class, v -> Values.convertToLong(null, v), + Float.class, v -> Values.convertToFloat(null, v), + Double.class, v -> Values.convertToDouble(null, v), + String.class, Function.identity(), + BigDecimal.class, BigDecimal::new, + BigInteger.class, BigInteger::new + ); + private static final Map, Object> PRIMITIVE_VALUE_MAPPING = Map.ofEntries( + Map.entry(Boolean.class, Boolean.FALSE), + Map.entry(Byte.class, (byte) 0), + Map.entry(Short.class, (short) 0), + Map.entry(Integer.class, 0), + Map.entry(Long.class, 0L), + Map.entry(Float.class, 0f), + Map.entry(Double.class, 0d), + Map.entry(BigInteger.class, BigInteger.ZERO), + Map.entry(BigDecimal.class, BigDecimal.ZERO), + Map.entry(Date.class, new Date(0)), + Map.entry(String.class, "") + ); private Set maskedFields; private String replacement; diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java index 38d27e8a818f8..7e8f6700bf634 100644 --- 
a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java @@ -31,7 +31,6 @@ import org.apache.kafka.connect.transforms.util.SchemaUtil; import org.apache.kafka.connect.transforms.util.SimpleConfig; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -56,11 +55,19 @@ interface ConfigName { } public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(ConfigName.EXCLUDE, ConfigDef.Type.LIST, Collections.emptyList(), ConfigDef.Importance.MEDIUM, + .define(ConfigName.EXCLUDE, + ConfigDef.Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.MEDIUM, "Fields to exclude. This takes precedence over the fields to include.") - .define(ConfigName.INCLUDE, ConfigDef.Type.LIST, Collections.emptyList(), ConfigDef.Importance.MEDIUM, + .define(ConfigName.INCLUDE, + ConfigDef.Type.LIST, + List.of(), + ConfigDef.ValidList.anyNonDuplicateValues(true, false), + ConfigDef.Importance.MEDIUM, "Fields to include. If specified, only these fields will be used.") - .define(ConfigName.RENAMES, ConfigDef.Type.LIST, Collections.emptyList(), + .define(ConfigName.RENAMES, ConfigDef.Type.LIST, List.of(), ConfigDef.LambdaValidator.with( (name, value) -> { @SuppressWarnings("unchecked") diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java index aeec9ea41892c..940bb6045a9dd 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java @@ -166,17 +166,15 @@ public String toType(Config config, Date orig) { public Date toRaw(Config config, Object orig) { if (!(orig instanceof Long unixTime)) throw new DataException("Expected Unix timestamp to be a Long, but found " + orig.getClass()); - switch (config.unixPrecision) { - case UNIX_PRECISION_SECONDS: - return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.SECONDS.toMillis(unixTime)); - case UNIX_PRECISION_MICROS: - return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.MICROSECONDS.toMillis(unixTime)); - case UNIX_PRECISION_NANOS: - return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.NANOSECONDS.toMillis(unixTime)); - case UNIX_PRECISION_MILLIS: - default: - return Timestamp.toLogical(Timestamp.SCHEMA, unixTime); - } + return switch (config.unixPrecision) { + case UNIX_PRECISION_SECONDS -> + Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.SECONDS.toMillis(unixTime)); + case UNIX_PRECISION_MICROS -> + Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.MICROSECONDS.toMillis(unixTime)); + case UNIX_PRECISION_NANOS -> + Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.NANOSECONDS.toMillis(unixTime)); + default -> Timestamp.toLogical(Timestamp.SCHEMA, unixTime); + }; } @Override @@ -186,18 +184,13 @@ public Schema typeSchema(boolean isOptional) { @Override public Long toType(Config config, Date orig) { - Long unixTimeMillis = Timestamp.fromLogical(Timestamp.SCHEMA, orig); - switch (config.unixPrecision) { - case UNIX_PRECISION_SECONDS: - return TimeUnit.MILLISECONDS.toSeconds(unixTimeMillis); - case UNIX_PRECISION_MICROS: - return TimeUnit.MILLISECONDS.toMicros(unixTimeMillis); - case UNIX_PRECISION_NANOS: - return TimeUnit.MILLISECONDS.toNanos(unixTimeMillis); - case 
UNIX_PRECISION_MILLIS: - default: - return unixTimeMillis; - } + long unixTimeMillis = Timestamp.fromLogical(Timestamp.SCHEMA, orig); + return switch (config.unixPrecision) { + case UNIX_PRECISION_SECONDS -> TimeUnit.MILLISECONDS.toSeconds(unixTimeMillis); + case UNIX_PRECISION_MICROS -> TimeUnit.MILLISECONDS.toMicros(unixTimeMillis); + case UNIX_PRECISION_NANOS -> TimeUnit.MILLISECONDS.toNanos(unixTimeMillis); + default -> unixTimeMillis; + }; } }); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java index 24cdec2249ab1..19c299e6867e6 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java @@ -28,7 +28,6 @@ import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.errors.DataException; -import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.util.HashMap; @@ -46,7 +45,7 @@ public class ValueToKey> implements Transformation public static final String REPLACE_NULL_WITH_DEFAULT_CONFIG = "replace.null.with.default"; public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, new NonEmptyListValidator(), ConfigDef.Importance.HIGH, + .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, ConfigDef.ValidList.anyNonDuplicateValues(false, false), ConfigDef.Importance.HIGH, "Field names on the record value to extract as the record key.") .define(REPLACE_NULL_WITH_DEFAULT_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, "Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used."); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java index 514ebb425ebe6..4b585d663b8dc 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java @@ -44,13 +44,13 @@ public enum FieldSyntaxVersion { public static final String FIELD_SYNTAX_VERSION_CONFIG = "field.syntax.version"; public static final String FIELD_SYNTAX_VERSION_DOC = "Defines the version of the syntax to access fields. " - + "If set to `V1`, then the field paths are limited to access the elements at the root level of the struct or map. " - + "If set to `V2`, the syntax will support accessing nested elements. " + + "If set to V1, then the field paths are limited to access the elements at the root level of the struct or map. " + + "If set to V2, the syntax will support accessing nested elements. " + "To access nested elements, dotted notation is used. " + "If dots are already included in the field name, " + "then backtick pairs can be used to wrap field names containing dots. " - + "E.g. to access the subfield `baz` from a field named \"foo.bar\" in a struct/map " - + "the following format can be used to access its elements: \"`foo.bar`.baz\"."; + + "E.g. 
to access the subfield baz from a field named \"foo.bar\" in a struct/map " + + "the following format can be used to access its elements: \"foo.bar.baz\"."; public static final String FIELD_SYNTAX_VERSION_DEFAULT_VALUE = V1.name(); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java index 326a844025d63..6016707d36764 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java @@ -22,7 +22,6 @@ import org.apache.kafka.connect.data.Struct; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -53,7 +52,7 @@ public SingleFieldPath(String pathText, FieldSyntaxVersion version) { this.version = version; switch (version) { case V1: // backward compatibility - this.steps = Collections.singletonList(pathText); + this.steps = List.of(pathText); break; case V2: this.steps = buildFieldPathV2(pathText); @@ -134,7 +133,7 @@ private static List buildFieldPathV2(String path) { // add last step if last char is a dot if (!path.isEmpty() && path.charAt(path.length() - 1) == DOT) steps.add(""); - return Collections.unmodifiableList(steps); + return List.copyOf(steps); } private static void failWhenIncompleteBacktickPair(String path, int backtickAt) { diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java index e79e163b46394..1a470095a417d 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java @@ -39,7 +39,6 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; import java.util.Arrays; -import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -72,38 +71,38 @@ public void teardown() { @Test public void testConfigEmpty() { - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, ""))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, ""))); } @Test public void testConfigInvalidSchemaType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:faketype"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:faketype"))); } @Test public void testConfigInvalidTargetType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:array"))); - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "array"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:array"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "array"))); } @Test public void testUnsupportedTargetType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:bytes"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:bytes"))); } @Test public void testConfigInvalidMap() { - 
assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8:extra"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int8:extra"))); } @Test public void testConfigMixWholeAndFieldTransformation() { - assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8,int32"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int8,int32"))); } @Test public void castNullValueRecordWithSchema() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, "key", Schema.STRING_SCHEMA, null); SourceRecord transformed = xformValue.apply(original); @@ -129,7 +128,7 @@ public void castFieldWithDefaultValueRecordWithSchema(boolean replaceNullWithDef @Test public void castNullValueRecordSchemaless() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, "key", null, null); SourceRecord transformed = xformValue.apply(original); @@ -138,7 +137,7 @@ public void castNullValueRecordSchemaless() { @Test public void castNullKeyRecordWithSchema() { - xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); + xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, null, Schema.STRING_SCHEMA, "value"); SourceRecord transformed = xformKey.apply(original); @@ -147,7 +146,7 @@ public void castNullKeyRecordWithSchema() { @Test public void castNullKeyRecordSchemaless() { - xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); + xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, null, null, Schema.STRING_SCHEMA, "value"); SourceRecord transformed = xformKey.apply(original); @@ -156,7 +155,7 @@ public void castNullKeyRecordSchemaless() { @Test public void castWholeRecordKeyWithSchema() { - xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); + xformKey.configure(Map.of(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42, Schema.STRING_SCHEMA, "bogus")); @@ -166,7 +165,7 @@ public void castWholeRecordKeyWithSchema() { @Test public void castWholeRecordValueWithSchemaInt8() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -176,7 +175,7 @@ public void castWholeRecordValueWithSchemaInt8() { @Test public void castWholeRecordValueWithSchemaInt16() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int16")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int16")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -186,7 +185,7 @@ public void castWholeRecordValueWithSchemaInt16() { @Test public void castWholeRecordValueWithSchemaInt32() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, 
"int32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -196,7 +195,7 @@ public void castWholeRecordValueWithSchemaInt32() { @Test public void castWholeRecordValueWithSchemaInt64() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -206,7 +205,7 @@ public void castWholeRecordValueWithSchemaInt64() { @Test public void castWholeRecordValueWithSchemaFloat32() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -216,7 +215,7 @@ public void castWholeRecordValueWithSchemaFloat32() { @Test public void castWholeRecordValueWithSchemaFloat64() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -226,7 +225,7 @@ public void castWholeRecordValueWithSchemaFloat64() { @Test public void castWholeRecordValueWithSchemaBooleanTrue() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -236,7 +235,7 @@ public void castWholeRecordValueWithSchemaBooleanTrue() { @Test public void castWholeRecordValueWithSchemaBooleanFalse() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 0)); @@ -246,7 +245,7 @@ public void castWholeRecordValueWithSchemaBooleanFalse() { @Test public void castWholeRecordValueWithSchemaString() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -257,7 +256,7 @@ public void castWholeRecordValueWithSchemaString() { @Test public void castWholeBigDecimalRecordValueWithSchemaString() { BigDecimal bigDecimal = new BigDecimal(42); - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Decimal.schema(bigDecimal.scale()), bigDecimal)); @@ -268,7 +267,7 @@ public void castWholeBigDecimalRecordValueWithSchemaString() { @Test public void castWholeDateRecordValueWithSchemaString() { Date timestamp = new Date(MILLIS_PER_DAY + 1); // day + 1msec to get a timestamp formatting. 
- xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Timestamp.SCHEMA, timestamp)); @@ -279,7 +278,7 @@ public void castWholeDateRecordValueWithSchemaString() { @Test public void castWholeRecordDefaultValue() { // Validate default value in schema is correctly converted - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, SchemaBuilder.float32().defaultValue(-42.125f).build(), 42.125f)); @@ -290,7 +289,7 @@ public void castWholeRecordDefaultValue() { @Test public void castWholeRecordKeySchemaless() { - xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); + xformKey.configure(Map.of(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, null, 42, Schema.STRING_SCHEMA, "bogus")); @@ -300,7 +299,7 @@ public void castWholeRecordKeySchemaless() { @Test public void castWholeRecordValueSchemalessInt8() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -310,7 +309,7 @@ public void castWholeRecordValueSchemalessInt8() { @Test public void castWholeRecordValueSchemalessInt16() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int16")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int16")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -320,7 +319,7 @@ public void castWholeRecordValueSchemalessInt16() { @Test public void castWholeRecordValueSchemalessInt32() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -330,7 +329,7 @@ public void castWholeRecordValueSchemalessInt32() { @Test public void castWholeRecordValueSchemalessInt64() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -340,7 +339,7 @@ public void castWholeRecordValueSchemalessInt64() { @Test public void castWholeRecordValueSchemalessFloat32() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -350,7 +349,7 @@ public void castWholeRecordValueSchemalessFloat32() { @Test public void castWholeRecordValueSchemalessFloat64() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float64")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -360,7 +359,7 @@ public void castWholeRecordValueSchemalessFloat64() { @Test public void castWholeRecordValueSchemalessBooleanTrue() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, 
"boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -370,7 +369,7 @@ public void castWholeRecordValueSchemalessBooleanTrue() { @Test public void castWholeRecordValueSchemalessBooleanFalse() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 0)); @@ -380,7 +379,7 @@ public void castWholeRecordValueSchemalessBooleanFalse() { @Test public void castWholeRecordValueSchemalessString() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -390,15 +389,15 @@ public void castWholeRecordValueSchemalessString() { @Test public void castWholeRecordValueSchemalessUnsupportedType() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, - null, Collections.singletonList("foo")))); + null, List.of("foo")))); } @Test public void castLogicalToPrimitive() { - List specParts = Arrays.asList( + List specParts = List.of( "date_to_int32:int32", // Cast to underlying representation "timestamp_to_int64:int64", // Cast to underlying representation "time_to_int64:int64", // Cast to wider datatype than underlying representation @@ -408,7 +407,7 @@ public void castLogicalToPrimitive() { ); Date day = new Date(MILLIS_PER_DAY); - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, + xformValue.configure(Map.of(Cast.SPEC_CONFIG, String.join(",", specParts))); SchemaBuilder builder = SchemaBuilder.struct(); @@ -455,7 +454,7 @@ public void castLogicalToString() { Date time = new Date(MILLIS_PER_HOUR); Date timestamp = new Date(); - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "date:string,decimal:string,time:string,timestamp:string")); SchemaBuilder builder = SchemaBuilder.struct(); @@ -494,7 +493,7 @@ public void castFieldsWithSchema() { byte[] byteArray = new byte[] {(byte) 0xFE, (byte) 0xDC, (byte) 0xBA, (byte) 0x98, 0x76, 0x54, 0x32, 0x10}; ByteBuffer byteBuffer = ByteBuffer.wrap(Arrays.copyOf(byteArray, byteArray.length)); - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32,bigdecimal:string,date:string,optional:int32,bytes:string,byteArray:string")); // Include an optional fields and fields with defaults to validate their values are passed through properly @@ -578,7 +577,7 @@ public void castFieldsWithSchema() { @SuppressWarnings("unchecked") @Test public void castFieldsSchemaless() { - xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32")); + xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32")); Map recordValue = new HashMap<>(); recordValue.put("int8", (byte) 8); recordValue.put("int16", (short) 16); diff --git 
a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java index d164512897b64..2def8f2e4d226 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java @@ -25,10 +25,9 @@ import org.junit.jupiter.api.Test; import java.util.HashMap; +import java.util.List; import java.util.Map; -import static java.util.Arrays.asList; -import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -38,7 +37,7 @@ public class DropHeadersTest { private Map config(String... headers) { Map result = new HashMap<>(); - result.put(DropHeaders.HEADERS_FIELD, asList(headers)); + result.put(DropHeaders.HEADERS_FIELD, List.of(headers)); return result; } @@ -106,8 +105,8 @@ private void assertNonHeaders(SourceRecord original, SourceRecord xformed) { } private SourceRecord sourceRecord(ConnectHeaders headers) { - Map sourcePartition = singletonMap("foo", "bar"); - Map sourceOffset = singletonMap("baz", "quxx"); + Map sourcePartition = Map.of("foo", "bar"); + Map sourceOffset = Map.of("baz", "quxx"); String topic = "topic"; Integer partition = 0; Schema keySchema = null; diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java index ff11ffe4e852d..414dec56095d3 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -57,9 +56,9 @@ public void teardown() { @Test public void schemaless() { - xformKey.configure(Collections.singletonMap("field", "magic")); + xformKey.configure(Map.of("field", "magic")); - final SinkRecord record = new SinkRecord("test", 0, null, Collections.singletonMap("magic", 42), null, null, 0); + final SinkRecord record = new SinkRecord("test", 0, null, Map.of("magic", 42), null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); assertNull(transformedRecord.keySchema()); @@ -73,7 +72,7 @@ public void schemalessAndNestedPath() { configs.put("field", "magic.foo"); xformKey.configure(configs); - final Map key = Collections.singletonMap("magic", Collections.singletonMap("foo", 42)); + final Map key = Map.of("magic", Map.of("foo", 42)); final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); @@ -83,7 +82,7 @@ public void schemalessAndNestedPath() { @Test public void nullSchemaless() { - xformKey.configure(Collections.singletonMap("field", "magic")); + xformKey.configure(Map.of("field", "magic")); final Map key = null; final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); @@ -95,7 +94,7 @@ public void nullSchemaless() { @Test public void withSchema() { - xformKey.configure(Collections.singletonMap("field", "magic")); + xformKey.configure(Map.of("field", "magic")); final Schema keySchema = 
SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).build(); final Struct key = new Struct(keySchema).put("magic", 42); @@ -125,7 +124,7 @@ public void withSchemaAndNestedPath() { @Test public void testNullWithSchema() { - xformKey.configure(Collections.singletonMap("field", "magic")); + xformKey.configure(Map.of("field", "magic")); final Schema keySchema = SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).optional().build(); final Struct key = null; @@ -138,9 +137,9 @@ public void testNullWithSchema() { @Test public void nonExistentFieldSchemalessShouldReturnNull() { - xformKey.configure(Collections.singletonMap("field", "nonexistent")); + xformKey.configure(Map.of("field", "nonexistent")); - final SinkRecord record = new SinkRecord("test", 0, null, Collections.singletonMap("magic", 42), null, null, 0); + final SinkRecord record = new SinkRecord("test", 0, null, Map.of("magic", 42), null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); assertNull(transformedRecord.keySchema()); @@ -154,7 +153,7 @@ public void nonExistentNestedFieldSchemalessShouldReturnNull() { configs.put("field", "magic.nonexistent"); xformKey.configure(configs); - final Map key = Collections.singletonMap("magic", Collections.singletonMap("foo", 42)); + final Map key = Map.of("magic", Map.of("foo", 42)); final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); @@ -164,7 +163,7 @@ public void nonExistentNestedFieldSchemalessShouldReturnNull() { @Test public void nonExistentFieldWithSchemaShouldFail() { - xformKey.configure(Collections.singletonMap("field", "nonexistent")); + xformKey.configure(Map.of("field", "nonexistent")); final Schema keySchema = SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).build(); final Struct key = new Struct(keySchema).put("magic", 42); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java index f771d4f0ac3e4..8873f4c03b0b8 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java @@ -27,10 +27,9 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -52,21 +51,21 @@ public void teardown() { @Test public void topLevelStructRequired() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42))); } @Test public void topLevelMapRequired() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42))); } @Test public void testNestedStruct() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); SchemaBuilder builder = SchemaBuilder.struct(); builder.field("int8", Schema.INT8_SCHEMA); @@ -125,7 +124,7 @@ public void testNestedStruct() { @Test public void testNestedMapWithDelimiter() { - xformValue.configure(Collections.singletonMap("delimiter", "#")); + 
xformValue.configure(Map.of("delimiter", "#")); Map supportedTypes = new HashMap<>(); supportedTypes.put("int8", (byte) 8); @@ -138,8 +137,8 @@ public void testNestedMapWithDelimiter() { supportedTypes.put("string", "stringy"); supportedTypes.put("bytes", "bytes".getBytes()); - Map oneLevelNestedMap = Collections.singletonMap("B", supportedTypes); - Map twoLevelNestedMap = Collections.singletonMap("A", oneLevelNestedMap); + Map oneLevelNestedMap = Map.of("B", supportedTypes); + Map twoLevelNestedMap = Map.of("A", oneLevelNestedMap); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, @@ -163,7 +162,7 @@ public void testNestedMapWithDelimiter() { @Test public void testOptionalFieldStruct() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); SchemaBuilder builder = SchemaBuilder.struct(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -190,7 +189,7 @@ public void testOptionalFieldStruct() { @Test public void testOptionalStruct() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -206,7 +205,7 @@ public void testOptionalStruct() { @Test public void testOptionalNestedStruct() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -230,12 +229,12 @@ public void testOptionalNestedStruct() { @Test public void testOptionalFieldMap() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); Map supportedTypes = new HashMap<>(); supportedTypes.put("opt_int32", null); - Map oneLevelNestedMap = Collections.singletonMap("B", supportedTypes); + Map oneLevelNestedMap = Map.of("B", supportedTypes); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, @@ -251,9 +250,9 @@ public void testOptionalFieldMap() { @Test public void testKey() { - xformKey.configure(Collections.emptyMap()); + xformKey.configure(Map.of()); - Map> key = Collections.singletonMap("A", Collections.singletonMap("B", 12)); + Map> key = Map.of("A", Map.of("B", 12)); SourceRecord src = new SourceRecord(null, null, "topic", null, key, null, null); SourceRecord transformed = xformKey.apply(src); @@ -266,14 +265,14 @@ public void testKey() { @Test public void testSchemalessArray() { - xformValue.configure(Collections.emptyMap()); - Object value = Collections.singletonMap("foo", Arrays.asList("bar", Collections.singletonMap("baz", Collections.singletonMap("lfg", "lfg")))); + xformValue.configure(Map.of()); + Object value = Map.of("foo", List.of("bar", Map.of("baz", Map.of("lfg", "lfg")))); assertEquals(value, xformValue.apply(new SourceRecord(null, null, "topic", null, null, null, value)).value()); } @Test public void testArrayWithSchema() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); Schema nestedStructSchema = SchemaBuilder.struct().field("lfg", Schema.STRING_SCHEMA).build(); Schema innerStructSchema = SchemaBuilder.struct().field("baz", nestedStructSchema).build(); Schema structSchema = SchemaBuilder.struct() @@ -284,7 +283,7 @@ public void testArrayWithSchema() { Struct innerValue = new Struct(innerStructSchema); innerValue.put("baz", nestedValue); Struct value = new Struct(structSchema); - value.put("foo", Collections.singletonList(innerValue)); + value.put("foo", 
List.of(innerValue)); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", null, null, structSchema, value)); assertEquals(value, transformed.value()); assertEquals(structSchema, transformed.valueSchema()); @@ -296,7 +295,7 @@ public void testOptionalAndDefaultValuesNested() { // children should also be optional. Similarly, if the parent Struct has a default value, the default value for // the flattened field - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("req_field", Schema.STRING_SCHEMA); @@ -325,7 +324,7 @@ public void testOptionalAndDefaultValuesNested() { @Test public void tombstoneEventWithoutSchemaShouldPassThrough() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); final SourceRecord record = new SourceRecord(null, null, "test", 0, null, null); @@ -337,7 +336,7 @@ public void tombstoneEventWithoutSchemaShouldPassThrough() { @Test public void tombstoneEventWithSchemaShouldPassThrough() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); final Schema simpleStructSchema = SchemaBuilder.struct().name("name").version(1).doc("doc").field("magic", Schema.OPTIONAL_INT64_SCHEMA).build(); final SourceRecord record = new SourceRecord(null, null, "test", 0, @@ -350,7 +349,7 @@ public void tombstoneEventWithSchemaShouldPassThrough() { @Test public void testMapWithNullFields() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); // Use a LinkedHashMap to ensure the SMT sees entries in a specific order Map value = new LinkedHashMap<>(); @@ -368,7 +367,7 @@ public void testMapWithNullFields() { @Test public void testStructWithNullFields() { - xformValue.configure(Collections.emptyMap()); + xformValue.configure(Map.of()); final Schema structSchema = SchemaBuilder.struct() .field("firstNull", Schema.OPTIONAL_STRING_SCHEMA) diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java index da9e35843252d..f68d7493a75a1 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java @@ -36,10 +36,6 @@ import java.util.List; import java.util.Map; -import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.data.Schema.STRING_SCHEMA; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -103,8 +99,8 @@ public SourceRecord withSchema(boolean keyTransform) { } private SourceRecord sourceRecord(boolean keyTransform, Schema keyOrValueSchema, Object keyOrValue) { - Map sourcePartition = singletonMap("foo", "bar"); - Map sourceOffset = singletonMap("baz", "quxx"); + Map sourcePartition = Map.of("foo", "bar"); + Map sourceOffset = Map.of("baz", "quxx"); String topic = "topic"; Integer partition = 0; Long timestamp = 0L; @@ -140,7 +136,7 @@ public static List data() { List result = new ArrayList<>(); - for (Boolean testKeyTransform : asList(true, false)) { + for (Boolean testKeyTransform : List.of(true, false)) { result.add( Arguments.of( "basic copy", @@ -149,7 +145,7 @@ public static 
List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") @@ -164,7 +160,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -179,7 +175,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("inserted1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") @@ -194,7 +190,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("inserted1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -211,7 +207,7 @@ public static List data() { .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") @@ -226,7 +222,7 @@ public static List data() { .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, + List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -242,7 +238,7 @@ public static List data() { .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), // two headers from the same field - asList("field1", "field1"), asList("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, true, + List.of("field1", "field1"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -259,7 +255,7 @@ public static List data() { .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), // two headers from the same field - asList("field1", "field2"), asList("inserted1", "inserted1"), 
HeaderFrom.Operation.MOVE, true, + List.of("field1", "field2"), List.of("inserted1", "inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 and field2 got moved .addHeader("header1", STRING_SCHEMA, "existing-value") @@ -274,7 +270,7 @@ public static List data() { .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) .addHeader("header1", STRING_SCHEMA, "existing-value"), - asList("field1", "field2"), asList("inserted1", "inserted2"), HeaderFrom.Operation.COPY, false, + List.of("field1", "field2"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.COPY, false, new RecordBuilder() .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) @@ -290,7 +286,7 @@ public static List data() { .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) .addHeader("header1", STRING_SCHEMA, "existing-value"), - asList("field1", "field2"), asList("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, false, + List.of("field1", "field2"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, false, new RecordBuilder() .addHeader("header1", STRING_SCHEMA, "existing-value") .addHeader("inserted1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") @@ -353,7 +349,7 @@ public void withSchema(String description, @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigExtraHeaderConfig(boolean keyTransform) { - Map config = config(singletonList("foo"), asList("foo", "bar"), HeaderFrom.Operation.COPY, true); + Map config = config(List.of("foo"), List.of("foo", "bar"), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } @@ -361,7 +357,7 @@ public void invalidConfigExtraHeaderConfig(boolean keyTransform) { @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigExtraFieldConfig(boolean keyTransform) { - Map config = config(asList("foo", "bar"), singletonList("foo"), HeaderFrom.Operation.COPY, true); + Map config = config(List.of("foo", "bar"), List.of("foo"), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } @@ -369,7 +365,7 @@ public void invalidConfigExtraFieldConfig(boolean keyTransform) { @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigEmptyHeadersAndFieldsConfig(boolean keyTransform) { - Map config = config(emptyList(), emptyList(), HeaderFrom.Operation.COPY, true); + Map config = config(List.of(), List.of(), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? 
new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java index b72dddcdd155c..93b69d5413d83 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java @@ -24,7 +24,6 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -41,18 +40,18 @@ public void teardown() { @Test public void schemaless() { - xform.configure(Collections.singletonMap("field", "magic")); + xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); assertNull(transformedRecord.keySchema()); - assertEquals(Collections.singletonMap("magic", 42), transformedRecord.key()); + assertEquals(Map.of("magic", 42), transformedRecord.key()); } @Test public void withSchema() { - xform.configure(Collections.singletonMap("field", "magic")); + xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, Schema.INT32_SCHEMA, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); @@ -64,7 +63,7 @@ public void withSchema() { @Test public void testSchemalessMapIsMutable() { - xform.configure(Collections.singletonMap("field", "magic")); + xform.configure(Map.of("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 420, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java index cb48fdd810f7d..705f60f5a2e5b 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java @@ -30,7 +30,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Map; @@ -59,7 +58,7 @@ public void teardown() { @Test public void topLevelStructRequired() { - xformValue.configure(Collections.singletonMap("topic.field", "topic_field")); + xformValue.configure(Map.of("topic.field", "topic_field")); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "", 0, Schema.INT32_SCHEMA, 42))); } @@ -118,7 +117,7 @@ public void schemalessInsertConfiguredFields() { xformValue.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, null, null, Collections.singletonMap("magic", 42L), 123L); + null, null, null, Map.of("magic", 42L), 123L); final SourceRecord transformedRecord = xformValue.apply(record); @@ -183,7 +182,7 @@ public void insertKeyFieldsIntoTombstoneEvent() { xformKey.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, Collections.singletonMap("magic", 42L), null, null); + null, Map.of("magic", 42L), null, null); final SourceRecord transformedRecord = xformKey.apply(record); @@ 
-207,7 +206,7 @@ public void insertIntoNullKeyLeavesRecordUnchanged() { xformKey.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, null, null, Collections.singletonMap("magic", 42L)); + null, null, null, Map.of("magic", 42L)); final SourceRecord transformedRecord = xformKey.apply(record); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java index 20c5b67a50a93..190931b829bf2 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java @@ -28,7 +28,6 @@ import java.util.HashMap; import java.util.Map; -import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -105,8 +104,8 @@ private void assertNonHeaders(SourceRecord original, SourceRecord xformed) { } private SourceRecord sourceRecord(ConnectHeaders headers) { - Map sourcePartition = singletonMap("foo", "bar"); - Map sourceOffset = singletonMap("baz", "quxx"); + Map sourcePartition = Map.of("foo", "bar"); + Map sourceOffset = Map.of("baz", "quxx"); String topic = "topic"; Integer partition = 0; Schema keySchema = null; diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java index 05989af572f4d..3bdc1cd3b4cfa 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java @@ -33,73 +33,69 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; -import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class MaskFieldTest { private static final Schema SCHEMA = SchemaBuilder.struct() - .field("magic", Schema.INT32_SCHEMA) - .field("bool", Schema.BOOLEAN_SCHEMA) - .field("byte", Schema.INT8_SCHEMA) - .field("short", Schema.INT16_SCHEMA) - .field("int", Schema.INT32_SCHEMA) - .field("long", Schema.INT64_SCHEMA) - .field("float", Schema.FLOAT32_SCHEMA) - .field("double", Schema.FLOAT64_SCHEMA) - .field("string", Schema.STRING_SCHEMA) - .field("date", org.apache.kafka.connect.data.Date.SCHEMA) - .field("time", Time.SCHEMA) - .field("timestamp", Timestamp.SCHEMA) - .field("decimal", Decimal.schema(0)) - .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA)) - .field("map", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA)) - .field("withDefault", SchemaBuilder.string().optional().defaultValue("default").build()) - .build(); - private static final Map VALUES = new HashMap<>(); - private static final Struct VALUES_WITH_SCHEMA = new Struct(SCHEMA); + .field("magic", Schema.INT32_SCHEMA) + .field("bool", Schema.BOOLEAN_SCHEMA) + .field("byte", Schema.INT8_SCHEMA) + .field("short", Schema.INT16_SCHEMA) + .field("int", Schema.INT32_SCHEMA) + .field("long", Schema.INT64_SCHEMA) + .field("float", Schema.FLOAT32_SCHEMA) + .field("double", 
Schema.FLOAT64_SCHEMA) + .field("string", Schema.STRING_SCHEMA) + .field("date", org.apache.kafka.connect.data.Date.SCHEMA) + .field("time", Time.SCHEMA) + .field("timestamp", Timestamp.SCHEMA) + .field("decimal", Decimal.schema(0)) + .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA)) + .field("map", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA)) + .field("withDefault", SchemaBuilder.string().optional().defaultValue("default").build()) + .build(); - static { - VALUES.put("magic", 42); - VALUES.put("bool", true); - VALUES.put("byte", (byte) 42); - VALUES.put("short", (short) 42); - VALUES.put("int", 42); - VALUES.put("long", 42L); - VALUES.put("float", 42f); - VALUES.put("double", 42d); - VALUES.put("string", "55.121.20.20"); - VALUES.put("date", new Date()); - VALUES.put("bigint", new BigInteger("42")); - VALUES.put("bigdec", new BigDecimal("42.0")); - VALUES.put("list", singletonList(42)); - VALUES.put("map", Collections.singletonMap("key", "value")); + private static final Map VALUES = Map.ofEntries( + Map.entry("magic", 42), + Map.entry("bool", true), + Map.entry("byte", (byte) 42), + Map.entry("short", (short) 42), + Map.entry("int", 42), + Map.entry("long", 42L), + Map.entry("float", 42f), + Map.entry("double", 42d), + Map.entry("string", "55.121.20.20"), + Map.entry("date", new Date()), + Map.entry("bigint", new BigInteger("42")), + Map.entry("bigdec", new BigDecimal("42.0")), + Map.entry("list", List.of(42)), + Map.entry("map", Map.of("key", "value")) + ); - VALUES_WITH_SCHEMA.put("magic", 42); - VALUES_WITH_SCHEMA.put("bool", true); - VALUES_WITH_SCHEMA.put("byte", (byte) 42); - VALUES_WITH_SCHEMA.put("short", (short) 42); - VALUES_WITH_SCHEMA.put("int", 42); - VALUES_WITH_SCHEMA.put("long", 42L); - VALUES_WITH_SCHEMA.put("float", 42f); - VALUES_WITH_SCHEMA.put("double", 42d); - VALUES_WITH_SCHEMA.put("string", "hmm"); - VALUES_WITH_SCHEMA.put("date", new Date()); - VALUES_WITH_SCHEMA.put("time", new Date()); - VALUES_WITH_SCHEMA.put("timestamp", new Date()); - VALUES_WITH_SCHEMA.put("decimal", new BigDecimal(42)); - VALUES_WITH_SCHEMA.put("array", Arrays.asList(1, 2, 3)); - VALUES_WITH_SCHEMA.put("map", Collections.singletonMap("what", "what")); - VALUES_WITH_SCHEMA.put("withDefault", null); - } + private static final Struct VALUES_WITH_SCHEMA = new Struct(SCHEMA) + .put("magic", 42) + .put("bool", true) + .put("byte", (byte) 42) + .put("short", (short) 42) + .put("int", 42) + .put("long", 42L) + .put("float", 42f) + .put("double", 42d) + .put("string", "hmm") + .put("date", new Date()) + .put("time", new Date()) + .put("timestamp", new Date()) + .put("decimal", new BigDecimal(42)) + .put("array", List.of(1, 2, 3)) + .put("map", Map.of("what", "what")) + .put("withDefault", null); private static MaskField transform(List fields, String replacement) { final MaskField xform = new MaskField.Value<>(); @@ -117,20 +113,20 @@ private static SinkRecord record(Schema schema, Object value) { private static void checkReplacementWithSchema(String maskField, Object replacement) { SinkRecord record = record(SCHEMA, VALUES_WITH_SCHEMA); - final Struct updatedValue = (Struct) transform(singletonList(maskField), String.valueOf(replacement)).apply(record).value(); + final Struct updatedValue = (Struct) transform(List.of(maskField), String.valueOf(replacement)).apply(record).value(); assertEquals(replacement, updatedValue.get(maskField), "Invalid replacement for " + maskField + " value"); } private static void checkReplacementSchemaless(String maskField, Object replacement) { 
- checkReplacementSchemaless(singletonList(maskField), replacement); + checkReplacementSchemaless(List.of(maskField), replacement); } @SuppressWarnings("unchecked") private static void checkReplacementSchemaless(List maskFields, Object replacement) { SinkRecord record = record(null, VALUES); final Map updatedValue = (Map) transform(maskFields, String.valueOf(replacement)) - .apply(record) - .value(); + .apply(record) + .value(); for (String maskField : maskFields) { assertEquals(replacement, updatedValue.get(maskField), "Invalid replacement for " + maskField + " value"); } @@ -154,8 +150,8 @@ public void testSchemaless() { assertEquals(new Date(0), updatedValue.get("date")); assertEquals(BigInteger.ZERO, updatedValue.get("bigint")); assertEquals(BigDecimal.ZERO, updatedValue.get("bigdec")); - assertEquals(Collections.emptyList(), updatedValue.get("list")); - assertEquals(Collections.emptyMap(), updatedValue.get("map")); + assertEquals(List.of(), updatedValue.get("list")); + assertEquals(Map.of(), updatedValue.get("map")); } @Test @@ -182,8 +178,8 @@ public void testWithSchema() { assertEquals(new Date(0), updatedValue.get("time")); assertEquals(new Date(0), updatedValue.get("timestamp")); assertEquals(BigDecimal.ZERO, updatedValue.get("decimal")); - assertEquals(Collections.emptyList(), updatedValue.get("array")); - assertEquals(Collections.emptyMap(), updatedValue.get("map")); + assertEquals(List.of(), updatedValue.get("array")); + assertEquals(Map.of(), updatedValue.get("map")); assertEquals(null, updatedValue.getWithoutDefault("withDefault")); } @@ -206,10 +202,10 @@ public void testSchemalessUnsupportedReplacementType() { Class exClass = DataException.class; assertThrows(exClass, () -> checkReplacementSchemaless("date", new Date()), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless(Arrays.asList("int", "date"), new Date()), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless(List.of("int", "date"), new Date()), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("bool", false), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless("list", singletonList("123")), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless("map", Collections.singletonMap("123", "321")), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless("list", List.of("123")), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless("map", Map.of("123", "321")), exMessage); } @Test @@ -231,7 +227,7 @@ public void testWithSchemaUnsupportedReplacementType() { assertThrows(exClass, () -> checkReplacementWithSchema("time", new Date()), exMessage); assertThrows(exClass, () -> checkReplacementWithSchema("timestamp", new Date()), exMessage); - assertThrows(exClass, () -> checkReplacementWithSchema("array", singletonList(123)), exMessage); + assertThrows(exClass, () -> checkReplacementWithSchema("array", List.of(123)), exMessage); } @Test @@ -249,7 +245,7 @@ public void testReplacementTypeMismatch() { assertThrows(exClass, () -> checkReplacementSchemaless("bigdec", "foo"), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("int", new Date()), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("int", new Object()), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless(Arrays.asList("string", "int"), "foo"), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless(List.of("string", "int"), "foo"), exMessage); } @Test @@ -259,17 +255,17 @@ public void 
testEmptyStringReplacementValue() { @Test public void testNullListAndMapReplacementsAreMutable() { - final List maskFields = Arrays.asList("array", "map"); + final List maskFields = List.of("array", "map"); final Struct updatedValue = (Struct) transform(maskFields, null).apply(record(SCHEMA, VALUES_WITH_SCHEMA)).value(); @SuppressWarnings("unchecked") List actualList = (List) updatedValue.get("array"); - assertEquals(Collections.emptyList(), actualList); + assertEquals(List.of(), actualList); actualList.add(0); - assertEquals(Collections.singletonList(0), actualList); + assertEquals(List.of(0), actualList); @SuppressWarnings("unchecked") Map actualMap = (Map) updatedValue.get("map"); - assertEquals(Collections.emptyMap(), actualMap); + assertEquals(Map.of(), actualMap); actualMap.put("k", "v"); - assertEquals(Collections.singletonMap("k", "v"), actualMap); + assertEquals(Map.of("k", "v"), actualMap); } @Test diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java index 5f0e51559bd14..7f47dd0f8c0f0 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java @@ -30,7 +30,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -57,7 +56,7 @@ public void teardown() { @Test public void schemaNameUpdate() { - xform.configure(Collections.singletonMap("schema.name", "foo")); + xform.configure(Map.of("schema.name", "foo")); final SinkRecord record = new SinkRecord("", 0, null, null, SchemaBuilder.struct().build(), null, 0); final SinkRecord updatedRecord = xform.apply(record); assertEquals("foo", updatedRecord.valueSchema().name()); @@ -65,7 +64,7 @@ public void schemaNameUpdate() { @Test public void schemaVersionUpdate() { - xform.configure(Collections.singletonMap("schema.version", 42)); + xform.configure(Map.of("schema.version", 42)); final SinkRecord record = new SinkRecord("", 0, null, null, SchemaBuilder.struct().build(), null, 0); final SinkRecord updatedRecord = xform.apply(record); assertEquals(42, updatedRecord.valueSchema().version()); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java index a807ad1fc2151..d67d031482dd6 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java @@ -34,7 +34,6 @@ import org.junit.jupiter.params.provider.MethodSource; import java.util.Calendar; -import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Map; @@ -108,13 +107,13 @@ public void teardown() { @Test public void testConfigNoTargetType() { - assertThrows(ConfigException.class, () -> xformValue.configure(Collections.emptyMap())); + assertThrows(ConfigException.class, () -> xformValue.configure(Map.of())); } @Test public void testConfigInvalidTargetType() { assertThrows(ConfigException.class, - () -> 
xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "invalid"))); + () -> xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "invalid"))); } @Test @@ -136,7 +135,7 @@ public void testConfigValidUnixPrecision() { @Test public void testConfigMissingFormat() { assertThrows(ConfigException.class, - () -> xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "string"))); + () -> xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "string"))); } @Test @@ -151,7 +150,7 @@ public void testConfigInvalidFormat() { @Test public void testSchemalessIdentity() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -160,7 +159,7 @@ public void testSchemalessIdentity() { @Test public void testSchemalessTimestampToDate() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -169,7 +168,7 @@ public void testSchemalessTimestampToDate() { @Test public void testSchemalessTimestampToTime() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -178,7 +177,7 @@ public void testSchemalessTimestampToTime() { @Test public void testSchemalessTimestampToUnix() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -202,7 +201,7 @@ public void testSchemalessTimestampToString() { @Test public void testSchemalessDateToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE.getTime())); assertNull(transformed.valueSchema()); @@ -212,7 +211,7 @@ public void testSchemalessDateToTimestamp() { @Test public void testSchemalessTimeToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(TIME.getTime())); assertNull(transformed.valueSchema()); @@ -222,7 +221,7 @@ public void testSchemalessTimeToTimestamp() { @Test public void testSchemalessUnixToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME_UNIX)); assertNull(transformed.valueSchema()); @@ -246,7 +245,7 
@@ public void testSchemalessStringToTimestamp() { @Test public void testWithSchemaIdentity() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -255,7 +254,7 @@ public void testWithSchemaIdentity() { @Test public void testWithSchemaTimestampToDate() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Date.SCHEMA, transformed.valueSchema()); @@ -264,7 +263,7 @@ public void testWithSchemaTimestampToDate() { @Test public void testWithSchemaTimestampToTime() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Time.SCHEMA, transformed.valueSchema()); @@ -273,7 +272,7 @@ public void testWithSchemaTimestampToTime() { @Test public void testWithSchemaTimestampToUnix() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Schema.INT64_SCHEMA, transformed.valueSchema()); @@ -348,7 +347,7 @@ private void testSchemalessNullFieldConversion(String targetType) { @Test public void testWithSchemaDateToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Date.SCHEMA, DATE.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -358,7 +357,7 @@ public void testWithSchemaDateToTimestamp() { @Test public void testWithSchemaTimeToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Time.SCHEMA, TIME.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -368,7 +367,7 @@ public void testWithSchemaTimeToTimestamp() { @Test public void testWithSchemaUnixToTimestamp() { - xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Schema.INT64_SCHEMA, DATE_PLUS_TIME_UNIX)); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -530,11 +529,11 @@ public void testSchemalessFieldConversion() { config.put(TimestampConverter.FIELD_CONFIG, "ts"); xformValue.configure(config); - Object value = Collections.singletonMap("ts", DATE_PLUS_TIME.getTime()); + Object value = Map.of("ts", DATE_PLUS_TIME.getTime()); SourceRecord 
transformed = xformValue.apply(createRecordSchemaless(value)); assertNull(transformed.valueSchema()); - assertEquals(Collections.singletonMap("ts", DATE.getTime()), transformed.value()); + assertEquals(Map.of("ts", DATE.getTime()), transformed.value()); } @Test @@ -590,7 +589,7 @@ public void testWithSchemaNullFieldWithDefaultConversion(boolean replaceNullWith .build(); assertEquals(expectedSchema, transformed.valueSchema()); - assertEquals(null, ((Struct) transformed.value()).get("ts")); + assertNull(((Struct) transformed.value()).get("ts")); assertEquals("test", ((Struct) transformed.value()).get("other")); } @@ -716,7 +715,7 @@ public void testSchemalessStringToUnix_Seconds() { @Test public void testKey() { - xformKey.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformKey.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, null, DATE_PLUS_TIME.getTime(), null, null)); assertNull(transformed.keySchema()); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java index 43b3b1f384ff5..a98c4406ad9ed 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java @@ -23,7 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Collections; +import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -32,7 +32,7 @@ public class TimestampRouterTest { @BeforeEach public void setup() { xform = new TimestampRouter<>(); - xform.configure(Collections.emptyMap()); // defaults + xform.configure(Map.of()); // defaults } @AfterEach diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java index df528cf518a2a..775bfbabac252 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java @@ -29,7 +29,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -55,7 +54,7 @@ public void teardown() { @Test public void schemaless() { - xform.configure(Collections.singletonMap("fields", "a,b")); + xform.configure(Map.of("fields", "a,b")); final HashMap value = new HashMap<>(); value.put("a", 1); @@ -75,7 +74,7 @@ public void schemaless() { @Test public void withSchema() { - xform.configure(Collections.singletonMap("fields", "a,b")); + xform.configure(Map.of("fields", "a,b")); final Schema valueSchema = SchemaBuilder.struct() .field("a", Schema.INT32_SCHEMA) @@ -106,7 +105,7 @@ public void withSchema() { @Test public void nonExistingField() { - xform.configure(Collections.singletonMap("fields", "not_exist")); + xform.configure(Map.of("fields", "not_exist")); final Schema valueSchema = SchemaBuilder.struct() .field("a", Schema.INT32_SCHEMA) diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java 
b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java index d400141c95b71..a0c2e2c486171 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java @@ -35,18 +35,18 @@ public class FieldSyntaxVersionTest { @Test void shouldAppendConfigToDef() { ConfigDef def = FieldSyntaxVersion.appendConfigTo(new ConfigDef()); - assertEquals(def.configKeys().size(), 1); + assertEquals(1, def.configKeys().size()); final ConfigDef.ConfigKey configKey = def.configKeys().get("field.syntax.version"); - assertEquals(configKey.name, "field.syntax.version"); - assertEquals(configKey.defaultValue, "V1"); + assertEquals("field.syntax.version", configKey.name); + assertEquals("V1", configKey.defaultValue); } @Test void shouldFailWhenAppendConfigToDefAgain() { ConfigDef def = FieldSyntaxVersion.appendConfigTo(new ConfigDef()); - assertEquals(def.configKeys().size(), 1); + assertEquals(1, def.configKeys().size()); ConfigException e = assertThrows(ConfigException.class, () -> FieldSyntaxVersion.appendConfigTo(def)); - assertEquals(e.getMessage(), "Configuration field.syntax.version is defined twice."); + assertEquals("Configuration field.syntax.version is defined twice.", e.getMessage()); } @ParameterizedTest diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java index e3e3920858d27..39654859edc28 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java @@ -26,13 +26,11 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -58,16 +56,16 @@ public void testNameMayNotBeEmptyInConfig() { @Test public void testConfig() { HasHeaderKey predicate = new HasHeaderKey<>(); - predicate.config().validate(Collections.singletonMap("name", "foo")); + predicate.config().validate(Map.of("name", "foo")); - List configs = predicate.config().validate(Collections.singletonMap("name", "")); - assertEquals(singletonList("Invalid value for configuration name: String must be non-empty"), configs.get(0).errorMessages()); + List configs = predicate.config().validate(Map.of("name", "")); + assertEquals(List.of("Invalid value for configuration name: String must be non-empty"), configs.get(0).errorMessages()); } @Test public void testTest() { HasHeaderKey predicate = new HasHeaderKey<>(); - predicate.configure(Collections.singletonMap("name", "foo")); + predicate.configure(Map.of("name", "foo")); assertTrue(predicate.test(recordWithHeaders("foo"))); assertTrue(predicate.test(recordWithHeaders("foo", "bar"))); @@ -88,18 +86,7 @@ private SourceRecord recordWithHeaders(String... 
headers) { Arrays.stream(headers).map(TestHeader::new).collect(Collectors.toList())); } - private static class TestHeader implements Header { - - private final String key; - - public TestHeader(String key) { - this.key = key; - } - - @Override - public String key() { - return key; - } + private record TestHeader(String key) implements Header { @Override public Schema schema() { diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java index 3d9ac4dba9048..140d0d6c30f6f 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java @@ -23,7 +23,6 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -61,9 +60,9 @@ public void testPatternIsValidRegexInConfig() { @Test public void testConfig() { TopicNameMatches predicate = new TopicNameMatches<>(); - predicate.config().validate(Collections.singletonMap("pattern", "my-prefix-.*")); + predicate.config().validate(Map.of("pattern", "my-prefix-.*")); - List configs = predicate.config().validate(Collections.singletonMap("pattern", "*")); + List configs = predicate.config().validate(Map.of("pattern", "*")); List errorMsgs = configs.get(0).errorMessages(); assertEquals(1, errorMsgs.size()); assertTrue(errorMsgs.get(0).contains("Invalid regex")); @@ -72,7 +71,7 @@ public void testConfig() { @Test public void testTest() { TopicNameMatches predicate = new TopicNameMatches<>(); - predicate.configure(Collections.singletonMap("pattern", "my-prefix-.*")); + predicate.configure(Map.of("pattern", "my-prefix-.*")); assertTrue(predicate.test(recordWithTopicName("my-prefix-"))); assertTrue(predicate.test(recordWithTopicName("my-prefix-foo"))); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java index 5060346a2d91b..3a9ef48f8dddb 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java @@ -20,7 +20,7 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; +import java.util.List; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -34,11 +34,11 @@ public void testNullList() { @Test public void testEmptyList() { assertThrows(ConfigException.class, - () -> new NonEmptyListValidator().ensureValid("foo", Collections.emptyList())); + () -> new NonEmptyListValidator().ensureValid("foo", List.of())); } @Test public void testValidList() { - new NonEmptyListValidator().ensureValid("foo", Collections.singletonList("foo")); + new NonEmptyListValidator().ensureValid("foo", List.of("foo")); } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImpl.java new file mode 100644 index 0000000000000..3a8b74343267e --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImpl.java @@ 
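The hunks above migrate the Connect transform tests from Collections.singletonMap / singletonList / emptyMap / emptyList to the Java 9+ Map.of / List.of factories. Two behavioral differences are worth keeping in mind, and they explain why entries that hold null values (for example supportedTypes.put("opt_int32", null) in FlattenTest) stay on HashMap: the new factories reject null keys and values, and the collections they return are immutable. A minimal standalone illustration, not part of the patch (class and variable names here are hypothetical):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MapFactoryMigrationSketch {
    public static void main(String[] args) {
        // Collections.singletonMap tolerates a null value ...
        Map<String, Integer> legacy = Collections.singletonMap("opt_int32", null);
        System.out.println(legacy); // {opt_int32=null}

        // ... but Map.of rejects null keys and values at creation time.
        try {
            Map.of("opt_int32", (Integer) null);
        } catch (NullPointerException e) {
            System.out.println("Map.of rejects null values");
        }

        // The factories also return immutable collections, so mutation fails fast.
        List<Integer> immutable = List.of(1, 2, 3);
        try {
            immutable.add(4);
        } catch (UnsupportedOperationException e) {
            System.out.println("List.of results are immutable");
        }

        // Tests that need null entries or later mutation therefore keep HashMap.
        Map<String, Integer> mutable = new HashMap<>();
        mutable.put("opt_int32", null);
        System.out.println(mutable); // {opt_int32=null}
    }
}
```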
-0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.record.ControlRecordType; +import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MutableRecordBatch; +import org.apache.kafka.common.record.Record; +import org.apache.kafka.common.record.Records; +import org.apache.kafka.common.requests.TransactionResult; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.storage.log.FetchIsolation; +import org.apache.kafka.server.util.KafkaScheduler; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.UnifiedLog; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; + +/** + * Coordinator loader which reads records from a partition and replays them + * to a group coordinator. + * + * @param The record type. + */ +public class CoordinatorLoaderImpl implements CoordinatorLoader { + + /** + * The interval between updating the last committed offset during loading, in offsets. Smaller + * values commit more often at the expense of loading times when the workload is simple and does + * not create collections that need to participate in {@link CoordinatorPlayback} snapshotting. + * Larger values commit less often and allow more temporary data to accumulate before the next + * commit when the workload creates many temporary collections that need to be snapshotted. + * + * The value of 16,384 was chosen as a trade-off between the performance of these two workloads. + * + * When changing this value, please run the GroupCoordinatorShardLoadingBenchmark to evaluate + * the relative change in performance. 
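The constant documented above controls how often the loader commits progress while replaying a partition. As orientation, a caller might wire the loader up roughly as sketched below; only the constructor argument order, DEFAULT_COMMIT_INTERVAL_OFFSETS, load(tp, coordinator) and close() come from the new file, while the generic type arguments (stripped in this rendering of the diff), the Optional<Long> end-offset type, the buffer size, and all class and variable names are assumptions:

```java
// Wiring sketch only. Generic type arguments are assumptions (this rendering of the diff
// strips them); constructor argument order, DEFAULT_COMMIT_INTERVAL_OFFSETS, load() and
// close() are taken from the new CoordinatorLoaderImpl above.
package org.apache.kafka.coordinator.common.runtime;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.storage.internals.log.UnifiedLog;

import java.util.Optional;
import java.util.function.Function;

public class CoordinatorLoaderWiringSketch<T> {

    public void loadShard(
        Function<TopicPartition, Optional<UnifiedLog>> partitionLogSupplier,
        Function<TopicPartition, Optional<Long>> partitionLogEndOffsetSupplier, // Optional<Long> is assumed
        Deserializer<T> deserializer,                                           // same-package type used by the patch
        CoordinatorPlayback<T> shard,
        TopicPartition tp
    ) throws Exception {
        CoordinatorLoaderImpl<T> loader = new CoordinatorLoaderImpl<>(
            Time.SYSTEM,
            partitionLogSupplier,
            partitionLogEndOffsetSupplier,
            deserializer,
            5 * 1024 * 1024,                                       // loadBufferSize: arbitrary example value
            CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS  // commit progress every 16,384 offsets
        );
        try {
            // load() schedules doLoad() on the loader's single-threaded KafkaScheduler and completes
            // the returned future once every batch up to the log end offset has been replayed.
            loader.load(tp, shard).join();
        } finally {
            loader.close();
        }
    }
}
```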
+ */ + public static final long DEFAULT_COMMIT_INTERVAL_OFFSETS = 16384; + + private static final Logger LOG = LoggerFactory.getLogger(CoordinatorLoaderImpl.class); + + private final Time time; + private final Function> partitionLogSupplier; + private final Function> partitionLogEndOffsetSupplier; + private final Deserializer deserializer; + private final int loadBufferSize; + private final long commitIntervalOffsets; + + private final AtomicBoolean isRunning = new AtomicBoolean(true); + private final KafkaScheduler scheduler = new KafkaScheduler(1); + + public CoordinatorLoaderImpl( + Time time, + Function> partitionLogSupplier, + Function> partitionLogEndOffsetSupplier, + Deserializer deserializer, + int loadBufferSize, + long commitIntervalOffsets + ) { + this.time = time; + this.partitionLogSupplier = partitionLogSupplier; + this.partitionLogEndOffsetSupplier = partitionLogEndOffsetSupplier; + this.deserializer = deserializer; + this.loadBufferSize = loadBufferSize; + this.commitIntervalOffsets = commitIntervalOffsets; + this.scheduler.startup(); + } + + /** + * Loads the coordinator by reading all the records from the TopicPartition + * and applying them to the Replayable object. + * + * @param tp The TopicPartition to read from. + * @param coordinator The object to apply records to. + */ + @Override + public CompletableFuture load(TopicPartition tp, CoordinatorPlayback coordinator) { + final CompletableFuture future = new CompletableFuture<>(); + long startTimeMs = time.milliseconds(); + try { + ScheduledFuture result = scheduler.scheduleOnce(String.format("Load coordinator from %s", tp), + () -> doLoad(tp, coordinator, future, startTimeMs)); + if (result.isCancelled()) { + future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")); + } + } catch (Exception e) { + future.completeExceptionally(e); + } + return future; + } + + private void doLoad( + TopicPartition tp, + CoordinatorPlayback coordinator, + CompletableFuture future, + long startTimeMs + ) { + long schedulerQueueTimeMs = time.milliseconds() - startTimeMs; + try { + Optional logOpt = partitionLogSupplier.apply(tp); + if (logOpt.isEmpty()) { + future.completeExceptionally(new NotLeaderOrFollowerException( + "Could not load records from " + tp + " because the log does not exist.")); + return; + } + + UnifiedLog log = logOpt.get(); + + // Buffer may not be needed if records are read from memory. + ByteBuffer buffer = ByteBuffer.allocate(0); + long currentOffset = log.logStartOffset(); + LoadStats stats = new LoadStats(); + + long lastCommittedOffset = -1L; + while (shouldFetchNextBatch(currentOffset, logEndOffset(tp), stats.readAtLeastOneRecord)) { + FetchDataInfo fetchDataInfo = log.read(currentOffset, loadBufferSize, FetchIsolation.LOG_END, true); + + stats.readAtLeastOneRecord = fetchDataInfo.records.sizeInBytes() > 0; + + // Reuses a potentially larger buffer by updating it when reading from FileRecords. 
+ MemoryRecords memoryRecords = toReadableMemoryRecords(tp, fetchDataInfo.records, buffer); + if (fetchDataInfo.records instanceof FileRecords) { + buffer = memoryRecords.buffer(); + } + + ReplayResult replayResult = processMemoryRecords(tp, log, memoryRecords, coordinator, stats, currentOffset, lastCommittedOffset); + currentOffset = replayResult.nextOffset; + lastCommittedOffset = replayResult.lastCommittedOffset; + } + + long endTimeMs = time.milliseconds(); + + if (logEndOffset(tp) == -1L) { + future.completeExceptionally(new NotLeaderOrFollowerException( + String.format("Stopped loading records from %s because the partition is not online or is no longer the leader.", tp))); + } else if (isRunning.get()) { + future.complete(new LoadSummary(startTimeMs, endTimeMs, schedulerQueueTimeMs, stats.numRecords, stats.numBytes)); + } else { + future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")); + } + } catch (Throwable ex) { + future.completeExceptionally(ex); + } + } + + private long logEndOffset(TopicPartition tp) { + return partitionLogEndOffsetSupplier.apply(tp).orElse(-1L); + } + + /** + * Returns true if it's still valid to fetch the next batch of records. + *

+ * This method ensures fetching continues only under safe and meaningful conditions:
+ * <ul>
+ *   <li>The current offset is less than the log end offset.</li>
+ *   <li>At least one record was read in the previous fetch. This ensures that fetching stops even if the
+ *       current offset remains smaller than the log end offset but the log is empty. This could happen with compacted topics.</li>
+ *   <li>The log end offset is not -1L, which ensures the partition is online and is still the leader.</li>
+ *   <li>The loader is still running.</li>
+ * </ul>
    + */ + private boolean shouldFetchNextBatch(long currentOffset, long logEndOffset, boolean readAtLeastOneRecord) { + return currentOffset < logEndOffset && readAtLeastOneRecord && isRunning.get(); + } + + private MemoryRecords toReadableMemoryRecords(TopicPartition tp, Records records, ByteBuffer buffer) throws IOException { + if (records instanceof MemoryRecords memoryRecords) { + return memoryRecords; + } else if (records instanceof FileRecords fileRecords) { + int sizeInBytes = fileRecords.sizeInBytes(); + int bytesNeeded = Math.max(loadBufferSize, sizeInBytes); + + // "minOneMessage = true in the above log.read() means that the buffer may need to + // be grown to ensure progress can be made. + if (buffer.capacity() < bytesNeeded) { + if (loadBufferSize < bytesNeeded) { + LOG.warn("Loaded metadata from {} with buffer larger ({} bytes) than" + + " configured buffer size ({} bytes).", tp, bytesNeeded, loadBufferSize); + } + + buffer = ByteBuffer.allocate(bytesNeeded); + } else { + buffer.clear(); + } + + fileRecords.readInto(buffer, 0); + return MemoryRecords.readableRecords(buffer); + } else { + throw new IllegalArgumentException("Unsupported record type: " + records.getClass()); + } + } + + private ReplayResult processMemoryRecords( + TopicPartition tp, + UnifiedLog log, + MemoryRecords memoryRecords, + CoordinatorPlayback coordinator, + LoadStats loadStats, + long currentOffset, + long lastCommittedOffset + ) { + for (MutableRecordBatch batch : memoryRecords.batches()) { + if (batch.isControlBatch()) { + for (Record record : batch) { + loadStats.numRecords++; + + ControlRecordType controlRecord = ControlRecordType.parse(record.key()); + if (controlRecord == ControlRecordType.COMMIT) { + if (LOG.isTraceEnabled()) { + LOG.trace("Replaying end transaction marker from {} at offset {} to commit" + + " transaction with producer id {} and producer epoch {}.", + tp, record.offset(), batch.producerId(), batch.producerEpoch()); + } + coordinator.replayEndTransactionMarker( + batch.producerId(), + batch.producerEpoch(), + TransactionResult.COMMIT + ); + } else if (controlRecord == ControlRecordType.ABORT) { + if (LOG.isTraceEnabled()) { + LOG.trace("Replaying end transaction marker from {} at offset {} to abort" + + " transaction with producer id {} and producer epoch {}.", + tp, record.offset(), batch.producerId(), batch.producerEpoch()); + } + coordinator.replayEndTransactionMarker( + batch.producerId(), + batch.producerEpoch(), + TransactionResult.ABORT + ); + } + } + } else { + for (Record record : batch) { + loadStats.numRecords++; + + Optional coordinatorRecordOpt = Optional.empty(); + try { + coordinatorRecordOpt = Optional.ofNullable(deserializer.deserialize(record.key(), record.value())); + } catch (Deserializer.UnknownRecordTypeException ex) { + LOG.warn("Unknown record type {} while loading offsets and group metadata from {}." + + " Ignoring it. 
It could be a left over from an aborted upgrade.", ex.unknownType(), tp); + } catch (RuntimeException ex) { + String msg = String.format("Deserializing record %s from %s failed.", record, tp); + LOG.error(msg, ex); + throw new RuntimeException(msg, ex); + } + + coordinatorRecordOpt.ifPresent(coordinatorRecord -> { + try { + if (LOG.isTraceEnabled()) { + LOG.trace("Replaying record {} from {} at offset {} with producer id {}" + + " and producer epoch {}.", coordinatorRecord, tp, record.offset(), batch.producerId(), batch.producerEpoch()); + } + coordinator.replay( + record.offset(), + batch.producerId(), + batch.producerEpoch(), + coordinatorRecord + ); + } catch (RuntimeException ex) { + String msg = String.format("Replaying record %s from %s at offset %d with producer id %d and" + + " producer epoch %d failed.", coordinatorRecord, tp, record.offset(), + batch.producerId(), batch.producerEpoch()); + LOG.error(msg, ex); + throw new RuntimeException(msg, ex); + } + }); + } + } + + // Note that the high watermark can be greater than the current offset but as we load more records + // the current offset will eventually surpass the high watermark. Also note that the high watermark + // will continue to advance while loading. + currentOffset = batch.nextOffset(); + long currentHighWatermark = log.highWatermark(); + if (currentOffset >= currentHighWatermark) { + coordinator.updateLastWrittenOffset(currentOffset); + + if (currentHighWatermark > lastCommittedOffset) { + coordinator.updateLastCommittedOffset(currentHighWatermark); + lastCommittedOffset = currentHighWatermark; + } + } else if (currentOffset - lastCommittedOffset >= commitIntervalOffsets) { + coordinator.updateLastWrittenOffset(currentOffset); + coordinator.updateLastCommittedOffset(currentOffset); + lastCommittedOffset = currentOffset; + } + } + loadStats.numBytes += memoryRecords.sizeInBytes(); + return new ReplayResult(currentOffset, lastCommittedOffset); + } + + /** + * Closes the loader. + */ + @Override + public void close() throws Exception { + if (!isRunning.compareAndSet(true, false)) { + LOG.warn("Coordinator loader is already shutting down."); + return; + } + scheduler.shutdown(); + } + + /** + * A helper class to track key metrics during the data loading operation. + */ + private static class LoadStats { + private long numRecords = 0L; + private long numBytes = 0L; + private boolean readAtLeastOneRecord = true; + + @Override + public String toString() { + return "LoadStats(" + + "numRecords=" + numRecords + + ", numBytes=" + numBytes + + ", readAtLeastOneRecord=" + readAtLeastOneRecord + + ')'; + } + } + + private record ReplayResult(long nextOffset, long lastCommittedOffset) { } +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataDelta.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataDelta.java new file mode 100644 index 0000000000000..c1fa7f35cc251 --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataDelta.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; + +import java.util.Collection; +import java.util.Set; + +/** + * Provides metadata deltas to Coordinators (GroupCoordinator, ShareCoordinator, etc) such as changed topics and deleted topics + * Implementations should be immutable. + */ +public interface CoordinatorMetadataDelta { + + CoordinatorMetadataDelta EMPTY = emptyDelta(); + + Collection createdTopicIds(); + + Collection changedTopicIds(); + + Set deletedTopicIds(); + + /** + * Returns the previous image of the coordinator metadata. + * This image is a snapshot of the metadata before the delta occurred. + */ + CoordinatorMetadataImage image(); + + private static CoordinatorMetadataDelta emptyDelta() { + return new CoordinatorMetadataDelta() { + @Override + public Collection createdTopicIds() { + return Set.of(); + } + + @Override + public Collection changedTopicIds() { + return Set.of(); + } + + @Override + public Set deletedTopicIds() { + return Set.of(); + } + + @Override + public CoordinatorMetadataImage image() { + return CoordinatorMetadataImage.EMPTY; + } + }; + } +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataImage.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataImage.java new file mode 100644 index 0000000000000..d294bddf51bae --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetadataImage.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; + +import java.util.List; +import java.util.Optional; +import java.util.Set; + +/** + * Provides metadata to Coordinators (GroupCoordinator, ShareCoordinator, etc) such as topics, partitions, and their configurations. + * Implementations should be thread-safe and immutable. 
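// Illustrative sketch only (not shown in the surrounding patch): how a coordinator shard
// might consume the CoordinatorMetadataDelta and CoordinatorMetadataImage interfaces above.
// The element types (Uuid collections, Optional<TopicMetadata>) are inferred from context,
// and removeTopic/refreshTopic are hypothetical helpers.
import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataDelta;
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;

final class SampleShardMetadataHandling {
    void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) {
        // Drop in-memory state for topics that no longer exist.
        for (Uuid topicId : delta.deletedTopicIds()) {
            removeTopic(topicId);
        }
        // Re-read partition counts for topics whose registration changed.
        for (Uuid topicId : delta.changedTopicIds()) {
            newImage.topicMetadata(topicId)
                .ifPresent(topic -> refreshTopic(topic.id(), topic.partitionCount()));
        }
    }

    private void removeTopic(Uuid topicId) { /* hypothetical helper */ }

    private void refreshTopic(Uuid topicId, int partitionCount) { /* hypothetical helper */ }
}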
+ */ +public interface CoordinatorMetadataImage { + CoordinatorMetadataImage EMPTY = emptyImage(); + + Set topicIds(); + + Set topicNames(); + + Optional topicMetadata(String topicName); + + Optional topicMetadata(Uuid topicId); + + CoordinatorMetadataDelta emptyDelta(); + + long version(); + + boolean isEmpty(); + + /** + * Metadata about a particular topic + */ + interface TopicMetadata { + String name(); + + Uuid id(); + + int partitionCount(); + + List partitionRacks(int partitionId); + } + + private static CoordinatorMetadataImage emptyImage() { + + return new CoordinatorMetadataImage() { + @Override + public Set topicIds() { + return Set.of(); + } + + @Override + public Set topicNames() { + return Set.of(); + } + + @Override + public Optional topicMetadata(String topicName) { + return Optional.empty(); + } + + @Override + public Optional topicMetadata(Uuid topicId) { + return Optional.empty(); + } + + @Override + public CoordinatorMetadataDelta emptyDelta() { + return CoordinatorMetadataDelta.EMPTY; + } + + @Override + public long version() { + return 0L; + } + + @Override + public boolean isEmpty() { + return true; + } + }; + } + +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java index 10089a7145928..45d9d37348774 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java @@ -21,7 +21,7 @@ /** * A CoordinatorMetricsShard is mapped to a single CoordinatorShard. The metrics shard records sensors that have been * defined in {@link CoordinatorMetrics}. Coordinator specific gauges and related methods are exposed in the - * implementation of CoordinatorMetricsShard (i.e. {@link GroupCoordinatorMetricsShard}). + * implementation of CoordinatorMetricsShard (such as GroupCoordinatorMetricsShard and ShareCoordinatorMetricsShard). * * For sensors, each shard individually records the observed values. */ diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java index 0c70397874a48..04d7da690f575 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java @@ -48,36 +48,25 @@ public static OUT handleOperationException( ) { ApiError apiError = ApiError.fromThrowable(exception); - switch (apiError.error()) { - case UNKNOWN_SERVER_ERROR: + return switch (apiError.error()) { + case UNKNOWN_SERVER_ERROR -> { log.error("Operation {} with {} hit an unexpected exception: {}.", - operationName, operationInput, exception.getMessage(), exception); - return handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); - - case NETWORK_EXCEPTION: + operationName, operationInput, exception.getMessage(), exception); + yield handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); + } + case NETWORK_EXCEPTION -> // When committing offsets transactionally, we now verify the transaction with the // transaction coordinator. 
Verification can fail with `NETWORK_EXCEPTION`, a // retriable error which older clients may not expect and retry correctly. We // translate the error to `COORDINATOR_LOAD_IN_PROGRESS` because it causes clients // to retry the request without an unnecessary coordinator lookup. - return handler.apply(Errors.COORDINATOR_LOAD_IN_PROGRESS, null); - - case UNKNOWN_TOPIC_OR_PARTITION: - case NOT_ENOUGH_REPLICAS: - case REQUEST_TIMED_OUT: - return handler.apply(Errors.COORDINATOR_NOT_AVAILABLE, null); - - case NOT_LEADER_OR_FOLLOWER: - case KAFKA_STORAGE_ERROR: - return handler.apply(Errors.NOT_COORDINATOR, null); - - case MESSAGE_TOO_LARGE: - case RECORD_LIST_TOO_LARGE: - case INVALID_FETCH_SIZE: - return handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); - - default: - return handler.apply(apiError.error(), apiError.message()); - } + handler.apply(Errors.COORDINATOR_LOAD_IN_PROGRESS, null); + case UNKNOWN_TOPIC_OR_PARTITION, NOT_ENOUGH_REPLICAS, REQUEST_TIMED_OUT -> + handler.apply(Errors.COORDINATOR_NOT_AVAILABLE, null); + case NOT_LEADER_OR_FOLLOWER, KAFKA_STORAGE_ERROR -> handler.apply(Errors.NOT_COORDINATOR, null); + case MESSAGE_TOO_LARGE, RECORD_LIST_TOO_LARGE, INVALID_FETCH_SIZE -> + handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); + default -> handler.apply(apiError.error(), apiError.message()); + }; } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java index 79d2483078809..f4f22b0e36341 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java @@ -62,12 +62,12 @@ void replayEndTransactionMarker( * * @param offset the offset of the last record in the batch plus one. */ - void updateLastWrittenOffset(Long offset); + void updateLastWrittenOffset(long offset); /** * Called when the high watermark advances. * * @param offset The offset of the new high watermark. */ - void updateLastCommittedOffset(Long offset); + void updateLastCommittedOffset(long offset); } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java index 19567344f2d36..52d3f27f3cf7c 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java @@ -39,8 +39,6 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.deferred.DeferredEvent; import org.apache.kafka.deferred.DeferredEventQueue; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; import org.apache.kafka.server.util.timer.Timer; import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.LogConfig; @@ -69,6 +67,7 @@ import java.util.function.Consumer; import java.util.stream.Collectors; +import static java.lang.Math.min; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.CoordinatorWriteEvent.NOT_QUEUED; /** @@ -758,8 +757,14 @@ private void freeCurrentBatch() { // Cancel the linger timeout. currentBatch.lingerTimeoutTask.ifPresent(TimerTask::cancel); - // Release the buffer. 
- bufferSupplier.release(currentBatch.buffer); + // Release the buffer only if it is not larger than the maxBatchSize. + int maxBatchSize = partitionWriter.config(tp).maxMessageSize(); + + if (currentBatch.builder.buffer().capacity() <= maxBatchSize) { + bufferSupplier.release(currentBatch.builder.buffer()); + } else if (currentBatch.buffer.capacity() <= maxBatchSize) { + bufferSupplier.release(currentBatch.buffer); + } currentBatch = null; } @@ -859,7 +864,7 @@ private void maybeAllocateNewBatch( LogConfig logConfig = partitionWriter.config(tp); int maxBatchSize = logConfig.maxMessageSize(); long prevLastWrittenOffset = coordinator.lastWrittenOffset(); - ByteBuffer buffer = bufferSupplier.get(maxBatchSize); + ByteBuffer buffer = bufferSupplier.get(min(INITIAL_BUFFER_SIZE, maxBatchSize)); MemoryRecordsBuilder builder = new MemoryRecordsBuilder( buffer, @@ -1909,20 +1914,15 @@ public void onHighWatermarkUpdated( } /** - * 16KB. Used for initial buffer size for write operations. + * 512KB. Used for initial buffer size for write operations. */ - static final int MIN_BUFFER_SIZE = 16384; + static final int INITIAL_BUFFER_SIZE = 512 * 1024; /** * The log prefix. */ private final String logPrefix; - /** - * The log context. - */ - private final LogContext logContext; - /** * The logger. */ @@ -2009,7 +2009,7 @@ public void onHighWatermarkUpdated( /** * The latest known metadata image. */ - private volatile MetadataImage metadataImage = MetadataImage.EMPTY; + private volatile CoordinatorMetadataImage metadataImage = CoordinatorMetadataImage.EMPTY; /** * Constructor. @@ -2049,7 +2049,6 @@ private CoordinatorRuntime( ExecutorService executorService ) { this.logPrefix = logPrefix; - this.logContext = logContext; this.log = logContext.logger(CoordinatorRuntime.class); this.time = time; this.timer = timer; @@ -2474,18 +2473,18 @@ public void scheduleUnloadOperation( * @param delta The metadata delta. */ public void onNewMetadataImage( - MetadataImage newImage, - MetadataDelta delta + CoordinatorMetadataImage newImage, + CoordinatorMetadataDelta delta ) { throwIfNotRunning(); - log.debug("Scheduling applying of a new metadata image with offset {}.", newImage.offset()); + log.debug("Scheduling applying of a new metadata image with version {}.", newImage.version()); // Update global image. metadataImage = newImage; // Push an event for each coordinator. coordinators.keySet().forEach(tp -> { - scheduleInternalOperation("UpdateImage(tp=" + tp + ", offset=" + newImage.offset() + ")", tp, () -> { + scheduleInternalOperation("UpdateImage(tp=" + tp + ", version=" + newImage.version() + ")", tp, () -> { CoordinatorContext context = coordinators.get(tp); if (context != null) { context.lock.lock(); @@ -2493,18 +2492,18 @@ public void onNewMetadataImage( if (context.state == CoordinatorState.ACTIVE) { // The new image can be applied to the coordinator only if the coordinator // exists and is in the active state. 
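// Illustrative sketch only: the write-buffer policy introduced above, pulled out as a
// self-contained example. INITIAL_BUFFER_SIZE matches the new 512 KiB constant; the
// ArrayDeque pool is an assumption standing in for the runtime's BufferSupplier.
import java.nio.ByteBuffer;
import java.util.ArrayDeque;

final class WriteBufferPoolSketch {
    static final int INITIAL_BUFFER_SIZE = 512 * 1024;
    private final ArrayDeque<ByteBuffer> pool = new ArrayDeque<>();

    ByteBuffer acquire(int maxBatchSize) {
        // Start from the smaller of the initial size and the partition's max batch size;
        // the records builder can grow the buffer if a batch needs more room.
        int size = Math.min(INITIAL_BUFFER_SIZE, maxBatchSize);
        ByteBuffer pooled = pool.poll();
        return (pooled != null && pooled.capacity() >= size) ? pooled : ByteBuffer.allocate(size);
    }

    void release(ByteBuffer buffer, int maxBatchSize) {
        // Buffers that grew beyond the max batch size are dropped instead of pooled, so a
        // single oversized write does not pin memory for the lifetime of the runtime.
        if (buffer.capacity() <= maxBatchSize) {
            buffer.clear();
            pool.offer(buffer);
        }
    }
}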
- log.debug("Applying new metadata image with offset {} to {}.", newImage.offset(), tp); + log.debug("Applying new metadata image with version {} to {}.", newImage.version(), tp); context.coordinator.onNewMetadataImage(newImage, delta); } else { - log.debug("Ignored new metadata image with offset {} for {} because the coordinator is not active.", - newImage.offset(), tp); + log.debug("Ignored new metadata image with version {} for {} because the coordinator is not active.", + newImage.version(), tp); } } finally { context.lock.unlock(); } } else { - log.debug("Ignored new metadata image with offset {} for {} because the coordinator does not exist.", - newImage.offset(), tp); + log.debug("Ignored new metadata image with version {} for {} because the coordinator does not exist.", + newImage.version(), tp); } }); }); diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java index a95f590c5b26b..af775c7c45118 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java @@ -149,7 +149,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { metrics.addMetric(numPartitionsActive, (Gauge) (config, now) -> numPartitionsActiveCounter.get()); metrics.addMetric(numPartitionsFailed, (Gauge) (config, now) -> numPartitionsFailedCounter.get()); - this.partitionLoadSensor = metrics.sensor("GroupPartitionLoadTime"); + this.partitionLoadSensor = metrics.sensor(this.metricsGroup + "-PartitionLoadTime"); this.partitionLoadSensor.add( metrics.metricName( "partition-load-time-max", @@ -163,7 +163,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The average time it took to load the partitions in the last 30 sec." 
), new Avg()); - this.threadIdleSensor = metrics.sensor("ThreadIdleRatio"); + this.threadIdleSensor = metrics.sensor(this.metricsGroup + "-ThreadIdleRatio"); this.threadIdleSensor.add( metrics.metricName( "thread-idle-ratio-avg", @@ -178,7 +178,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event queue time in milliseconds" ) ); - this.eventQueueTimeSensor = metrics.sensor("EventQueueTime"); + this.eventQueueTimeSensor = metrics.sensor(this.metricsGroup + "-EventQueueTime"); this.eventQueueTimeSensor.add(eventQueueTimeHistogram); KafkaMetricHistogram eventProcessingTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -187,7 +187,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event processing time in milliseconds" ) ); - this.eventProcessingTimeSensor = metrics.sensor("EventProcessingTime"); + this.eventProcessingTimeSensor = metrics.sensor(this.metricsGroup + "-EventProcessingTime"); this.eventProcessingTimeSensor.add(eventProcessingTimeHistogram); KafkaMetricHistogram eventPurgatoryTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -196,7 +196,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event purgatory time in milliseconds" ) ); - this.eventPurgatoryTimeSensor = metrics.sensor("EventPurgatoryTime"); + this.eventPurgatoryTimeSensor = metrics.sensor(this.metricsGroup + "-EventPurgatoryTime"); this.eventPurgatoryTimeSensor.add(eventPurgatoryTimeHistogram); KafkaMetricHistogram flushTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -205,7 +205,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " flush time in milliseconds" ) ); - this.flushTimeSensor = metrics.sensor("FlushTime"); + this.flushTimeSensor = metrics.sensor(this.metricsGroup + "-FlushTime"); this.flushTimeSensor.add(flushTimeHistogram); } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java index 6b0f40ddf3323..7734b12751525 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java @@ -17,8 +17,6 @@ package org.apache.kafka.coordinator.common.runtime; import org.apache.kafka.common.requests.TransactionResult; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; /** * CoordinatorShard is basically a replicated state machine managed by the @@ -32,16 +30,16 @@ public interface CoordinatorShard { * * @param newImage The metadata image. */ - default void onLoaded(MetadataImage newImage) {} + default void onLoaded(CoordinatorMetadataImage newImage) {} /** - * A new metadata image is available. This is only called after {@link CoordinatorShard#onLoaded(MetadataImage)} + * A new metadata image is available. This is only called after {@link CoordinatorShard#onLoaded(CoordinatorMetadataImage)} * is called to signal that the coordinator has been fully loaded. * * @param newImage The new metadata image. * @param delta The delta image. 
*/ - default void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) {} + default void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) {} /** * The coordinator has been unloaded. This is used to apply diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java index e6386e35f9d05..985fb48834430 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java @@ -32,8 +32,8 @@ import java.util.concurrent.locks.ReentrantLock; /** - * A concurrent event accumulator which group events per key and ensure that only one - * event with a given key can't be processed concurrently. + * A concurrent event accumulator which groups events per key and ensures that only one + * event with a given key can be processed concurrently. * * This class is threadsafe. * @@ -90,7 +90,7 @@ public interface Event { private int size; /** - * A boolean indicated whether the accumulator is closed. + * A boolean indicating whether the accumulator is closed. */ private boolean closed; @@ -174,7 +174,7 @@ public T poll() { /** * Returns the next {{@link Event}} available. This method blocks for the provided - * time and returns null of no event is available. + * time and returns null if no event is available. * * @param timeout The timeout. * @param unit The timeout unit. diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDelta.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDelta.java new file mode 100644 index 0000000000000..8e340d81c887b --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDelta.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.image.MetadataDelta; + +import java.util.Collection; +import java.util.Set; + +/** + * An implementation of {@link CoordinatorMetadataDelta} that wraps the KRaft MetadataDelta. 
+ */ +public class KRaftCoordinatorMetadataDelta implements CoordinatorMetadataDelta { + + final MetadataDelta metadataDelta; + + public KRaftCoordinatorMetadataDelta(MetadataDelta metadataDelta) { + this.metadataDelta = metadataDelta; + } + + @Override + public Collection createdTopicIds() { + if (metadataDelta == null || metadataDelta.topicsDelta() == null) { + return Set.of(); + } + return metadataDelta.topicsDelta().createdTopicIds(); + } + + @Override + public Collection changedTopicIds() { + if (metadataDelta == null || metadataDelta.topicsDelta() == null) { + return Set.of(); + } + return metadataDelta.topicsDelta().changedTopics().keySet(); + } + + @Override + public Set deletedTopicIds() { + if (metadataDelta == null || metadataDelta.topicsDelta() == null) { + return Set.of(); + } + return metadataDelta.topicsDelta().deletedTopicIds(); + } + + @Override + public CoordinatorMetadataImage image() { + return new KRaftCoordinatorMetadataImage(metadataDelta.image()); + } + + @Override + public String toString() { + return metadataDelta.toString(); + } + + @Override + public boolean equals(Object o) { + if (o == null || !o.getClass().equals(this.getClass())) return false; + KRaftCoordinatorMetadataDelta other = (KRaftCoordinatorMetadataDelta) o; + return metadataDelta.equals(other.metadataDelta); + } + + @Override + public int hashCode() { + return metadataDelta.hashCode(); + } +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImage.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImage.java new file mode 100644 index 0000000000000..c0284a4aed6b4 --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImage.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.image.ClusterImage; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.TopicImage; +import org.apache.kafka.metadata.BrokerRegistration; +import org.apache.kafka.metadata.PartitionRegistration; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +/** + * An implementation of {@link CoordinatorMetadataImage} that wraps the KRaft MetadataImage. 
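// Illustrative sketch only: wiring the KRaft wrapper above (and the image wrapper that
// follows) into the runtime's new onNewMetadataImage(CoordinatorMetadataImage,
// CoordinatorMetadataDelta) signature. The publish() helper is an assumption.
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataDelta;
import org.apache.kafka.coordinator.common.runtime.CoordinatorMetadataImage;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime;
import org.apache.kafka.coordinator.common.runtime.KRaftCoordinatorMetadataDelta;
import org.apache.kafka.coordinator.common.runtime.KRaftCoordinatorMetadataImage;
import org.apache.kafka.image.MetadataDelta;
import org.apache.kafka.image.MetadataImage;

final class MetadataPublishSketch {
    static void publish(CoordinatorRuntime<?, ?> runtime, MetadataImage newImage, MetadataDelta delta) {
        // Wrap the KRaft types once and hand the coordinator-facing views to the runtime,
        // which then fans the update out to each active coordinator shard.
        CoordinatorMetadataImage image = new KRaftCoordinatorMetadataImage(newImage);
        CoordinatorMetadataDelta coordinatorDelta = new KRaftCoordinatorMetadataDelta(delta);
        runtime.onNewMetadataImage(image, coordinatorDelta);
    }
}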
+ */ +public class KRaftCoordinatorMetadataImage implements CoordinatorMetadataImage { + + private final MetadataImage metadataImage; + + public KRaftCoordinatorMetadataImage(MetadataImage metadataImage) { + this.metadataImage = metadataImage; + } + + @Override + public Set topicIds() { + return Collections.unmodifiableSet(metadataImage.topics().topicsById().keySet()); + } + + @Override + public Set topicNames() { + return Collections.unmodifiableSet(metadataImage.topics().topicsByName().keySet()); + } + + @Override + public Optional topicMetadata(Uuid topicId) { + TopicImage topicImage = metadataImage.topics().getTopic(topicId); + if (topicImage == null) return Optional.empty(); + + ClusterImage clusterImage = metadataImage.cluster(); + if (clusterImage == null) return Optional.empty(); + + return Optional.of(new KraftTopicMetadata(topicImage, clusterImage)); + } + + @Override + public Optional topicMetadata(String topicName) { + TopicImage topicImage = metadataImage.topics().getTopic(topicName); + if (topicImage == null) return Optional.empty(); + + ClusterImage clusterImage = metadataImage.cluster(); + if (clusterImage == null) return Optional.empty(); + + return Optional.of(new KraftTopicMetadata(topicImage, clusterImage)); + } + + @Override + public CoordinatorMetadataDelta emptyDelta() { + return new KRaftCoordinatorMetadataDelta(new MetadataDelta(metadataImage)); + } + + @Override + public long version() { + return metadataImage.offset(); + } + + @Override + public boolean isEmpty() { + return metadataImage.isEmpty(); + } + + @Override + public String toString() { + return metadataImage.toString(); + } + + @Override + public boolean equals(Object o) { + if (o == null || !o.getClass().equals(this.getClass())) return false; + KRaftCoordinatorMetadataImage other = (KRaftCoordinatorMetadataImage) o; + return metadataImage.equals(other.metadataImage); + } + + @Override + public int hashCode() { + return metadataImage.hashCode(); + } + + public static class KraftTopicMetadata implements TopicMetadata { + private final TopicImage topicImage; + private final ClusterImage clusterImage; + + public KraftTopicMetadata(TopicImage topicImage, ClusterImage clusterImage) { + this.topicImage = topicImage; + this.clusterImage = clusterImage; + } + + @Override + public String name() { + return topicImage.name(); + } + + @Override + public Uuid id() { + return topicImage.id(); + } + + @Override + public int partitionCount() { + return topicImage.partitions().size(); + } + + @Override + public List partitionRacks(int partition) { + List racks = new ArrayList<>(); + PartitionRegistration partitionRegistration = topicImage.partitions().get(partition); + if (partitionRegistration != null) { + for (int replicaId : partitionRegistration.replicas) { + BrokerRegistration broker = clusterImage.broker(replicaId); + if (broker != null) { + broker.rack().ifPresent(racks::add); + } + } + return racks; + } else { + return List.of(); + } + } + } +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java index 3aa622cc98b08..278373e6842f3 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.TopicPartition; import 
org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.timeline.SnapshotRegistry; @@ -62,7 +61,7 @@ public class SnapshottableCoordinator, U> implemen */ private long lastCommittedOffset; - SnapshottableCoordinator( + public SnapshottableCoordinator( LogContext logContext, SnapshotRegistry snapshotRegistry, S coordinator, @@ -138,7 +137,7 @@ public synchronized void replayEndTransactionMarker( * @param offset The new last written offset. */ @Override - public synchronized void updateLastWrittenOffset(Long offset) { + public synchronized void updateLastWrittenOffset(long offset) { if (offset <= lastWrittenOffset) { throw new IllegalStateException("New last written offset " + offset + " of " + tp + " must be greater than " + lastWrittenOffset + "."); @@ -157,7 +156,7 @@ public synchronized void updateLastWrittenOffset(Long offset) { * @param offset The new last committed offset. */ @Override - public synchronized void updateLastCommittedOffset(Long offset) { + public synchronized void updateLastCommittedOffset(long offset) { if (offset < lastCommittedOffset) { throw new IllegalStateException("New committed offset " + offset + " of " + tp + " must be greater than or equal to " + lastCommittedOffset + "."); @@ -179,7 +178,7 @@ public synchronized void updateLastCommittedOffset(Long offset) { * * @param newImage The metadata image. */ - synchronized void onLoaded(MetadataImage newImage) { + synchronized void onLoaded(CoordinatorMetadataImage newImage) { this.coordinator.onLoaded(newImage); } @@ -207,7 +206,7 @@ synchronized long lastWrittenOffset() { * @param newImage The new metadata image. * @param delta The delta image. */ - synchronized void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { + synchronized void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) { this.coordinator.onNewMetadataImage(newImage, delta); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImplTest.java new file mode 100644 index 0000000000000..11cdab83cac1c --- /dev/null +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoaderImplTest.java @@ -0,0 +1,750 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.record.ControlRecordType; +import org.apache.kafka.common.record.EndTransactionMarker; +import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.requests.TransactionResult; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.storage.log.FetchIsolation; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.LogOffsetMetadata; +import org.apache.kafka.storage.internals.log.UnifiedLog; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.mockito.ArgumentMatchers; +import org.mockito.invocation.InvocationOnMock; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.apache.kafka.test.TestUtils.assertFutureThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@SuppressWarnings({"unchecked", "resource"}) +@Timeout(60) +class CoordinatorLoaderImplTest { + + private static class StringKeyValueDeserializer implements Deserializer> { + + @Override + public Map.Entry deserialize(ByteBuffer key, ByteBuffer value) throws RuntimeException { + return Map.entry( + StandardCharsets.UTF_8.decode(key).toString(), + StandardCharsets.UTF_8.decode(value).toString() + ); + } + } + + @Test + void testNonexistentPartition() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + Function> partitionLogSupplier = partition -> Optional.empty(); + Function> partitionLogEndOffsetSupplier = partition -> Optional.empty(); + Deserializer> serde = mock(Deserializer.class); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + assertFutureThrows(NotLeaderOrFollowerException.class, loader.load(tp, coordinator)); + } + } + + @Test + void testLoadingIsRejectedWhenClosed() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + Function> partitionLogSupplier = partition -> Optional.of(mock(UnifiedLog.class)); + Function> partitionLogEndOffsetSupplier = partition -> Optional.empty(); + Deserializer> serde = mock(Deserializer.class); + 
CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + loader.close(); + assertFutureThrows(RuntimeException.class, loader.load(tp, coordinator)); + } + } + + @Test + void testLoading() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(9L); + Deserializer> serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(0L); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult1); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + FetchDataInfo readResult3 = logReadResult(5, 100L, (short) 5, Arrays.asList( + new SimpleRecord("k6".getBytes(), "v6".getBytes()), + new SimpleRecord("k7".getBytes(), "v7".getBytes()) + )); + + when(log.read(5L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult3); + + FetchDataInfo readResult4 = logReadResult( + 7, + 100L, + (short) 5, + ControlRecordType.COMMIT + ); + + when(log.read(7L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult4); + + FetchDataInfo readResult5 = logReadResult( + 8, + 500L, + (short) 10, + ControlRecordType.ABORT + ); + + when(log.read(8L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult5); + + CoordinatorLoader.LoadSummary summary = loader.load(tp, coordinator).get(10, TimeUnit.SECONDS); + assertNotNull(summary); + // Includes 7 normal + 2 control (COMMIT, ABORT) + assertEquals(9, summary.numRecords()); + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k1", "v1")); + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k2", "v2")); + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k3", "v3")); + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k4", "v4")); + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k5", "v5")); + verify(coordinator).replay(5L, 100L, (short) 5, Map.entry("k6", "v6")); + verify(coordinator).replay(6L, 100L, (short) 5, Map.entry("k7", "v7")); + verify(coordinator).replayEndTransactionMarker(100L, (short) 5, TransactionResult.COMMIT); + verify(coordinator).replayEndTransactionMarker(500L, (short) 10, TransactionResult.ABORT); + verify(coordinator).updateLastWrittenOffset(2L); + 
verify(coordinator).updateLastWrittenOffset(5L); + verify(coordinator).updateLastWrittenOffset(7L); + verify(coordinator).updateLastWrittenOffset(8L); + verify(coordinator).updateLastCommittedOffset(0L); + } + } + + @Test + void testLoadingStoppedWhenClosed() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(100L); + Deserializer> serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + + FetchDataInfo readResult = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + CountDownLatch latch = new CountDownLatch(1); + when(log.read( + anyLong(), + eq(1000), + eq(FetchIsolation.LOG_END), + eq(true) + )).thenAnswer((InvocationOnMock invocation) -> { + latch.countDown(); + return readResult; + }); + + CompletableFuture result = loader.load(tp, coordinator); + boolean completed = latch.await(10, TimeUnit.SECONDS); + assertTrue(completed, "Log read timeout: Latch did not count down in time."); + loader.close(); + + RuntimeException ex = assertFutureThrows(RuntimeException.class, result); + assertNotNull(ex); + assertEquals("Coordinator loader is closed.", ex.getMessage()); + } + } + + @Test + void testUnknownRecordTypeAreIgnored() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(2L); + StringKeyValueDeserializer serde = mock(StringKeyValueDeserializer.class); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + + FetchDataInfo readResult = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult); + + when(serde.deserialize(any(ByteBuffer.class), any(ByteBuffer.class))) + .thenThrow(new Deserializer.UnknownRecordTypeException((short) 1)) + .thenReturn(Map.entry("k2", "v2")); + + loader.load(tp, coordinator).get(10, TimeUnit.SECONDS); + + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k2", "v2")); + } + } + + @Test + void testDeserializationErrorFailsTheLoading() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(2L); + StringKeyValueDeserializer serde = mock(StringKeyValueDeserializer.class); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader 
= new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + + FetchDataInfo readResult = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult); + + when(serde.deserialize(any(ByteBuffer.class), any(ByteBuffer.class))) + .thenThrow(new RuntimeException("Error!")); + + RuntimeException ex = assertFutureThrows(RuntimeException.class, loader.load(tp, coordinator)); + + assertNotNull(ex); + assertEquals(String.format("Deserializing record DefaultRecord(offset=0, timestamp=-1, key=2 bytes, value=2 bytes) from %s failed.", tp), ex.getMessage()); + } + } + + @Test + void testLoadGroupAndOffsetsWithCorruptedLog() throws Exception { + // Simulate a case where startOffset < endOffset but log is empty. This could theoretically happen + // when all the records are expired and the active segment is truncated or when the partition + // is accidentally corrupted. + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(10L); + StringKeyValueDeserializer serde = mock(StringKeyValueDeserializer.class); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + + FetchDataInfo readResult = logReadResult(0, List.of()); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult); + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)); + } + } + + @Test + void testLoadSummary() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(5L); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + MockTime time = new MockTime(); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + time, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + long startTimeMs = time.milliseconds(); + when(log.logStartOffset()).thenReturn(0L); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenAnswer((InvocationOnMock invocation) -> { + time.sleep(1000); + return readResult1; + }); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + CoordinatorLoader.LoadSummary summary = 
loader.load(tp, coordinator).get(10, TimeUnit.SECONDS); + assertEquals(startTimeMs, summary.startTimeMs()); + assertEquals(startTimeMs + 1000, summary.endTimeMs()); + assertEquals(5, summary.numRecords()); + assertEquals(readResult1.records.sizeInBytes() + readResult2.records.sizeInBytes(), summary.numBytes()); + } + } + + @Test + void testUpdateLastWrittenOffsetOnBatchLoaded() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(7L); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(0L, 0L, 2L); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult1); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + FetchDataInfo readResult3 = logReadResult(5, Arrays.asList( + new SimpleRecord("k6".getBytes(), "v6".getBytes()), + new SimpleRecord("k7".getBytes(), "v7".getBytes()) + )); + + when(log.read(5L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult3); + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)); + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k1", "v1")); + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k2", "v2")); + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k3", "v3")); + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k4", "v4")); + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k5", "v5")); + verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k6", "v6")); + verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k7", "v7")); + verify(coordinator, times(0)).updateLastWrittenOffset(0L); + verify(coordinator, times(1)).updateLastWrittenOffset(2L); + verify(coordinator, times(1)).updateLastWrittenOffset(5L); + verify(coordinator, times(1)).updateLastWrittenOffset(7L); + verify(coordinator, times(1)).updateLastCommittedOffset(0L); + verify(coordinator, times(1)).updateLastCommittedOffset(2L); + verify(coordinator, times(0)).updateLastCommittedOffset(5L); + } + } + + @Test + void testUpdateLastWrittenOffsetAndUpdateLastCommittedOffsetNoRecordsRead() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> 
partitionLogEndOffsetSupplier = partition -> Optional.of(0L); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(0L); + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)); + + verify(coordinator, times(0)).updateLastWrittenOffset(anyLong()); + verify(coordinator, times(0)).updateLastCommittedOffset(anyLong()); + } + } + + @Test + void testUpdateLastWrittenOffsetOnBatchLoadedWhileHighWatermarkAhead() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(7L); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(5L, 7L, 7L); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult1); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + FetchDataInfo readResult3 = logReadResult(5, Arrays.asList( + new SimpleRecord("k6".getBytes(), "v6".getBytes()), + new SimpleRecord("k7".getBytes(), "v7".getBytes()) + )); + + when(log.read(5L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult3); + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)); + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k1", "v1")); + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k2", "v2")); + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k3", "v3")); + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k4", "v4")); + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k5", "v5")); + verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k6", "v6")); + verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k7", "v7")); + verify(coordinator, times(0)).updateLastWrittenOffset(0L); + verify(coordinator, times(0)).updateLastWrittenOffset(2L); + verify(coordinator, times(0)).updateLastWrittenOffset(5L); + verify(coordinator, times(1)).updateLastWrittenOffset(7L); + verify(coordinator, 
times(0)).updateLastCommittedOffset(0L); + verify(coordinator, times(0)).updateLastCommittedOffset(2L); + verify(coordinator, times(0)).updateLastCommittedOffset(5L); + verify(coordinator, times(1)).updateLastCommittedOffset(7L); + } + } + + @Test + void testUpdateLastWrittenOffsetCommitInterval() throws Exception { + TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = partition -> Optional.of(7L); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + 2L + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(7L); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult1); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + FetchDataInfo readResult3 = logReadResult(5, Arrays.asList( + new SimpleRecord("k6".getBytes(), "v6".getBytes()) + )); + + when(log.read(5L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult3); + + FetchDataInfo readResult4 = logReadResult(6, Arrays.asList( + new SimpleRecord("k7".getBytes(), "v7".getBytes()) + )); + + when(log.read(6L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult4); + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)); + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k1", "v1")); + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k2", "v2")); + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k3", "v3")); + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k4", "v4")); + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k5", "v5")); + verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k6", "v6")); + verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Map.entry("k7", "v7")); + verify(coordinator, times(0)).updateLastWrittenOffset(0L); + verify(coordinator, times(1)).updateLastWrittenOffset(2L); + verify(coordinator, times(1)).updateLastWrittenOffset(5L); + verify(coordinator, times(0)).updateLastWrittenOffset(6L); + verify(coordinator, times(1)).updateLastWrittenOffset(7L); + verify(coordinator, times(0)).updateLastCommittedOffset(0L); + verify(coordinator, times(1)).updateLastCommittedOffset(2L); + verify(coordinator, times(1)).updateLastCommittedOffset(5L); + verify(coordinator, times(0)).updateLastCommittedOffset(6L); + verify(coordinator, times(1)).updateLastCommittedOffset(7L); + } + } + + @Test + void testPartitionGoesOfflineDuringLoad() throws Exception { + 
TopicPartition tp = new TopicPartition("foo", 0); + UnifiedLog log = mock(UnifiedLog.class); + Function> partitionLogSupplier = partition -> Optional.of(log); + Function> partitionLogEndOffsetSupplier = mock(Function.class); + StringKeyValueDeserializer serde = new StringKeyValueDeserializer(); + CoordinatorPlayback> coordinator = mock(CoordinatorPlayback.class); + + try (CoordinatorLoaderImpl> loader = new CoordinatorLoaderImpl<>( + Time.SYSTEM, + partitionLogSupplier, + partitionLogEndOffsetSupplier, + serde, + 1000, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + )) { + when(log.logStartOffset()).thenReturn(0L); + when(log.highWatermark()).thenReturn(0L); + when(partitionLogEndOffsetSupplier.apply(tp)).thenReturn(Optional.of(5L)).thenReturn(Optional.of(-1L)); + + FetchDataInfo readResult1 = logReadResult(0, Arrays.asList( + new SimpleRecord("k1".getBytes(), "v1".getBytes()), + new SimpleRecord("k2".getBytes(), "v2".getBytes()) + )); + + when(log.read(0L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult1); + + FetchDataInfo readResult2 = logReadResult(2, Arrays.asList( + new SimpleRecord("k3".getBytes(), "v3".getBytes()), + new SimpleRecord("k4".getBytes(), "v4".getBytes()), + new SimpleRecord("k5".getBytes(), "v5".getBytes()) + )); + + when(log.read(2L, 1000, FetchIsolation.LOG_END, true)) + .thenReturn(readResult2); + + assertFutureThrows(NotLeaderOrFollowerException.class, loader.load(tp, coordinator)); + } + } + + private FetchDataInfo logReadResult(long startOffset, List records) throws IOException { + return logReadResult(startOffset, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, records); + } + + private FetchDataInfo logReadResult( + long startOffset, + long producerId, + short producerEpoch, + List records + ) throws IOException { + FileRecords fileRecords = mock(FileRecords.class); + MemoryRecords memoryRecords; + if (producerId == RecordBatch.NO_PRODUCER_ID) { + memoryRecords = MemoryRecords.withRecords( + startOffset, + Compression.NONE, + records.toArray(new SimpleRecord[0]) + ); + } else { + memoryRecords = MemoryRecords.withTransactionalRecords( + startOffset, + Compression.NONE, + producerId, + producerEpoch, + 0, + RecordBatch.NO_PARTITION_LEADER_EPOCH, + records.toArray(new SimpleRecord[0]) + ); + } + + when(fileRecords.sizeInBytes()).thenReturn(memoryRecords.sizeInBytes()); + + doAnswer(invocation -> { + ByteBuffer buffer = invocation.getArgument(0, ByteBuffer.class); + buffer.put(memoryRecords.buffer().duplicate()); + buffer.flip(); + return null; + }).when(fileRecords).readInto(any(ByteBuffer.class), ArgumentMatchers.anyInt()); + + return new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecords); + } + + private FetchDataInfo logReadResult( + long startOffset, + long producerId, + short producerEpoch, + ControlRecordType controlRecordType + ) throws IOException { + FileRecords fileRecords = mock(FileRecords.class); + MemoryRecords memoryRecords = MemoryRecords.withEndTransactionMarker( + startOffset, + 0L, + RecordBatch.NO_PARTITION_LEADER_EPOCH, + producerId, + producerEpoch, + new EndTransactionMarker(controlRecordType, 0) + ); + + when(fileRecords.sizeInBytes()).thenReturn(memoryRecords.sizeInBytes()); + + doAnswer(invocation -> { + ByteBuffer buffer = invocation.getArgument(0, ByteBuffer.class); + buffer.put(memoryRecords.buffer().duplicate()); + buffer.flip(); + return null; + }).when(fileRecords).readInto(any(ByteBuffer.class), ArgumentMatchers.anyInt()); + + return new FetchDataInfo(new 
LogOffsetMetadata(startOffset), fileRecords); + } + +} diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java index ed6d269763450..68f152f2bea08 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java @@ -27,8 +27,8 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.util.Arrays; -import java.util.HashSet; +import java.util.List; +import java.util.Set; import java.util.stream.IntStream; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.BATCH_FLUSH_TIME_METRIC_NAME; @@ -39,17 +39,19 @@ import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; public class CoordinatorRuntimeMetricsImplTest { private static final String METRICS_GROUP = "test-runtime-metrics"; - + private static final String OTHER_METRICS_GROUP = "test-runtime-metrics-2"; + @Test public void testMetricNames() { Metrics metrics = new Metrics(); - HashSet expectedMetrics = new HashSet<>(Arrays.asList( + Set expectedMetrics = Set.of( kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "loading"), kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "active"), kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "failed"), @@ -77,7 +79,7 @@ public void testMetricNames() { kafkaMetricName(metrics, "batch-flush-time-ms-p95"), kafkaMetricName(metrics, "batch-flush-time-ms-p99"), kafkaMetricName(metrics, "batch-flush-time-ms-p999") - )); + ); try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP)) { runtimeMetrics.registerEventQueueSizeGauge(() -> 0); @@ -110,6 +112,26 @@ public void testUpdateNumPartitionsMetrics() { } } + @Test + public void testNumPartitionsMetricsGroupIsolation() { + Metrics metrics = new Metrics(); + + try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { + IntStream.range(0, 3) + .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.INITIAL, CoordinatorState.LOADING)); + IntStream.range(0, 2) + .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.LOADING, CoordinatorState.ACTIVE)); + IntStream.range(0, 1) + .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.ACTIVE, CoordinatorState.FAILED)); + + for (String state : List.of("loading", "active", "failed")) { + assertMetricGauge(metrics, kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", state), 1); + assertMetricGauge(metrics, otherGroupKafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", state), 0); + } + } + } + @Test public void testPartitionLoadSensorMetrics() { Time time = new MockTime(); @@ -131,6 +153,29 @@ public void testPartitionLoadSensorMetrics() { } } + 
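The *GroupIsolation tests added below all lean on the same property of the client metrics registry: a MetricName carries its group, so two CoordinatorRuntimeMetricsImpl instances registered on one Metrics object under different group names get independent gauges, sensors, and histograms. A small standalone sketch of that property follows, using only the plain Metrics API; the names "group-a"/"group-b" and the gauge values are illustrative, not taken from the patch.

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;

// Standalone illustration: the same metric name under different groups yields distinct registry entries.
public final class MetricGroupIsolationSketch {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            MetricName groupA = metrics.metricName("event-queue-size", "group-a", "");
            MetricName groupB = metrics.metricName("event-queue-size", "group-b", "");
            metrics.addMetric(groupA, (Measurable) (config, now) -> 5.0);
            metrics.addMetric(groupB, (Measurable) (config, now) -> 0.0);
            // Reading one group never leaks into the other.
            System.out.println(metrics.metrics().get(groupA).metricValue()); // 5.0
            System.out.println(metrics.metrics().get(groupB).metricValue()); // 0.0
        }
    }
}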
@ParameterizedTest + @ValueSource(strings = { + "partition-load-time-avg", + "partition-load-time-max" + }) + public void testPartitionLoadSensorMetricsGroupIsolation(String name) { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { + long startTimeMs = time.milliseconds(); + runtimeMetrics.recordPartitionLoadSensor(startTimeMs, startTimeMs + 1000); + + org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, name); + org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, name); + KafkaMetric metric = metrics.metrics().get(metricName); + KafkaMetric otherMetric = metrics.metrics().get(otherGroupMetricName); + assertNotEquals(Double.NaN, metric.metricValue()); + assertEquals(Double.NaN, otherMetric.metricValue()); + } + } + @Test public void testThreadIdleSensor() { Time time = new MockTime(); @@ -144,6 +189,22 @@ public void testThreadIdleSensor() { assertEquals(6 / 30.0, metric.metricValue()); // 'total_ms / window_ms' } + @Test + public void testThreadIdleSensorMetricsGroupIsolation() { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { + runtimeMetrics.recordThreadIdleTime(1000.0); + + org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, "thread-idle-ratio-avg"); + org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, "thread-idle-ratio-avg"); + assertNotEquals(0.0, metrics.metrics().get(metricName).metricValue()); + assertEquals(0.0, metrics.metrics().get(otherGroupMetricName).metricValue()); + } + } + @Test public void testEventQueueSize() { Time time = new MockTime(); @@ -155,6 +216,21 @@ public void testEventQueueSize() { } } + @Test + public void testEventQueueSizeMetricsGroupIsolation() { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + CoordinatorRuntimeMetricsImpl otherRuntimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { + runtimeMetrics.registerEventQueueSizeGauge(() -> 5); + otherRuntimeMetrics.registerEventQueueSizeGauge(() -> 0); + + assertMetricGauge(metrics, kafkaMetricName(metrics, "event-queue-size"), 5); + assertMetricGauge(metrics, otherGroupKafkaMetricName(metrics, "event-queue-size"), 0); + } + } + @ParameterizedTest @ValueSource(strings = { EVENT_QUEUE_TIME_METRIC_NAME, @@ -205,6 +281,45 @@ public void testHistogramMetrics(String metricNamePrefix) { assertEquals(999.0, metric.metricValue()); } + @ParameterizedTest + @ValueSource(strings = { + EVENT_QUEUE_TIME_METRIC_NAME, + EVENT_PROCESSING_TIME_METRIC_NAME, + EVENT_PURGATORY_TIME_METRIC_NAME, + BATCH_FLUSH_TIME_METRIC_NAME + }) + public void testHistogramMetricsGroupIsolation(String metricNamePrefix) { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new 
CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { + switch (metricNamePrefix) { + case EVENT_QUEUE_TIME_METRIC_NAME: + runtimeMetrics.recordEventQueueTime(1000); + break; + case EVENT_PROCESSING_TIME_METRIC_NAME: + runtimeMetrics.recordEventProcessingTime(1000); + break; + case EVENT_PURGATORY_TIME_METRIC_NAME: + runtimeMetrics.recordEventPurgatoryTime(1000); + break; + case BATCH_FLUSH_TIME_METRIC_NAME: + runtimeMetrics.recordFlushTime(1000); + } + + // Check metric group isolation + for (String suffix : List.of("-max", "-p50", "-p95", "-p99", "-p999")) { + org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, metricNamePrefix + suffix); + org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, metricNamePrefix + suffix); + KafkaMetric metric = metrics.metrics().get(metricName); + KafkaMetric otherMetric = metrics.metrics().get(otherGroupMetricName); + assertNotEquals(0.0, metric.metricValue()); + assertEquals(0.0, otherMetric.metricValue()); + } + } + } + @Test public void testRecordEventPurgatoryTimeLimit() { Time time = new MockTime(); @@ -229,4 +344,8 @@ private static void assertMetricGauge(Metrics metrics, org.apache.kafka.common.M private static MetricName kafkaMetricName(Metrics metrics, String name, String... keyValue) { return metrics.metricName(name, METRICS_GROUP, "", keyValue); } + + private static MetricName otherGroupKafkaMetricName(Metrics metrics, String name, String... keyValue) { + return metrics.metricName(name, OTHER_METRICS_GROUP, "", keyValue); + } } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java index 418d58376ccd8..4a040df6712c0 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.errors.NotCoordinatorException; import org.apache.kafka.common.errors.NotEnoughReplicasException; import org.apache.kafka.common.errors.RecordTooLargeException; @@ -31,7 +32,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.MetadataProvenance; import org.apache.kafka.server.util.FutureUtils; import org.apache.kafka.server.util.timer.MockTimer; import org.apache.kafka.storage.internals.log.LogConfig; @@ -44,6 +44,7 @@ import org.mockito.ArgumentMatcher; import java.nio.BufferOverflowException; +import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -66,7 +67,7 @@ import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.CoordinatorState.INITIAL; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.CoordinatorState.LOADING; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.HighWatermarkListener.NO_OFFSET; -import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.MIN_BUFFER_SIZE; +import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.INITIAL_BUFFER_SIZE; import static 
org.apache.kafka.coordinator.common.runtime.TestUtil.endTransactionMarker; import static org.apache.kafka.coordinator.common.runtime.TestUtil.records; import static org.apache.kafka.coordinator.common.runtime.TestUtil.transactionalRecords; @@ -153,7 +154,7 @@ public void testScheduleLoading() { assertEquals(ACTIVE, ctx.state); // Verify that onLoaded is called. - verify(coordinator, times(1)).onLoaded(MetadataImage.EMPTY); + verify(coordinator, times(1)).onLoaded(CoordinatorMetadataImage.EMPTY); // Verify that the listener is registered. verify(writer, times(1)).registerListener( @@ -1895,11 +1896,11 @@ public void testOnNewMetadataImage() { // Coordinator 0 is loaded. It should get the current image // that is the empty one. future0.complete(null); - verify(coordinator0).onLoaded(MetadataImage.EMPTY); + verify(coordinator0).onLoaded(CoordinatorMetadataImage.EMPTY); // Publish a new image. - MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); - MetadataImage newImage = delta.apply(MetadataProvenance.EMPTY); + CoordinatorMetadataDelta delta = new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY)); + CoordinatorMetadataImage newImage = CoordinatorMetadataImage.EMPTY; runtime.onNewMetadataImage(newImage, delta); // Coordinator 0 should be notified about it. @@ -2815,9 +2816,8 @@ public void testWriteEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated( assertTrue(write1.isDone()); assertTrue(write2.isDone()); - // All timer tasks have been cancelled. TimerTask entries are not removed in MockTimer. - assertEquals(2, timer.size()); - timer.taskQueue().forEach(taskEntry -> assertTrue(taskEntry.cancelled())); + // All timer tasks have been cancelled. Hence, they have been removed from MockTimer. + assertEquals(0, timer.size()); } @Test @@ -2885,9 +2885,8 @@ public void testCoordinatorCompleteTransactionEventWriteTimeoutTaskIsCancelledWh assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); assertTrue(write1.isDone()); - // All timer tasks have been cancelled. TimerTask entries are not removed in MockTimer. - assertEquals(1, timer.size()); - timer.taskQueue().forEach(taskEntry -> assertTrue(taskEntry.cancelled())); + // All timer tasks have been cancelled. Hence, they have been removed from MockTimer.
+ assertEquals(0, timer.size()); } @Test @@ -2921,11 +2920,11 @@ public void testAppendRecordBatchSize() { assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); int maxBatchSize = writer.config(TP).maxMessageSize(); - assertTrue(maxBatchSize > MIN_BUFFER_SIZE); + assertTrue(maxBatchSize > INITIAL_BUFFER_SIZE); - // Generate enough records to create a batch that has 16KB < batchSize < maxBatchSize + // Generate enough records to create a batch that has INITIAL_BUFFER_SIZE < batchSize < maxBatchSize List records = new ArrayList<>(); - for (int i = 0; i < 3000; i++) { + for (int i = 0; i < 50000; i++) { records.add("record-" + i); } @@ -2939,7 +2938,210 @@ public void testAppendRecordBatchSize() { assertFalse(write1.isCompletedExceptionally()); int batchSize = writer.entries(TP).get(0).sizeInBytes(); - assertTrue(batchSize > MIN_BUFFER_SIZE && batchSize < maxBatchSize); + assertTrue(batchSize > INITIAL_BUFFER_SIZE && batchSize < maxBatchSize); + } + + @Test + public void testCoordinatorDoNotRetainBufferLargeThanMaxMessageSize() { + MockTimer timer = new MockTimer(); + InMemoryPartitionWriter mockWriter = new InMemoryPartitionWriter(false) { + @Override + public LogConfig config(TopicPartition tp) { + return new LogConfig(Map.of( + TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB + )); + } + }; + StringSerializer serializer = new StringSerializer(); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(new DirectEventProcessor()) + .withPartitionWriter(mockWriter) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(serializer) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Schedule the loading. + runtime.scheduleLoadOperation(TP, 10); + + // Verify the initial state. + CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + assertEquals(0L, ctx.coordinator.lastWrittenOffset()); + assertEquals(0L, ctx.coordinator.lastCommittedOffset()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + + // Generate a record larger than the maxBatchSize. + List largeRecords = List.of("A".repeat(100 * 1024 * 1024)); + + // Write #1. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, + state -> new CoordinatorResult<>(largeRecords, "response1", null, true, false) + ); + + // Verify that the write has not completed exceptionally. + // This will catch any exceptions thrown including RecordTooLargeException. + assertFalse(write1.isCompletedExceptionally()); + + // Verify that the next buffer retrieved from the bufferSupplier is the initial small one, not the large buffer. 
+ assertEquals(INITIAL_BUFFER_SIZE, ctx.bufferSupplier.get(1).capacity()); + } + + @Test + public void testCoordinatorRetainExpandedBufferLessOrEqualToMaxMessageSize() { + MockTimer timer = new MockTimer(); + InMemoryPartitionWriter mockWriter = new InMemoryPartitionWriter(false) { + @Override + public LogConfig config(TopicPartition tp) { + return new LogConfig(Map.of( + TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024 * 1024) // 1GB + )); + } + }; + StringSerializer serializer = new StringSerializer(); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(new DirectEventProcessor()) + .withPartitionWriter(mockWriter) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(serializer) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Schedule the loading. + runtime.scheduleLoadOperation(TP, 10); + + // Verify the initial state. + CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + assertEquals(0L, ctx.coordinator.lastWrittenOffset()); + assertEquals(0L, ctx.coordinator.lastCommittedOffset()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + + // Generate enough records to create a batch that has INITIAL_BUFFER_SIZE < batchSize < maxBatchSize + List records = new ArrayList<>(); + for (int i = 0; i < 1000000; i++) { + records.add("record-" + i); + } + + // Write #1. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, + state -> new CoordinatorResult<>(records, "response1") + ); + + // Verify that the write has not completed exceptionally. + // This will catch any exceptions thrown including RecordTooLargeException. + assertFalse(write1.isCompletedExceptionally()); + + int batchSize = mockWriter.entries(TP).get(0).sizeInBytes(); + int maxBatchSize = mockWriter.config(TP).maxMessageSize(); + assertTrue(INITIAL_BUFFER_SIZE < batchSize && batchSize <= maxBatchSize); + + // Verify that the next buffer retrieved from the bufferSupplier is the expanded buffer. 
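Taken together, the buffer tests in this hunk (including the shrink test that follows) pin down a simple retention rule for the coordinator's write buffer: keep an expanded buffer for reuse only while it still fits under the partition's max.message.bytes, start new buffers at INITIAL_BUFFER_SIZE, and cap them at max.message.bytes when that limit drops below the initial size. The sketch below restates that rule with an assumed 16 KiB initial size and hypothetical helper names; the real CoordinatorRuntime logic may differ in detail. The assertion right after this checks the "expanded buffer is retained" case.

import java.nio.ByteBuffer;

// Hypothetical sketch of the buffer retention rule the surrounding tests verify; not the actual runtime code.
final class WriteBufferPolicySketch {
    // Assumed initial size; the tests only require it to sit between the small and large limits used above.
    static final int INITIAL_BUFFER_SIZE = 16 * 1024;

    // A freshly allocated buffer starts at the initial size, but never above max.message.bytes.
    static ByteBuffer allocate(int maxMessageSize) {
        return ByteBuffer.allocate(Math.min(INITIAL_BUFFER_SIZE, maxMessageSize));
    }

    // Only a buffer the partition could accept again is worth caching for reuse.
    static boolean shouldRetain(ByteBuffer used, int maxMessageSize) {
        return used.capacity() <= maxMessageSize;
    }
}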
+ assertTrue(ctx.bufferSupplier.get(1).capacity() > INITIAL_BUFFER_SIZE); + } + + @Test + public void testBufferShrinkWhenMaxMessageSizeReducedBelowInitialBufferSize() { + MockTimer timer = new MockTimer(); + var mockWriter = new InMemoryPartitionWriter(false) { + private LogConfig config = new LogConfig(Map.of( + TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB + )); + + @Override + public LogConfig config(TopicPartition tp) { + return config; + } + + public void updateConfig(LogConfig newConfig) { + this.config = newConfig; + } + }; + StringSerializer serializer = new StringSerializer(); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(new DirectEventProcessor()) + .withPartitionWriter(mockWriter) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(serializer) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Schedule the loading. + runtime.scheduleLoadOperation(TP, 10); + + // Verify the initial state. + CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + assertEquals(0L, ctx.coordinator.lastWrittenOffset()); + assertEquals(0L, ctx.coordinator.lastCommittedOffset()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + + List records = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + records.add("record-" + i); + } + + // Write #1. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, + state -> new CoordinatorResult<>(records, "response1") + ); + + // Verify that the write has not completed exceptionally. + // This will catch any exceptions thrown including RecordTooLargeException. + assertFalse(write1.isCompletedExceptionally()); + + int batchSize = mockWriter.entries(TP).get(0).sizeInBytes(); + int maxBatchSize = mockWriter.config(TP).maxMessageSize(); + assertTrue(batchSize <= INITIAL_BUFFER_SIZE && INITIAL_BUFFER_SIZE <= maxBatchSize); + + ByteBuffer cachedBuffer = ctx.bufferSupplier.get(1); + assertEquals(INITIAL_BUFFER_SIZE, cachedBuffer.capacity()); + // ctx.bufferSupplier.get(1) removes cachedBuffer from the bufferSupplier, so release it to put it back. + ctx.bufferSupplier.release(cachedBuffer); + + // Reduce max message size below initial buffer size. + mockWriter.updateConfig(new LogConfig( + Map.of(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(INITIAL_BUFFER_SIZE - 66)))); + assertEquals(INITIAL_BUFFER_SIZE - 66, mockWriter.config(TP).maxMessageSize()); + + // Write #2. + CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, + state -> new CoordinatorResult<>(records, "response2") + ); + assertFalse(write2.isCompletedExceptionally()); + + // Verify that there is no cached buffer since the cached buffer size is greater than the new maxMessageSize. + assertEquals(1, ctx.bufferSupplier.get(1).capacity()); + + // Write #3.
+ CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, DEFAULT_WRITE_TIMEOUT, + state -> new CoordinatorResult<>(records, "response3") + ); + assertFalse(write3.isCompletedExceptionally()); + + // Verify that the cached buffer size equals the new maxMessageSize, which is less than INITIAL_BUFFER_SIZE. + assertEquals(mockWriter.config(TP).maxMessageSize(), ctx.bufferSupplier.get(1).capacity()); } @Test diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDeltaTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDeltaTest.java new file mode 100644 index 0000000000000..f65103e87d858 --- /dev/null +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataDeltaTest.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.PartitionChangeRecord; +import org.apache.kafka.common.metadata.RemoveTopicRecord; +import org.apache.kafka.common.metadata.TopicRecord; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.MetadataProvenance; + +import org.junit.jupiter.api.Test; + +import java.util.Collection; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class KRaftCoordinatorMetadataDeltaTest { + + @Test + public void testKRaftCoordinatorDeltaWithNulls() { + assertTrue(new KRaftCoordinatorMetadataDelta(null).changedTopicIds().isEmpty()); + assertTrue(new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY)).changedTopicIds().isEmpty()); + + assertTrue(new KRaftCoordinatorMetadataDelta(null).deletedTopicIds().isEmpty()); + assertTrue(new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY)).deletedTopicIds().isEmpty()); + + assertTrue(new KRaftCoordinatorMetadataDelta(null).createdTopicIds().isEmpty()); + assertTrue(new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY)).createdTopicIds().isEmpty()); + } + + @Test + public void testKRaftCoordinatorDelta() { + Uuid topicId = Uuid.randomUuid(); + String topicName = "test-topic"; + Uuid topicId2 = Uuid.randomUuid(); + String topicName2 = "test-topic2"; + Uuid deletedTopicId = Uuid.randomUuid(); + String deletedTopicName = "deleted-topic"; + Uuid changedTopicId = Uuid.randomUuid(); + String changedTopicName = "changed-topic"; +
MetadataImage image = new MetadataImageBuilder() + .addTopic(deletedTopicId, deletedTopicName, 1) + .addTopic(changedTopicId, changedTopicName, 1) + .build(); + MetadataDelta delta = new MetadataDelta(image); + delta.replay(new TopicRecord().setTopicId(topicId).setName(topicName)); + delta.replay(new TopicRecord().setTopicId(topicId2).setName(topicName2)); + delta.replay(new RemoveTopicRecord().setTopicId(deletedTopicId)); + delta.replay(new PartitionChangeRecord().setTopicId(changedTopicId).setPartitionId(0)); + + KRaftCoordinatorMetadataDelta coordinatorDelta = new KRaftCoordinatorMetadataDelta(delta); + + // created topics + Collection createdTopicIds = coordinatorDelta.createdTopicIds(); + assertNotNull(createdTopicIds); + assertEquals(2, createdTopicIds.size()); + assertTrue(createdTopicIds.contains(topicId)); + assertTrue(createdTopicIds.contains(topicId2)); + + // deleted topics + Set deletedTopicIds = coordinatorDelta.deletedTopicIds(); + assertNotNull(deletedTopicIds); + assertEquals(1, deletedTopicIds.size()); + assertTrue(deletedTopicIds.contains(deletedTopicId)); + + // changed topics (also includes created topics) + Collection changedTopicIds = coordinatorDelta.changedTopicIds(); + assertNotNull(changedTopicIds); + assertEquals(3, changedTopicIds.size()); + assertTrue(changedTopicIds.contains(changedTopicId)); + assertTrue(changedTopicIds.contains(topicId)); + assertTrue(changedTopicIds.contains(topicId2)); + + CoordinatorMetadataImage coordinatorImage = coordinatorDelta.image(); + // the image only contains the original topics, not the new topics yet since we never called delta.apply() + assertNotNull(coordinatorImage); + assertEquals(Set.of(deletedTopicName, changedTopicName), coordinatorImage.topicNames()); + + // the image contains the correct topics after calling apply + MetadataImage imageAfterApply = delta.apply(new MetadataProvenance(123, 0, 0L, true)); + CoordinatorMetadataImage coordinatorImageApply = new KRaftCoordinatorMetadataImage(imageAfterApply); + assertNotNull(coordinatorImageApply); + assertEquals(Set.of(topicName, topicName2, changedTopicName), coordinatorImageApply.topicNames()); + } + + @Test + public void testEqualsAndHashcode() { + Uuid topicId = Uuid.randomUuid(); + String topicName = "test-topic"; + Uuid topicId2 = Uuid.randomUuid(); + String topicName2 = "test-topic2"; + Uuid topicId3 = Uuid.randomUuid(); + String topicName3 = "test-topic3"; + + MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); + delta.replay(new TopicRecord().setTopicId(topicId).setName(topicName)); + delta.replay(new TopicRecord().setTopicId(topicId2).setName(topicName2)); + + KRaftCoordinatorMetadataDelta coordinatorDelta = new KRaftCoordinatorMetadataDelta(delta); + KRaftCoordinatorMetadataDelta coordinatorDeltaCopy = new KRaftCoordinatorMetadataDelta(delta); + + MetadataDelta delta2 = new MetadataDelta(MetadataImage.EMPTY); + delta.replay(new TopicRecord().setTopicId(topicId3).setName(topicName3)); + KRaftCoordinatorMetadataDelta coordinatorDelta2 = new KRaftCoordinatorMetadataDelta(delta2); + + assertEquals(coordinatorDelta, coordinatorDeltaCopy); + assertEquals(coordinatorDelta.hashCode(), coordinatorDeltaCopy.hashCode()); + assertNotEquals(coordinatorDelta, coordinatorDelta2); + assertNotEquals(coordinatorDelta.hashCode(), coordinatorDelta2.hashCode()); + } +} diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImageTest.java 
b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImageTest.java new file mode 100644 index 0000000000000..88975713d8ac2 --- /dev/null +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KRaftCoordinatorMetadataImageTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.image.MetadataImage; + +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; + +class KRaftCoordinatorMetadataImageTest { + + @Test + public void testKRaftCoordinatorMetadataImage() { + Uuid topicId = Uuid.randomUuid(); + String topicName = "test-topic"; + int partitionCount = 2; + Uuid topicId2 = Uuid.randomUuid(); + String topicName2 = "test-topic2"; + int partitionCount2 = 4; + Uuid noPartitionTopicId = Uuid.randomUuid(); + String noPartitionTopic = "no-partition-topic"; + long imageVersion = 123L; + + MetadataImage metadataImage = new MetadataImageBuilder() + .addTopic(topicId, topicName, partitionCount) + .addTopic(topicId2, topicName2, partitionCount2) + .addTopic(noPartitionTopicId, noPartitionTopic, 0) + .addRacks() + .build(imageVersion); + + KRaftCoordinatorMetadataImage image = new KRaftCoordinatorMetadataImage(metadataImage); + + assertEquals(Set.of(topicName, topicName2, noPartitionTopic), image.topicNames()); + assertEquals(Set.of(topicId, topicId2, noPartitionTopicId), image.topicIds()); + + image.topicMetadata(topicName).ifPresentOrElse( + topicMetadata -> { + assertEquals(topicName, topicMetadata.name()); + assertEquals(topicId, topicMetadata.id()); + assertEquals(partitionCount, topicMetadata.partitionCount()); + List racks0 = topicMetadata.partitionRacks(0); + List racks1 = topicMetadata.partitionRacks(1); + assertEquals(2, racks0.size()); + assertEquals(2, racks1.size()); + assertEquals("rack0", racks0.get(0)); + assertEquals("rack1", racks0.get(1)); + assertEquals("rack1", racks1.get(0)); + assertEquals("rack2", racks1.get(1)); + }, + () -> fail("Expected topic metadata for " + topicName) + ); + + image.topicMetadata(topicName2).ifPresentOrElse( + topicMetadata -> { + assertEquals(topicName2, topicMetadata.name()); + assertEquals(topicId2, topicMetadata.id()); + assertEquals(partitionCount2, topicMetadata.partitionCount()); + List racks0 = topicMetadata.partitionRacks(0); + List racks1 = 
topicMetadata.partitionRacks(1); + assertEquals(2, racks0.size()); + assertEquals(2, racks1.size()); + assertEquals("rack0", racks0.get(0)); + assertEquals("rack1", racks0.get(1)); + assertEquals("rack1", racks1.get(0)); + assertEquals("rack2", racks1.get(1)); + }, + () -> fail("Expected topic metadata for " + topicName) + ); + + image.topicMetadata(noPartitionTopic).ifPresentOrElse( + topicMetadata -> { + assertEquals(noPartitionTopic, topicMetadata.name()); + assertEquals(noPartitionTopicId, topicMetadata.id()); + assertEquals(0, topicMetadata.partitionCount()); + List racks = topicMetadata.partitionRacks(0); + assertEquals(0, racks.size()); + }, + () -> fail("Expected topic metadata for " + topicName) + ); + + assertNotNull(image.emptyDelta()); + + assertEquals(metadataImage.offset(), image.version()); + assertEquals(imageVersion, image.version()); + + assertFalse(image.isEmpty()); + } + + @Test + public void testEqualsAndHashcode() { + Uuid topicId = Uuid.randomUuid(); + String topicName = "test-topic"; + int partitionCount = 2; + Uuid topicId2 = Uuid.randomUuid(); + String topicName2 = "test-topic2"; + int partitionCount2 = 4; + long imageVersion = 123L; + + MetadataImage metadataImage = new MetadataImageBuilder() + .addTopic(topicId, topicName, partitionCount) + .addRacks() + .build(imageVersion); + + KRaftCoordinatorMetadataImage coordinatorMetadataImage = new KRaftCoordinatorMetadataImage(metadataImage); + KRaftCoordinatorMetadataImage coordinatorMetadataImageCopy = new KRaftCoordinatorMetadataImage(metadataImage); + + MetadataImage metadataImage2 = new MetadataImageBuilder() + .addTopic(topicId2, topicName2, partitionCount2) + .addRacks() + .build(imageVersion); + + KRaftCoordinatorMetadataImage coordinatorMetadataImage2 = new KRaftCoordinatorMetadataImage(metadataImage2); + + assertEquals(coordinatorMetadataImage, coordinatorMetadataImageCopy); + assertNotEquals(coordinatorMetadataImage, coordinatorMetadataImage2); + + assertEquals(coordinatorMetadataImage.hashCode(), coordinatorMetadataImageCopy.hashCode()); + assertNotEquals(coordinatorMetadataImage.hashCode(), coordinatorMetadataImage2.hashCode()); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MetadataImageBuilder.java similarity index 88% rename from group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java rename to coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MetadataImageBuilder.java index 23a01a6024176..142915a69e610 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MetadataImageBuilder.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.kafka.coordinator.group; +package org.apache.kafka.coordinator.common.runtime; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.metadata.PartitionRecord; @@ -75,4 +75,12 @@ public MetadataImage build() { public MetadataImage build(long version) { return delta.apply(new MetadataProvenance(version, 0, 0L, true)); } + + public CoordinatorMetadataImage buildCoordinatorMetadataImage() { + return new KRaftCoordinatorMetadataImage(build()); + } + + public CoordinatorMetadataImage buildCoordinatorMetadataImage(long version) { + return new KRaftCoordinatorMetadataImage(build(version)); + } } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java index 40b946bbefd9d..fc2ace6698ce9 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java @@ -49,10 +49,7 @@ CoordinatorResult execute() { } } - public static class ExecutorResult { - public final String key; - public final CoordinatorResult result; - + public record ExecutorResult(String key, CoordinatorResult result) { public ExecutorResult( String key, CoordinatorResult result @@ -61,24 +58,6 @@ public ExecutorResult( this.result = Objects.requireNonNull(result); } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ExecutorResult that = (ExecutorResult) o; - - if (!Objects.equals(key, that.key)) return false; - return Objects.equals(result, that.result); - } - - @Override - public int hashCode() { - int result = key.hashCode(); - result = 31 * result + this.result.hashCode(); - return result; - } - @Override public String toString() { return "ExecutorResult(" + diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorShard.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorShard.java index 1fec7a9e00085..28167504f57f6 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorShard.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorShard.java @@ -31,7 +31,7 @@ * A simple Coordinator implementation that stores the records into a set. */ public class MockCoordinatorShard implements CoordinatorShard { - static record RecordAndMetadata( + record RecordAndMetadata( long offset, long producerId, short producerEpoch, diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java index 5c55f59d608f5..78e14ac576b39 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.PriorityQueue; import java.util.concurrent.TimeUnit; @@ -36,54 +35,13 @@ public class MockCoordinatorTimer implements CoordinatorTimer { /** * Represents a scheduled timeout. 
*/ - public static class ScheduledTimeout { - public final String key; - public final long deadlineMs; - public final TimeoutOperation operation; - - public ScheduledTimeout( - String key, - long deadlineMs, - TimeoutOperation operation - ) { - this.key = key; - this.deadlineMs = deadlineMs; - this.operation = operation; - } + public record ScheduledTimeout(String key, long deadlineMs, TimeoutOperation operation) { } /** * Represents an expired timeout. */ - public static class ExpiredTimeout { - public final String key; - public final CoordinatorResult result; - - public ExpiredTimeout( - String key, - CoordinatorResult result - ) { - this.key = key; - this.result = result; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ExpiredTimeout that = (ExpiredTimeout) o; - - if (!Objects.equals(key, that.key)) return false; - return Objects.equals(result, that.result); - } - - @Override - public int hashCode() { - int result1 = key != null ? key.hashCode() : 0; - result1 = 31 * result1 + (result != null ? result.hashCode() : 0); - return result1; - } + public record ExpiredTimeout(String key, CoordinatorResult result) { } private final Time time; diff --git a/core/src/main/java/kafka/server/ClientRequestQuotaManager.java b/core/src/main/java/kafka/server/ClientRequestQuotaManager.java index 1d0695879871b..3f2398b8358f5 100644 --- a/core/src/main/java/kafka/server/ClientRequestQuotaManager.java +++ b/core/src/main/java/kafka/server/ClientRequestQuotaManager.java @@ -27,15 +27,14 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.server.config.ClientQuotaManagerConfig; import org.apache.kafka.server.quota.ClientQuotaCallback; +import org.apache.kafka.server.quota.ClientQuotaManager; import org.apache.kafka.server.quota.QuotaType; import org.apache.kafka.server.quota.QuotaUtils; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; -import scala.jdk.javaapi.CollectionConverters; -import scala.jdk.javaapi.OptionConverters; - @SuppressWarnings("this-escape") public class ClientRequestQuotaManager extends ClientQuotaManager { // Since exemptSensor is for all clients and has a constant name, we do not expire exemptSensor and only @@ -56,8 +55,8 @@ public ClientRequestQuotaManager( String threadNamePrefix, Optional> quotaCallbackPlugin ) { - super(config, metrics, QuotaType.REQUEST, time, threadNamePrefix, OptionConverters.toScala(quotaCallbackPlugin)); - this.maxThrottleTimeMs = TimeUnit.SECONDS.toMillis(config.quotaWindowSizeSeconds); + super(config, metrics, QuotaType.REQUEST, time, threadNamePrefix, quotaCallbackPlugin); + this.maxThrottleTimeMs = TimeUnit.SECONDS.toMillis(config.quotaWindowSizeSeconds()); this.metrics = metrics; this.exemptMetricName = metrics.metricName("exempt-request-time", QuotaType.REQUEST.toString(), "Tracking exempt-request-time utilization percentage"); exemptSensor = getOrCreateSensor(EXEMPT_SENSOR_NAME, DEFAULT_INACTIVE_EXEMPT_SENSOR_EXPIRATION_TIME_SECONDS, sensor -> sensor.add(exemptMetricName, new Rate())); @@ -72,8 +71,8 @@ private void recordExempt(double value) { } /** - * Records that a user/clientId changed request processing time being throttled. If quota has been violated, return - * throttle time in milliseconds. Throttle time calculation may be overridden by sub-classes. + * Records that a user/clientId changed request processing time being throttled. 
If the quota has been violated, return + * throttle time in milliseconds. Subclasses may override throttle time calculation. * @param request client request * @return Number of milliseconds to throttle in case of quota violation. Zero otherwise */ @@ -103,8 +102,8 @@ public long throttleTime(QuotaViolationException e, long timeMs) { } @Override - public MetricName clientQuotaMetricName(scala.collection.immutable.Map quotaMetricTags) { - return metrics.metricName("request-time", QuotaType.REQUEST.toString(), "Tracking request-time per user/client-id", CollectionConverters.asJava(quotaMetricTags)); + public MetricName clientQuotaMetricName(Map quotaMetricTags) { + return metrics.metricName("request-time", QuotaType.REQUEST.toString(), "Tracking request-time per user/client-id", quotaMetricTags); } private double nanosToPercentage(long nanos) { diff --git a/core/src/main/java/kafka/server/QuotaFactory.java b/core/src/main/java/kafka/server/QuotaFactory.java index cf9701409fd4c..b672be4265053 100644 --- a/core/src/main/java/kafka/server/QuotaFactory.java +++ b/core/src/main/java/kafka/server/QuotaFactory.java @@ -25,12 +25,12 @@ import org.apache.kafka.server.config.QuotaConfig; import org.apache.kafka.server.config.ReplicationQuotaManagerConfig; import org.apache.kafka.server.quota.ClientQuotaCallback; +import org.apache.kafka.server.quota.ClientQuotaManager; +import org.apache.kafka.server.quota.ControllerMutationQuotaManager; import org.apache.kafka.server.quota.QuotaType; import java.util.Optional; -import scala.Option; -import scala.jdk.javaapi.OptionConverters; public class QuotaFactory { @@ -51,61 +51,14 @@ public void record(long value) { } }; - public static class QuotaManagers { - private final ClientQuotaManager fetch; - private final ClientQuotaManager produce; - private final ClientRequestQuotaManager request; - private final ControllerMutationQuotaManager controllerMutation; - private final ReplicationQuotaManager leader; - private final ReplicationQuotaManager follower; - private final ReplicationQuotaManager alterLogDirs; - private final Optional> clientQuotaCallbackPlugin; - - public QuotaManagers(ClientQuotaManager fetch, ClientQuotaManager produce, ClientRequestQuotaManager request, - ControllerMutationQuotaManager controllerMutation, ReplicationQuotaManager leader, - ReplicationQuotaManager follower, ReplicationQuotaManager alterLogDirs, - Optional> clientQuotaCallbackPlugin) { - this.fetch = fetch; - this.produce = produce; - this.request = request; - this.controllerMutation = controllerMutation; - this.leader = leader; - this.follower = follower; - this.alterLogDirs = alterLogDirs; - this.clientQuotaCallbackPlugin = clientQuotaCallbackPlugin; - } - - public ClientQuotaManager fetch() { - return fetch; - } - - public ClientQuotaManager produce() { - return produce; - } - - public ClientRequestQuotaManager request() { - return request; - } - - public ControllerMutationQuotaManager controllerMutation() { - return controllerMutation; - } - - public ReplicationQuotaManager leader() { - return leader; - } - - public ReplicationQuotaManager follower() { - return follower; - } - - public ReplicationQuotaManager alterLogDirs() { - return alterLogDirs; - } - - public Optional> clientQuotaCallbackPlugin() { - return clientQuotaCallbackPlugin; - } + public record QuotaManagers(ClientQuotaManager fetch, + ClientQuotaManager produce, + ClientRequestQuotaManager request, + ControllerMutationQuotaManager controllerMutation, + ReplicationQuotaManager leader, + ReplicationQuotaManager 
follower, + ReplicationQuotaManager alterLogDirs, + Optional> clientQuotaCallbackPlugin) { public void shutdown() { fetch.shutdown(); @@ -124,13 +77,12 @@ public static QuotaManagers instantiate( String role ) { Optional> clientQuotaCallbackPlugin = createClientQuotaCallback(cfg, metrics, role); - Option> clientQuotaCallbackPluginOption = OptionConverters.toScala(clientQuotaCallbackPlugin); return new QuotaManagers( - new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.FETCH, time, threadNamePrefix, clientQuotaCallbackPluginOption), - new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.PRODUCE, time, threadNamePrefix, clientQuotaCallbackPluginOption), + new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.FETCH, time, threadNamePrefix, clientQuotaCallbackPlugin), + new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.PRODUCE, time, threadNamePrefix, clientQuotaCallbackPlugin), new ClientRequestQuotaManager(clientConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin), - new ControllerMutationQuotaManager(clientControllerMutationConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPluginOption), + new ControllerMutationQuotaManager(clientControllerMutationConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin), new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.LEADER_REPLICATION, time), new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.FOLLOWER_REPLICATION, time), new ReplicationQuotaManager(alterLogDirsReplicationConfig(cfg), metrics, QuotaType.ALTER_LOG_DIRS_REPLICATION, time), diff --git a/core/src/main/java/kafka/server/TierStateMachine.java b/core/src/main/java/kafka/server/TierStateMachine.java index 902bc62cbb953..9d8dcafd20382 100644 --- a/core/src/main/java/kafka/server/TierStateMachine.java +++ b/core/src/main/java/kafka/server/TierStateMachine.java @@ -26,12 +26,16 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.LeaderEndPoint; +import org.apache.kafka.server.PartitionFetchState; +import org.apache.kafka.server.ReplicaState; import org.apache.kafka.server.common.CheckpointFile; import org.apache.kafka.server.common.OffsetAndEpoch; import org.apache.kafka.server.log.remote.storage.RemoteLogManager; import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; import org.apache.kafka.server.log.remote.storage.RemoteStorageException; import org.apache.kafka.server.log.remote.storage.RemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.RemoteStorageNotReadyException; import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; import org.apache.kafka.storage.internals.log.EpochEntry; import org.apache.kafka.storage.internals.log.LogFileUtils; @@ -55,9 +59,6 @@ import java.util.Map; import java.util.Optional; -import scala.Option; -import scala.jdk.javaapi.CollectionConverters; - import static org.apache.kafka.storage.internals.log.LogStartOffsetIncrementReason.LeaderOffsetIncremented; /** @@ -96,7 +97,7 @@ PartitionFetchState start(TopicPartition topicPartition, PartitionFetchState currentFetchState, PartitionData fetchPartitionData) throws Exception { OffsetAndEpoch epochAndLeaderLocalStartOffset = leader.fetchEarliestLocalOffset(topicPartition, currentFetchState.currentLeaderEpoch()); - int epoch = epochAndLeaderLocalStartOffset.leaderEpoch(); + int epoch = 
epochAndLeaderLocalStartOffset.epoch(); long leaderLocalStartOffset = epochAndLeaderLocalStartOffset.offset(); long offsetToFetch; @@ -123,8 +124,8 @@ PartitionFetchState start(TopicPartition topicPartition, long initialLag = leaderEndOffset - offsetToFetch; - return PartitionFetchState.apply(currentFetchState.topicId(), offsetToFetch, Option.apply(initialLag), currentFetchState.currentLeaderEpoch(), - Fetching$.MODULE$, unifiedLog.latestEpoch()); + return new PartitionFetchState(currentFetchState.topicId(), offsetToFetch, Optional.of(initialLag), currentFetchState.currentLeaderEpoch(), + ReplicaState.FETCHING, unifiedLog.latestEpoch()); } @@ -136,12 +137,12 @@ private OffsetForLeaderEpochResponseData.EpochEndOffset fetchEarlierEpochEndOffs // Find the end-offset for the epoch earlier to the given epoch from the leader Map partitionsWithEpochs = new HashMap<>(); partitionsWithEpochs.put(partition, new OffsetForLeaderEpochRequestData.OffsetForLeaderPartition().setPartition(partition.partition()).setCurrentLeaderEpoch(currentLeaderEpoch).setLeaderEpoch(previousEpoch)); - Option maybeEpochEndOffset = leader.fetchEpochEndOffsets(CollectionConverters.asScala(partitionsWithEpochs)).get(partition); - if (maybeEpochEndOffset.isEmpty()) { + var epochEndOffset = leader.fetchEpochEndOffsets(partitionsWithEpochs).get(partition); + + if (epochEndOffset == null) { throw new KafkaException("No response received for partition: " + partition); } - OffsetForLeaderEpochResponseData.EpochEndOffset epochEndOffset = maybeEpochEndOffset.get(); if (epochEndOffset.errorCode() != Errors.NONE.code()) { throw Errors.forCode(epochEndOffset.errorCode()).exception(); } @@ -230,6 +231,10 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, } } + if (!rlm.isPartitionReady(topicPartition)) { + throw new RemoteStorageNotReadyException("RemoteLogManager is not ready for partition: " + topicPartition); + } + RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlm.fetchRemoteLogSegmentMetadata(topicPartition, targetEpoch, previousOffsetToLeaderLocalLogStartOffset) .orElseThrow(() -> buildRemoteStorageException(topicPartition, targetEpoch, currentLeaderEpoch, leaderLocalLogStartOffset, leaderLogStartOffset)); diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index c898f448a1a04..e03ab35e90eb4 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -36,9 +36,9 @@ import org.apache.kafka.coordinator.share.ShareCoordinator; import org.apache.kafka.metadata.ConfigRepository; import org.apache.kafka.metadata.MetadataCache; +import org.apache.kafka.security.DelegationTokenManager; import org.apache.kafka.server.ApiVersionManager; import org.apache.kafka.server.ClientMetricsManager; -import org.apache.kafka.server.DelegationTokenManager; import org.apache.kafka.server.authorizer.Authorizer; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; @@ -69,7 +69,7 @@ public class KafkaApisBuilder { private DelegationTokenManager tokenManager = null; private ApiVersionManager apiVersionManager = null; private ClientMetricsManager clientMetricsManager = null; - private Optional shareCoordinator = Optional.empty(); + private ShareCoordinator shareCoordinator = null; private GroupConfigManager groupConfigManager = null; public KafkaApisBuilder setRequestChannel(RequestChannel requestChannel) { @@ -97,7 +97,7 @@ public KafkaApisBuilder 
setTxnCoordinator(TransactionCoordinator txnCoordinator) return this; } - public KafkaApisBuilder setShareCoordinator(Optional shareCoordinator) { + public KafkaApisBuilder setShareCoordinator(ShareCoordinator shareCoordinator) { this.shareCoordinator = shareCoordinator; return this; } @@ -194,8 +194,8 @@ public KafkaApis build() { if (replicaManager == null) throw new RuntimeException("You must set replicaManager"); if (groupCoordinator == null) throw new RuntimeException("You must set groupCoordinator"); if (txnCoordinator == null) throw new RuntimeException("You must set txnCoordinator"); - if (autoTopicCreationManager == null) - throw new RuntimeException("You must set autoTopicCreationManager"); + if (shareCoordinator == null) throw new RuntimeException("You must set shareCoordinator"); + if (autoTopicCreationManager == null) throw new RuntimeException("You must set autoTopicCreationManager"); if (config == null) config = new KafkaConfig(Map.of()); if (configRepository == null) throw new RuntimeException("You must set configRepository"); if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); @@ -213,7 +213,7 @@ public KafkaApis build() { replicaManager, groupCoordinator, txnCoordinator, - OptionConverters.toScala(shareCoordinator), + shareCoordinator, autoTopicCreationManager, brokerId, config, diff --git a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java index 71d192dc5ea31..6de61915e8efc 100644 --- a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java @@ -24,6 +24,7 @@ import org.apache.kafka.server.config.ServerLogConfigs; import org.apache.kafka.server.util.Scheduler; import org.apache.kafka.storage.internals.log.CleanerConfig; +import org.apache.kafka.storage.internals.log.LogCleaner; import org.apache.kafka.storage.internals.log.LogConfig; import org.apache.kafka.storage.internals.log.LogDirFailureChannel; import org.apache.kafka.storage.internals.log.ProducerStateManagerConfig; @@ -172,6 +173,8 @@ public LogManager build() { logDirFailureChannel, time, remoteStorageSystemEnable, - initialTaskDelayMs); + initialTaskDelayMs, + LogCleaner::new + ); } } diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index f001a039ae23a..5426d55a64da3 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -33,7 +33,6 @@ import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; import scala.Option; @@ -121,8 +120,6 @@ public ReplicaManager build() { logDirFailureChannel, alterPartitionManager, brokerTopicStats, - new AtomicBoolean(false), - Option.empty(), Option.empty(), Option.empty(), Option.empty(), diff --git a/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java b/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java index d02fd9d3a7dae..9b6a04ee979eb 100644 --- a/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java +++ b/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java @@ -89,7 +89,7 @@ public DescribeTopicPartitionsResponseData handleDescribeTopicPartitionsRequest( // Do not disclose the existence of 
topics unauthorized for Describe, so we've not even checked if they exist or not Set unauthorizedForDescribeTopicMetadata = new HashSet<>(); - Stream authorizedTopicsStream = topics.stream().sorted().filter(topicName -> { + Stream authorizedTopicsStream = topics.stream().filter(topicName -> { boolean isAuthorized = authHelper.authorize( abstractRequest.context(), DESCRIBE, TOPIC, topicName, true, true, 1); if (!fetchAllTopics && !isAuthorized) { @@ -99,7 +99,7 @@ public DescribeTopicPartitionsResponseData handleDescribeTopicPartitionsRequest( ); } return isAuthorized; - }); + }).sorted(); DescribeTopicPartitionsResponseData response = metadataCache.describeTopicResponse( authorizedTopicsStream.iterator(), diff --git a/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java b/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java index e3fa44ff35cd2..3cb226e06867c 100644 --- a/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java +++ b/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java @@ -17,8 +17,6 @@ package kafka.server.logger; -import kafka.utils.LoggingController; - import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.common.config.LogLevelConfig; import org.apache.kafka.common.errors.ClusterAuthorizationException; @@ -27,6 +25,7 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.server.logger.LoggingController; import org.slf4j.Logger; @@ -131,9 +130,9 @@ void validateLogLevelConfigs(Collection ops) { break; case DELETE: validateLoggerNameExists(loggerName); - if (loggerName.equals(LoggingController.ROOT_LOGGER())) { + if (loggerName.equals(LoggingController.ROOT_LOGGER)) { throw new InvalidRequestException("Removing the log level of the " + - LoggingController.ROOT_LOGGER() + " logger is not allowed"); + LoggingController.ROOT_LOGGER + " logger is not allowed"); } break; case APPEND: diff --git a/core/src/main/java/kafka/server/share/DelayedShareFetch.java b/core/src/main/java/kafka/server/share/DelayedShareFetch.java index d68ed06d3070d..969029a6ea582 100644 --- a/core/src/main/java/kafka/server/share/DelayedShareFetch.java +++ b/core/src/main/java/kafka/server/share/DelayedShareFetch.java @@ -17,25 +17,37 @@ package kafka.server.share; import kafka.cluster.Partition; -import kafka.server.LogReadResult; import kafka.server.QuotaFactory; import kafka.server.ReplicaManager; import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.KafkaStorageException; +import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; +import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.raft.errors.NotLeaderException; +import org.apache.kafka.server.LogReadResult; import org.apache.kafka.server.metrics.KafkaMetricsGroup; import org.apache.kafka.server.purgatory.DelayedOperation; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; +import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey; import 
org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.share.fetch.ShareFetchPartitionData; import org.apache.kafka.server.share.metrics.ShareGroupMetrics; import org.apache.kafka.server.storage.log.FetchIsolation; +import org.apache.kafka.server.storage.log.FetchPartitionData; +import org.apache.kafka.server.util.timer.TimerTask; +import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogOffsetSnapshot; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; import com.yammer.metrics.core.Meter; @@ -44,11 +56,18 @@ import java.util.ArrayList; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -58,6 +77,8 @@ import scala.jdk.javaapi.CollectionConverters; import scala.runtime.BoxedUnit; +import static kafka.server.share.PendingRemoteFetches.RemoteFetch; + /** * A delayed share fetch operation has been introduced in case there is a share fetch request which cannot be completed instantaneously. */ @@ -80,10 +101,18 @@ public class DelayedShareFetch extends DelayedOperation { * Metric for the rate of expired delayed fetch requests. */ private final Meter expiredRequestMeter; + /** + * fetchId serves as a token while acquiring/releasing share partition's fetch lock. + */ + private final Uuid fetchId; // Tracks the start time to acquire any share partition for a fetch request. private long acquireStartTimeMs; private LinkedHashMap partitionsAcquired; - private LinkedHashMap partitionsAlreadyFetched; + private LinkedHashMap localPartitionsAlreadyFetched; + private Optional pendingRemoteFetchesOpt; + private Optional remoteStorageFetchException; + private final AtomicBoolean outsidePurgatoryCallbackLock; + private final long remoteFetchMaxWaitMs; /** * This function constructs an instance of delayed share fetch operation for completing share fetch @@ -95,6 +124,7 @@ public class DelayedShareFetch extends DelayedOperation { * @param sharePartitions The share partitions referenced in the share fetch request. * @param shareGroupMetrics The share group metrics to record the metrics. * @param time The system time. + * @param remoteFetchMaxWaitMs The max wait time for a share fetch request having remote storage fetch. 
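+ * This constructor delegates to the package-private constructor with an empty pending remote fetch, a randomly generated fetchId (used as the token for acquiring and releasing the share partition fetch locks) and the supplied remoteFetchMaxWaitMs.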
*/ public DelayedShareFetch( ShareFetch shareFetch, @@ -102,7 +132,8 @@ public DelayedShareFetch( BiConsumer exceptionHandler, LinkedHashMap sharePartitions, ShareGroupMetrics shareGroupMetrics, - Time time + Time time, + long remoteFetchMaxWaitMs ) { this(shareFetch, replicaManager, @@ -110,10 +141,27 @@ public DelayedShareFetch( sharePartitions, PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM), shareGroupMetrics, - time + time, + Optional.empty(), + Uuid.randomUuid(), + remoteFetchMaxWaitMs ); } + /** + * This function constructs an instance of delayed share fetch operation for completing share fetch + * requests instantaneously or with delay. The direct usage of this constructor is only from tests. + * + * @param shareFetch The share fetch parameters of the share fetch request. + * @param replicaManager The replica manager instance used to read from log/complete the request. + * @param exceptionHandler The handler to complete share fetch requests with exception. + * @param sharePartitions The share partitions referenced in the share fetch request. + * @param partitionMaxBytesStrategy The strategy to identify the max bytes for topic partitions in the share fetch request. + * @param shareGroupMetrics The share group metrics to record the metrics. + * @param time The system time. + * @param pendingRemoteFetchesOpt Optional containing an in-flight remote fetch object or an empty optional. + * @param remoteFetchMaxWaitMs The max wait time for a share fetch request having remote storage fetch. + */ DelayedShareFetch( ShareFetch shareFetch, ReplicaManager replicaManager, @@ -121,19 +169,27 @@ public DelayedShareFetch( LinkedHashMap sharePartitions, PartitionMaxBytesStrategy partitionMaxBytesStrategy, ShareGroupMetrics shareGroupMetrics, - Time time + Time time, + Optional pendingRemoteFetchesOpt, + Uuid fetchId, + long remoteFetchMaxWaitMs ) { - super(shareFetch.fetchParams().maxWaitMs, Optional.empty()); + super(shareFetch.fetchParams().maxWaitMs); this.shareFetch = shareFetch; this.replicaManager = replicaManager; this.partitionsAcquired = new LinkedHashMap<>(); - this.partitionsAlreadyFetched = new LinkedHashMap<>(); + this.localPartitionsAlreadyFetched = new LinkedHashMap<>(); this.exceptionHandler = exceptionHandler; this.sharePartitions = sharePartitions; this.partitionMaxBytesStrategy = partitionMaxBytesStrategy; this.shareGroupMetrics = shareGroupMetrics; this.time = time; this.acquireStartTimeMs = time.hiResClockMs(); + this.pendingRemoteFetchesOpt = pendingRemoteFetchesOpt; + this.remoteStorageFetchException = Optional.empty(); + this.fetchId = fetchId; + this.outsidePurgatoryCallbackLock = new AtomicBoolean(false); + this.remoteFetchMaxWaitMs = remoteFetchMaxWaitMs; // Register metrics for DelayedShareFetch. KafkaMetricsGroup metricsGroup = new KafkaMetricsGroup("kafka.server", "DelayedShareFetchMetrics"); this.expiredRequestMeter = metricsGroup.newMeter(EXPIRES_PER_SEC, "requests", TimeUnit.SECONDS); @@ -148,71 +204,88 @@ public void onExpiration() { * Complete the share fetch operation by fetching records for all partitions in the share fetch request irrespective * of whether they have any acquired records. This is called when the fetch operation is forced to complete either * because records can be acquired for some partitions or due to MaxWaitMs timeout. + *

    + * On operation timeout, onComplete is invoked, last try occurs to acquire partitions and read + * from log, if acquired. The fetch will only happen from local log and not remote storage, on + * operation expiration. */ @Override public void onComplete() { - // We are utilizing lock so that onComplete doesn't do a dirty read for instance variables - - // partitionsAcquired and partitionsAlreadyFetched, since these variables can get updated in a different tryComplete thread. - lock.lock(); log.trace("Completing the delayed share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), partitionsAcquired.keySet()); - try { - LinkedHashMap topicPartitionData; - // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. - if (partitionsAcquired.isEmpty()) { - topicPartitionData = acquirablePartitions(); - // The TopicPartitionsAcquireTimeMs metric signifies the tension when acquiring the locks - // for the share partition, hence if no partitions are yet acquired by tryComplete, - // we record the metric here. Do not check if the request has successfully acquired any - // partitions now or not, as then the upper bound of request timeout shall be recorded - // for the metric. - updateAcquireElapsedTimeMetric(); - } else { - // tryComplete invoked forceComplete, so we can use the data from tryComplete. - topicPartitionData = partitionsAcquired; - } - - if (topicPartitionData.isEmpty()) { - // No locks for share partitions could be acquired, so we complete the request with an empty response. - shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), 0); - shareFetch.maybeComplete(Map.of()); + if (remoteStorageFetchException.isPresent()) { + completeErroneousRemoteShareFetchRequest(); + } else if (pendingRemoteFetchesOpt.isPresent()) { + if (maybeRegisterCallbackPendingRemoteFetch()) { + log.trace("Registered remote storage fetch callback for group {}, member {}, " + + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), + partitionsAcquired.keySet()); return; - } else { - // Update metric to record acquired to requested partitions. - double requestTopicToAcquired = (double) topicPartitionData.size() / shareFetch.topicIdPartitions().size(); - shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (requestTopicToAcquired * 100)); } - log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", - topicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); + completeRemoteStorageShareFetchRequest(); + } else { + completeLocalLogShareFetchRequest(); + } + } - completeShareFetchRequest(topicPartitionData); - } finally { - lock.unlock(); + private void completeLocalLogShareFetchRequest() { + LinkedHashMap topicPartitionData; + // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. + if (partitionsAcquired.isEmpty()) { + topicPartitionData = acquirablePartitions(sharePartitions); + // The TopicPartitionsAcquireTimeMs metric signifies the tension when acquiring the locks + // for the share partition, hence if no partitions are yet acquired by tryComplete, + // we record the metric here. Do not check if the request has successfully acquired any + // partitions now or not, as then the upper bound of request timeout shall be recorded + // for the metric. + updateAcquireElapsedTimeMetric(); + } else { + // tryComplete invoked forceComplete, so we can use the data from tryComplete. 
+ topicPartitionData = partitionsAcquired; } + + if (topicPartitionData.isEmpty()) { + // No locks for share partitions could be acquired, so we complete the request with an empty response. + shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), 0); + shareFetch.maybeComplete(Map.of()); + return; + } else { + // Update metric to record acquired to requested partitions. + double requestTopicToAcquired = (double) topicPartitionData.size() / shareFetch.topicIdPartitions().size(); + shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (requestTopicToAcquired * 100)); + } + log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", + topicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); + + processAcquiredTopicPartitionsForLocalLogFetch(topicPartitionData); } - private void completeShareFetchRequest(LinkedHashMap topicPartitionData) { + private void processAcquiredTopicPartitionsForLocalLogFetch(LinkedHashMap topicPartitionData) { try { LinkedHashMap responseData; - if (partitionsAlreadyFetched.isEmpty()) + if (localPartitionsAlreadyFetched.isEmpty()) responseData = readFromLog( topicPartitionData, partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size())); else // There shouldn't be a case when we have a partitionsAlreadyFetched value here and this variable is getting // updated in a different tryComplete thread. - responseData = combineLogReadResponse(topicPartitionData, partitionsAlreadyFetched); + responseData = combineLogReadResponse(topicPartitionData, localPartitionsAlreadyFetched); + + resetFetchOffsetMetadataForRemoteFetchPartitions(topicPartitionData, responseData); List shareFetchPartitionDataList = new ArrayList<>(); - responseData.forEach((topicIdPartition, logReadResult) -> - shareFetchPartitionDataList.add(new ShareFetchPartitionData( - topicIdPartition, - topicPartitionData.get(topicIdPartition), - logReadResult.toFetchPartitionData(false) - )) - ); + responseData.forEach((topicIdPartition, logReadResult) -> { + if (logReadResult.info().delayedRemoteStorageFetch.isEmpty()) { + shareFetchPartitionDataList.add(new ShareFetchPartitionData( + topicIdPartition, + topicPartitionData.get(topicIdPartition), + logReadResult.toFetchPartitionData(false) + )); + } + }); shareFetch.maybeComplete(ShareFetchUtils.processFetchResponse( shareFetch, @@ -225,25 +298,47 @@ private void completeShareFetchRequest(LinkedHashMap top log.error("Error processing delayed share fetch request", e); handleFetchException(shareFetch, topicPartitionData.keySet(), e); } finally { - // Releasing the lock to move ahead with the next request in queue. - releasePartitionLocks(topicPartitionData.keySet()); - // If we have a fetch request completed for a topic-partition, we release the locks for that partition, - // then we should check if there is a pending share fetch request for the topic-partition and complete it. 
- // We add the action to delayed actions queue to avoid an infinite call stack, which could happen if - // we directly call delayedShareFetchPurgatory.checkAndComplete - replicaManager.addToActionQueue(() -> topicPartitionData.keySet().forEach(topicIdPartition -> - replicaManager.completeDelayedShareFetchRequest( - new DelayedShareFetchGroupKey(shareFetch.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())))); + releasePartitionLocksAndAddToActionQueue(topicPartitionData.keySet()); } } + /** + * This function updates the cached fetch offset metadata to null corresponding to the share partition's fetch offset. + * This is required in the case when a topic partition that has local log fetch during tryComplete, but changes to remote + * storage fetch in onComplete. In this situation, if the cached fetchOffsetMetadata got updated in tryComplete, then + * we will enter a state where each share fetch request for this topic partition from client will use the cached + * fetchOffsetMetadata in tryComplete and return an empty response to the client from onComplete. + * Hence, we require to set offsetMetadata to null for this fetch offset, which would cause tryComplete to update + * fetchOffsetMetadata and thereby we will identify this partition for remote storage fetch. + * @param topicPartitionData - Map containing the fetch offset for the topic partitions. + * @param replicaManagerReadResponse - Map containing the readFromLog response from replicaManager for the topic partitions. + */ + private void resetFetchOffsetMetadataForRemoteFetchPartitions( + LinkedHashMap topicPartitionData, + LinkedHashMap replicaManagerReadResponse + ) { + replicaManagerReadResponse.forEach((topicIdPartition, logReadResult) -> { + if (logReadResult.info().delayedRemoteStorageFetch.isPresent()) { + SharePartition sharePartition = sharePartitions.get(topicIdPartition); + sharePartition.updateFetchOffsetMetadata( + topicPartitionData.get(topicIdPartition), + null + ); + } + }); + } + /** * Try to complete the fetch operation if we can acquire records for any partition in the share fetch request. */ @Override public boolean tryComplete() { - LinkedHashMap topicPartitionData = acquirablePartitions(); + // Check to see if the remote fetch is in flight. If there is an in flight remote fetch we want to resolve it first. + if (pendingRemoteFetchesOpt.isPresent()) { + return maybeCompletePendingRemoteFetch(); + } + LinkedHashMap topicPartitionData = acquirablePartitions(sharePartitions); try { if (!topicPartitionData.isEmpty()) { // Update the metric to record the time taken to acquire the locks for the share partitions. @@ -252,17 +347,17 @@ public boolean tryComplete() { // replicaManager.readFromLog to populate the offset metadata and update the fetch offset metadata for // those topic partitions. LinkedHashMap replicaManagerReadResponse = maybeReadFromLog(topicPartitionData); + // Store the remote fetch info for the topic partitions for which we need to perform remote fetch. 
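+ // A partition is routed to the remote storage path when its log read result carries a delayedRemoteStorageFetch; if any such partition exists, the other acquired partitions are released for this request, otherwise the local log flow below continues.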
+ LinkedHashMap remoteStorageFetchInfoMap = maybePrepareRemoteStorageFetchInfo(topicPartitionData, replicaManagerReadResponse); + + if (!remoteStorageFetchInfoMap.isEmpty()) { + return maybeProcessRemoteFetch(topicPartitionData, remoteStorageFetchInfoMap); + } maybeUpdateFetchOffsetMetadata(topicPartitionData, replicaManagerReadResponse); if (anyPartitionHasLogReadError(replicaManagerReadResponse) || isMinBytesSatisfied(topicPartitionData, partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size()))) { partitionsAcquired = topicPartitionData; - partitionsAlreadyFetched = replicaManagerReadResponse; - boolean completedByMe = forceComplete(); - // If invocation of forceComplete is not successful, then that means the request is already completed - // hence release the acquired locks. - if (!completedByMe) { - releasePartitionLocks(partitionsAcquired.keySet()); - } - return completedByMe; + localPartitionsAlreadyFetched = replicaManagerReadResponse; + return forceComplete(); } else { log.debug("minBytes is not satisfied for the share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), @@ -270,16 +365,30 @@ public boolean tryComplete() { releasePartitionLocks(topicPartitionData.keySet()); } } else { - log.trace("Can't acquire records for any partition in the share fetch request for group {}, member {}, " + + log.trace("Can't acquire any partitions in the share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet()); } + // At this point, there could be delayed requests sitting in the purgatory which are waiting on + // DelayedShareFetchPartitionKeys corresponding to partitions, whose leader has been changed to a different broker. + // In that case, such partitions would not be able to get acquired, and the tryComplete will keep on returning false. + // Eventually the operation will get timed out and completed, but it might not get removed from the purgatory. + // This has been eventually left it like this because the purging mechanism will trigger whenever the number of completed + // but still being watched operations is larger than the purge interval. This purge interval is defined by the config + // share.fetch.purgatory.purge.interval.requests and is 1000 by default, thereby ensuring that such stale operations do not + // grow indefinitely. return false; } catch (Exception e) { log.error("Error processing delayed share fetch request", e); - releasePartitionLocks(topicPartitionData.keySet()); - partitionsAcquired.clear(); - partitionsAlreadyFetched.clear(); + // In case we have a remote fetch exception, we have already released locks for partitions which have potential + // local log read. We do not release locks for partitions which have a remote storage read because we need to + // complete the share fetch request in onComplete and if we release the locks early here, some other DelayedShareFetch + // request might get the locks for those partitions without this one getting complete. + if (remoteStorageFetchException.isEmpty()) { + releasePartitionLocks(topicPartitionData.keySet()); + partitionsAcquired.clear(); + localPartitionsAlreadyFetched.clear(); + } return forceComplete(); } } @@ -288,27 +397,31 @@ public boolean tryComplete() { * Prepare fetch request structure for partitions in the share fetch request for which we can acquire records. 
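+ * The share partitions to consider are now passed in explicitly, so that the remote storage completion path can attempt an additional local acquire over partitions that were not part of the remote fetch.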
*/ // Visible for testing - LinkedHashMap acquirablePartitions() { + LinkedHashMap acquirablePartitions( + LinkedHashMap sharePartitionsForAcquire + ) { // Initialize the topic partitions for which the fetch should be attempted. LinkedHashMap topicPartitionData = new LinkedHashMap<>(); - sharePartitions.forEach((topicIdPartition, sharePartition) -> { + sharePartitionsForAcquire.forEach((topicIdPartition, sharePartition) -> { // Add the share partition to the list of partitions to be fetched only if we can // acquire the fetch lock on it. - if (sharePartition.maybeAcquireFetchLock()) { + if (sharePartition.maybeAcquireFetchLock(fetchId)) { try { + log.trace("Fetch lock for share partition {}-{} has been acquired by {}", shareFetch.groupId(), topicIdPartition, fetchId); // If the share partition is already at capacity, we should not attempt to fetch. if (sharePartition.canAcquireRecords()) { topicPartitionData.put(topicIdPartition, sharePartition.nextFetchOffset()); } else { - sharePartition.releaseFetchLock(); - log.trace("Record lock partition limit exceeded for SharePartition {}, " + - "cannot acquire more records", sharePartition); + sharePartition.releaseFetchLock(fetchId); + log.trace("Record lock partition limit exceeded for SharePartition {}-{}, " + + "cannot acquire more records. Releasing the fetch lock by {}", shareFetch.groupId(), topicIdPartition, fetchId); } } catch (Exception e) { - log.error("Error checking condition for SharePartition: {}", sharePartition, e); + log.error("Error checking condition for SharePartition: {}-{}", shareFetch.groupId(), topicIdPartition, e); // Release the lock, if error occurred. - sharePartition.releaseFetchLock(); + sharePartition.releaseFetchLock(fetchId); + log.trace("Fetch lock for share partition {}-{} is being released by {}", shareFetch.groupId(), topicIdPartition, fetchId); } } }); @@ -408,11 +521,11 @@ private LogOffsetMetadata endOffsetMetadataForTopicPartition(TopicIdPartition to // extend it to support other FetchIsolation types. FetchIsolation isolationType = shareFetch.fetchParams().isolation; if (isolationType == FetchIsolation.LOG_END) - return offsetSnapshot.logEndOffset; + return offsetSnapshot.logEndOffset(); else if (isolationType == FetchIsolation.HIGH_WATERMARK) - return offsetSnapshot.highWatermark; + return offsetSnapshot.highWatermark(); else - return offsetSnapshot.lastStableOffset; + return offsetSnapshot.lastStableOffset(); } @@ -520,7 +633,8 @@ LinkedHashMap combineLogReadResponse(LinkedHash void releasePartitionLocks(Set topicIdPartitions) { topicIdPartitions.forEach(tp -> { SharePartition sharePartition = sharePartitions.get(tp); - sharePartition.releaseFetchLock(); + sharePartition.releaseFetchLock(fetchId); + log.trace("Fetch lock for share partition {}-{} is being released by {}", shareFetch.groupId(), tp, fetchId); }); } @@ -529,8 +643,347 @@ Lock lock() { return lock; } + // Visible for testing. + PendingRemoteFetches pendingRemoteFetches() { + return pendingRemoteFetchesOpt.orElse(null); + } + + // Visible for testing. + boolean outsidePurgatoryCallbackLock() { + return outsidePurgatoryCallbackLock.get(); + } + + // Only used for testing purpose. + void updatePartitionsAcquired(LinkedHashMap partitionsAcquired) { + this.partitionsAcquired = partitionsAcquired; + } + // Visible for testing. 
Meter expiredRequestMeter() { return expiredRequestMeter; } + + private LinkedHashMap maybePrepareRemoteStorageFetchInfo( + LinkedHashMap topicPartitionData, + LinkedHashMap replicaManagerReadResponse + ) { + LinkedHashMap remoteStorageFetchInfoMap = new LinkedHashMap<>(); + for (Map.Entry entry : replicaManagerReadResponse.entrySet()) { + TopicIdPartition topicIdPartition = entry.getKey(); + LogReadResult logReadResult = entry.getValue(); + if (logReadResult.info().delayedRemoteStorageFetch.isPresent()) { + remoteStorageFetchInfoMap.put(topicIdPartition, logReadResult); + partitionsAcquired.put(topicIdPartition, topicPartitionData.get(topicIdPartition)); + } + } + return remoteStorageFetchInfoMap; + } + + private boolean maybeProcessRemoteFetch( + LinkedHashMap topicPartitionData, + LinkedHashMap remoteStorageFetchInfoMap + ) { + Set nonRemoteFetchTopicPartitions = new LinkedHashSet<>(); + topicPartitionData.keySet().forEach(topicIdPartition -> { + // non-remote storage fetch topic partitions for which fetch would not be happening in this share fetch request. + if (!remoteStorageFetchInfoMap.containsKey(topicIdPartition)) { + nonRemoteFetchTopicPartitions.add(topicIdPartition); + } + }); + // Release fetch lock for the topic partitions that were acquired but were not a part of remote fetch and add + // them to the delayed actions queue. + releasePartitionLocksAndAddToActionQueue(nonRemoteFetchTopicPartitions); + processRemoteFetchOrException(remoteStorageFetchInfoMap); + // Check if remote fetch can be completed. + return maybeCompletePendingRemoteFetch(); + } + + private boolean maybeRegisterCallbackPendingRemoteFetch() { + log.trace("Registering callback pending remote fetch"); + PendingRemoteFetches pendingFetch = pendingRemoteFetchesOpt.get(); + if (!pendingFetch.isDone() && shareFetch.fetchParams().maxWaitMs < remoteFetchMaxWaitMs) { + TimerTask timerTask = new PendingRemoteFetchTimerTask(); + pendingFetch.invokeCallbackOnCompletion(((ignored, throwable) -> { + timerTask.cancel(); + log.trace("Invoked remote storage fetch callback for group {}, member {}, " + + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), + partitionsAcquired.keySet()); + if (throwable != null) { + log.error("Remote storage fetch failed for group {}, member {}, topic partitions {}", + shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet(), throwable); + } + completeRemoteShareFetchRequestOutsidePurgatory(); + })); + replicaManager.addShareFetchTimerRequest(timerTask); + return true; + } + return false; + } + + /** + * Throws an exception if a task for remote storage fetch could not be scheduled successfully else updates pendingRemoteFetchesOpt. + * @param remoteStorageFetchInfoMap - The remote storage fetch information. 
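+ * Each remote read is scheduled through the replica manager's remote log manager via asyncRead; if scheduling any of them fails, the already-created remote fetch tasks are cancelled and the exception is recorded in remoteStorageFetchException before being rethrown.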
+ */ + private void processRemoteFetchOrException( + LinkedHashMap remoteStorageFetchInfoMap + ) { + LinkedHashMap fetchOffsetMetadataMap = new LinkedHashMap<>(); + remoteStorageFetchInfoMap.forEach((topicIdPartition, logReadResult) -> fetchOffsetMetadataMap.put( + topicIdPartition, + logReadResult.info().fetchOffsetMetadata + )); + + List remoteFetches = new ArrayList<>(); + for (Map.Entry entry : remoteStorageFetchInfoMap.entrySet()) { + TopicIdPartition remoteFetchTopicIdPartition = entry.getKey(); + RemoteStorageFetchInfo remoteStorageFetchInfo = entry.getValue().info().delayedRemoteStorageFetch.get(); + + Future remoteFetchTask; + CompletableFuture remoteFetchResult = new CompletableFuture<>(); + try { + remoteFetchTask = replicaManager.remoteLogManager().get().asyncRead( + remoteStorageFetchInfo, + result -> { + remoteFetchResult.complete(result); + replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(shareFetch.groupId(), remoteFetchTopicIdPartition.topicId(), remoteFetchTopicIdPartition.partition())); + } + ); + } catch (Exception e) { + // Cancel the already created remote fetch tasks in case an exception occurs. + remoteFetches.forEach(this::cancelRemoteFetchTask); + // Throw the error if any in scheduling the remote fetch task. + remoteStorageFetchException = Optional.of(e); + throw e; + } + remoteFetches.add(new RemoteFetch(remoteFetchTopicIdPartition, entry.getValue(), remoteFetchTask, remoteFetchResult, remoteStorageFetchInfo)); + } + pendingRemoteFetchesOpt = Optional.of(new PendingRemoteFetches(remoteFetches, fetchOffsetMetadataMap)); + } + + /** + * This function checks if the remote fetch can be completed or not. It should always be called once you confirm pendingRemoteFetchesOpt.isPresent(). + * The operation can be completed if: + * Case a: The partition is in an offline log directory on this broker + * Case b: This broker does not know the partition it tries to fetch + * Case c: This broker is no longer the leader of the partition it tries to fetch + * Case d: This broker is no longer the leader or follower of the partition it tries to fetch + * Case e: All remote storage read requests completed + * @return boolean representing whether the remote fetch is completed or not. 
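+ * Cases a-d force early completion because the partition's state on this broker has changed; case e is the normal completion path once every scheduled remote read has finished.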
+ */ + private boolean maybeCompletePendingRemoteFetch() { + boolean canComplete = false; + + for (TopicIdPartition topicIdPartition : pendingRemoteFetchesOpt.get().fetchOffsetMetadataMap().keySet()) { + try { + Partition partition = replicaManager.getPartitionOrException(topicIdPartition.topicPartition()); + if (!partition.isLeader()) { + throw new NotLeaderException("Broker is no longer the leader of topicPartition: " + topicIdPartition); + } + } catch (KafkaStorageException e) { // Case a + log.debug("TopicPartition {} is in an offline log directory, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); + canComplete = true; + } catch (UnknownTopicOrPartitionException e) { // Case b + log.debug("Broker no longer knows of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); + canComplete = true; + } catch (NotLeaderException e) { // Case c + log.debug("Broker is no longer the leader of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); + canComplete = true; + } catch (NotLeaderOrFollowerException e) { // Case d + log.debug("Broker is no longer the leader or follower of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); + canComplete = true; + } + if (canComplete) + break; + } + + if (canComplete || pendingRemoteFetchesOpt.get().isDone()) { // Case e + return forceComplete(); + } else + return false; + } + + /** + * This function completes a share fetch request for which we have identified erroneous remote storage fetch in tryComplete() + * It should only be called when we know that there is remote fetch in-flight/completed. + */ + private void completeErroneousRemoteShareFetchRequest() { + try { + handleFetchException(shareFetch, partitionsAcquired.keySet(), remoteStorageFetchException.get()); + } finally { + releasePartitionLocksAndAddToActionQueue(partitionsAcquired.keySet()); + } + + } + + private void releasePartitionLocksAndAddToActionQueue(Set topicIdPartitions) { + if (topicIdPartitions.isEmpty()) { + return; + } + // Releasing the lock to move ahead with the next request in queue. + releasePartitionLocks(topicIdPartitions); + replicaManager.addToActionQueue(() -> topicIdPartitions.forEach(topicIdPartition -> { + // If we have a fetch request completed for a share-partition, we release the locks for that partition, + // then we should check if there is a pending share fetch request for the share-partition and complete it. + // We add the action to delayed actions queue to avoid an infinite call stack, which could happen if + // we directly call delayedShareFetchPurgatory.checkAndComplete. + replicaManager.completeDelayedShareFetchRequest( + new DelayedShareFetchGroupKey(shareFetch.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())); + // As DelayedShareFetch operation is watched over multiple keys, same operation might be + // completed and can contain references to data fetched. Hence, if the operation is not + // removed from other watched keys then there can be a memory leak. The removal of the + // operation is dependent on the purge task by DelayedOperationPurgatory. Hence, this can + // also be prevented by setting smaller value for configuration {@link ShareGroupConfig#SHARE_FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG}. + // However, it's best to trigger the check on all the keys that are being watched which + // should free the memory for the completed operation. 
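+ // Hence, also trigger completion on the partition-level key below, in addition to the group-level key above.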
+ replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchPartitionKey(topicIdPartition)); + })); + } + + /** + * This function completes a share fetch request for which we have identified remoteFetch during tryComplete() + * Note - This function should only be called when we know that there is remote fetch. + */ + private void completeRemoteStorageShareFetchRequest() { + LinkedHashMap acquiredNonRemoteFetchTopicPartitionData = new LinkedHashMap<>(); + try { + List shareFetchPartitionData = new ArrayList<>(); + int readableBytes = 0; + for (RemoteFetch remoteFetch : pendingRemoteFetchesOpt.get().remoteFetches()) { + if (remoteFetch.remoteFetchResult().isDone()) { + RemoteLogReadResult remoteLogReadResult = remoteFetch.remoteFetchResult().get(); + if (remoteLogReadResult.error().isPresent()) { + Throwable error = remoteLogReadResult.error().get(); + // If there is any error for the remote fetch topic partition, we populate the error accordingly. + shareFetchPartitionData.add( + new ShareFetchPartitionData( + remoteFetch.topicIdPartition(), + partitionsAcquired.get(remoteFetch.topicIdPartition()), + ReplicaManager.createLogReadResult(error).toFetchPartitionData(false) + ) + ); + } else { + FetchDataInfo info = remoteLogReadResult.fetchDataInfo().get(); + TopicIdPartition topicIdPartition = remoteFetch.topicIdPartition(); + LogReadResult logReadResult = remoteFetch.logReadResult(); + shareFetchPartitionData.add( + new ShareFetchPartitionData( + topicIdPartition, + partitionsAcquired.get(remoteFetch.topicIdPartition()), + new FetchPartitionData( + logReadResult.error(), + logReadResult.highWatermark(), + logReadResult.leaderLogStartOffset(), + info.records, + Optional.empty(), + logReadResult.lastStableOffset().isPresent() ? OptionalLong.of(logReadResult.lastStableOffset().getAsLong()) : OptionalLong.empty(), + info.abortedTransactions, + logReadResult.preferredReadReplica().isPresent() ? OptionalInt.of(logReadResult.preferredReadReplica().getAsInt()) : OptionalInt.empty(), + false + ) + ) + ); + readableBytes += info.records.sizeInBytes(); + } + } else { + cancelRemoteFetchTask(remoteFetch); + } + } + + // If remote fetch bytes < shareFetch.fetchParams().maxBytes, then we will try for a local read. + if (readableBytes < shareFetch.fetchParams().maxBytes) { + // Get the local log read based topic partitions. 
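+ // Only share partitions that were not already acquired for the remote fetch are considered for this additional local read.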
+ LinkedHashMap nonRemoteFetchSharePartitions = new LinkedHashMap<>(); + sharePartitions.forEach((topicIdPartition, sharePartition) -> { + if (!partitionsAcquired.containsKey(topicIdPartition)) { + nonRemoteFetchSharePartitions.put(topicIdPartition, sharePartition); + } + }); + acquiredNonRemoteFetchTopicPartitionData = acquirablePartitions(nonRemoteFetchSharePartitions); + if (!acquiredNonRemoteFetchTopicPartitionData.isEmpty()) { + log.trace("Fetchable local share partitions for a remote share fetch request data: {} with groupId: {} fetch params: {}", + acquiredNonRemoteFetchTopicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); + + LinkedHashMap responseData = readFromLog( + acquiredNonRemoteFetchTopicPartitionData, + partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes - readableBytes, acquiredNonRemoteFetchTopicPartitionData.keySet(), acquiredNonRemoteFetchTopicPartitionData.size())); + resetFetchOffsetMetadataForRemoteFetchPartitions(acquiredNonRemoteFetchTopicPartitionData, responseData); + for (Map.Entry entry : responseData.entrySet()) { + if (entry.getValue().info().delayedRemoteStorageFetch.isEmpty()) { + shareFetchPartitionData.add( + new ShareFetchPartitionData( + entry.getKey(), + acquiredNonRemoteFetchTopicPartitionData.get(entry.getKey()), + entry.getValue().toFetchPartitionData(false) + ) + ); + } + } + } + } + + // Update metric to record acquired to requested partitions. + double acquiredRatio = (double) (partitionsAcquired.size() + acquiredNonRemoteFetchTopicPartitionData.size()) / shareFetch.topicIdPartitions().size(); + if (acquiredRatio > 0) + shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (acquiredRatio * 100)); + + Map remoteFetchResponse = ShareFetchUtils.processFetchResponse( + shareFetch, shareFetchPartitionData, sharePartitions, replicaManager, exceptionHandler); + shareFetch.maybeComplete(remoteFetchResponse); + log.trace("Remote share fetch request completed successfully, response: {}", remoteFetchResponse); + } catch (InterruptedException | ExecutionException e) { + log.error("Exception occurred in completing remote fetch {} for delayed share fetch request {}", pendingRemoteFetchesOpt.get(), e); + handleExceptionInCompletingRemoteStorageShareFetchRequest(acquiredNonRemoteFetchTopicPartitionData.keySet(), e); + } catch (Exception e) { + log.error("Unexpected error in processing delayed share fetch request", e); + handleExceptionInCompletingRemoteStorageShareFetchRequest(acquiredNonRemoteFetchTopicPartitionData.keySet(), e); + } finally { + Set topicIdPartitions = new LinkedHashSet<>(partitionsAcquired.keySet()); + topicIdPartitions.addAll(acquiredNonRemoteFetchTopicPartitionData.keySet()); + releasePartitionLocksAndAddToActionQueue(topicIdPartitions); + } + } + + private void handleExceptionInCompletingRemoteStorageShareFetchRequest( + Set acquiredNonRemoteFetchTopicPartitions, + Exception e + ) { + Set topicIdPartitions = new LinkedHashSet<>(partitionsAcquired.keySet()); + topicIdPartitions.addAll(acquiredNonRemoteFetchTopicPartitions); + handleFetchException(shareFetch, topicIdPartitions, e); + } + + /** + * Cancel the remote storage read task, if it has not been executed yet and avoid interrupting the task if it is + * already running as it may force closing opened/cached resources as transaction index. + * Note - This function should only be called when we know that there is remote fetch. 
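+ * The task is cancelled with mayInterruptIfRunning set to false, so a read that is already running is left to finish.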
+ */ + private void cancelRemoteFetchTask(RemoteFetch remoteFetch) { + boolean cancelled = remoteFetch.remoteFetchTask().cancel(false); + if (!cancelled) { + log.debug("Remote fetch task for RemoteStorageFetchInfo: {} could not be cancelled and its isDone value is {}", + remoteFetch.remoteFetchInfo(), remoteFetch.remoteFetchTask().isDone()); + } + } + + private void completeRemoteShareFetchRequestOutsidePurgatory() { + if (outsidePurgatoryCallbackLock.compareAndSet(false, true)) { + completeRemoteStorageShareFetchRequest(); + } + } + + private class PendingRemoteFetchTimerTask extends TimerTask { + + public PendingRemoteFetchTimerTask() { + super(remoteFetchMaxWaitMs - shareFetch.fetchParams().maxWaitMs); + } + + @Override + public void run() { + log.trace("Expired remote storage fetch callback for group {}, member {}, " + + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), + partitionsAcquired.keySet()); + expiredRequestMeter.mark(); + completeRemoteShareFetchRequestOutsidePurgatory(); + } + } } diff --git a/core/src/main/java/kafka/server/share/PendingRemoteFetches.java b/core/src/main/java/kafka/server/share/PendingRemoteFetches.java new file mode 100644 index 0000000000000..575a32ef4662e --- /dev/null +++ b/core/src/main/java/kafka/server/share/PendingRemoteFetches.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server.share; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.server.LogReadResult; +import org.apache.kafka.storage.internals.log.LogOffsetMetadata; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; +import java.util.function.BiConsumer; + +/** + * This class is used to store the remote storage fetch information for topic partitions in a share fetch request. 
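+ * An instance is considered done only when every constituent remote fetch result future has completed, and invokeCallbackOnCompletion chains the supplied callback on the combination of those futures. A minimal, illustrative sketch follows; the lower-case variable names ({@code topicIdPartition}, {@code logReadResult}, {@code task}, {@code fetchInfo}, {@code fetchOffsetMetadataMap}) and {@code onRemoteReadsComplete} are placeholders for values produced while scheduling the remote read, not part of this class:
+ * <pre>{@code
+ * CompletableFuture<RemoteLogReadResult> result = new CompletableFuture<>();
+ * RemoteFetch fetch = new RemoteFetch(topicIdPartition, logReadResult, task, result, fetchInfo);
+ * PendingRemoteFetches pending = new PendingRemoteFetches(List.of(fetch), fetchOffsetMetadataMap);
+ * pending.invokeCallbackOnCompletion((ignored, error) -> onRemoteReadsComplete(error));
+ * }</pre>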
+ */ +public class PendingRemoteFetches { + private final List remoteFetches; + private final LinkedHashMap fetchOffsetMetadataMap; + + PendingRemoteFetches(List remoteFetches, LinkedHashMap fetchOffsetMetadataMap) { + this.remoteFetches = remoteFetches; + this.fetchOffsetMetadataMap = fetchOffsetMetadataMap; + } + + public boolean isDone() { + for (RemoteFetch remoteFetch : remoteFetches) { + if (!remoteFetch.remoteFetchResult.isDone()) + return false; + } + return true; + } + + public void invokeCallbackOnCompletion(BiConsumer callback) { + List> remoteFetchResult = new ArrayList<>(); + remoteFetches.forEach(remoteFetch -> remoteFetchResult.add(remoteFetch.remoteFetchResult())); + CompletableFuture.allOf(remoteFetchResult.toArray(new CompletableFuture[0])).whenComplete(callback); + } + + public List remoteFetches() { + return remoteFetches; + } + + public LinkedHashMap fetchOffsetMetadataMap() { + return fetchOffsetMetadataMap; + } + + @Override + public String toString() { + return "PendingRemoteFetches(" + + "remoteFetches=" + remoteFetches + + ", fetchOffsetMetadataMap=" + fetchOffsetMetadataMap + + ")"; + } + + public record RemoteFetch( + TopicIdPartition topicIdPartition, + LogReadResult logReadResult, + Future remoteFetchTask, + CompletableFuture remoteFetchResult, + RemoteStorageFetchInfo remoteFetchInfo + ) { + @Override + public String toString() { + return "RemoteFetch(" + + "topicIdPartition=" + topicIdPartition + + ", logReadResult=" + logReadResult + + ", remoteFetchTask=" + remoteFetchTask + + ", remoteFetchResult=" + remoteFetchResult + + ", remoteFetchInfo=" + remoteFetchInfo + + ")"; + } + } +} diff --git a/core/src/main/java/kafka/server/share/ShareFetchUtils.java b/core/src/main/java/kafka/server/share/ShareFetchUtils.java index 3cfab25e6845a..ba9e5368bcfd8 100644 --- a/core/src/main/java/kafka/server/share/ShareFetchUtils.java +++ b/core/src/main/java/kafka/server/share/ShareFetchUtils.java @@ -27,8 +27,9 @@ import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch; import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.Records; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.coordinator.group.GroupConfigManager; @@ -83,7 +84,7 @@ static Map processFetchR if (fetchPartitionData.error.code() != Errors.NONE.code()) { partitionData - .setRecords(null) + .setRecords(MemoryRecords.EMPTY) .setErrorCode(fetchPartitionData.error.code()) .setErrorMessage(fetchPartitionData.error.message()) .setAcquiredRecords(List.of()); @@ -123,7 +124,7 @@ static Map processFetchR // if we want parallel requests for the same share partition or not. if (shareAcquiredRecords.acquiredRecords().isEmpty()) { partitionData - .setRecords(null) + .setRecords(MemoryRecords.EMPTY) .setAcquiredRecords(List.of()); } else { partitionData @@ -204,20 +205,17 @@ static Partition partition(ReplicaManager replicaManager, TopicPartition tp) { * * @param records The records to be sliced. * @param shareAcquiredRecords The share acquired records containing the non-empty acquired records. - * @return The sliced records, if the records are of type FileRecords and the acquired records are a subset - * of the fetched records. 
Otherwise, the original records are returned. + * @return The sliced records, if the acquired records are a subset of the fetched records. Otherwise, + * the original records are returned. */ static Records maybeSliceFetchRecords(Records records, ShareAcquiredRecords shareAcquiredRecords) { - if (!(records instanceof FileRecords fileRecords)) { - return records; - } // The acquired records should be non-empty, do not check as the method is called only when the // acquired records are non-empty. List acquiredRecords = shareAcquiredRecords.acquiredRecords(); try { - final Iterator iterator = fileRecords.batchIterator(); + final Iterator iterator = records.batchIterator(); // Track the first overlapping batch with the first acquired offset. - FileChannelRecordBatch firstOverlapBatch = iterator.next(); + RecordBatch firstOverlapBatch = iterator.next(); // If there exists single fetch batch, then return the original records. if (!iterator.hasNext()) { return records; @@ -229,7 +227,7 @@ static Records maybeSliceFetchRecords(Records records, ShareAcquiredRecords shar int size = 0; // Start iterating from the second batch. while (iterator.hasNext()) { - FileChannelRecordBatch batch = iterator.next(); + RecordBatch batch = iterator.next(); // Iterate until finds the first overlap batch with the first acquired offset. All the // batches before this first overlap batch should be sliced hence increment the start // position. @@ -248,10 +246,10 @@ static Records maybeSliceFetchRecords(Records records, ShareAcquiredRecords shar // acquired offset. size += firstOverlapBatch.sizeInBytes(); // Check if we do not need slicing i.e. neither start position nor size changed. - if (startPosition == 0 && size == fileRecords.sizeInBytes()) { + if (startPosition == 0 && size == records.sizeInBytes()) { return records; } - return fileRecords.slice(startPosition, size); + return records.slice(startPosition, size); } catch (Exception e) { log.error("Error while checking batches for acquired records: {}, skipping slicing.", acquiredRecords, e); // If there is an exception while slicing, return the original records so that the fetch diff --git a/core/src/main/java/kafka/server/share/SharePartition.java b/core/src/main/java/kafka/server/share/SharePartition.java index db2ff4fc8cfeb..2c33007673363 100644 --- a/core/src/main/java/kafka/server/share/SharePartition.java +++ b/core/src/main/java/kafka/server/share/SharePartition.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; -import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRequestException; @@ -42,8 +41,14 @@ import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; +import org.apache.kafka.server.share.fetch.AcquisitionLockTimeoutHandler; +import org.apache.kafka.server.share.fetch.AcquisitionLockTimerTask; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; +import org.apache.kafka.server.share.fetch.DeliveryCountOps; +import org.apache.kafka.server.share.fetch.InFlightBatch; +import 
org.apache.kafka.server.share.fetch.InFlightState; +import org.apache.kafka.server.share.fetch.RecordState; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.metrics.SharePartitionMetrics; import org.apache.kafka.server.share.persister.GroupTopicPartitionData; @@ -60,7 +65,6 @@ import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.slf4j.Logger; @@ -80,7 +84,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -133,70 +137,6 @@ enum SharePartitionState { FENCED } - /** - * The RecordState is used to track the state of a record that has been fetched from the leader. - * The state of the records determines if the records should be re-delivered, move the next fetch - * offset, or be state persisted to disk. - */ - // Visible for testing - enum RecordState { - AVAILABLE((byte) 0), - ACQUIRED((byte) 1), - ACKNOWLEDGED((byte) 2), - ARCHIVED((byte) 4); - - public final byte id; - - RecordState(byte id) { - this.id = id; - } - - /** - * Validates that the newState is one of the valid transition from the current - * {@code RecordState}. - * - * @param newState State into which requesting to transition; must be non-null - * - * @return {@code RecordState} newState if validation succeeds. Returning - * newState helps state assignment chaining. - * - * @throws IllegalStateException if the state transition validation fails. - */ - public RecordState validateTransition(RecordState newState) throws IllegalStateException { - Objects.requireNonNull(newState, "newState cannot be null"); - if (this == newState) { - throw new IllegalStateException("The state transition is invalid as the new state is" - + "the same as the current state"); - } - - if (this == ACKNOWLEDGED || this == ARCHIVED) { - throw new IllegalStateException("The state transition is invalid from the current state: " + this); - } - - if (this == AVAILABLE && newState != ACQUIRED) { - throw new IllegalStateException("The state can only be transitioned to ACQUIRED from AVAILABLE"); - } - - // Either the transition is from Available -> Acquired or from Acquired -> Available/ - // Acknowledged/Archived. - return newState; - } - - public static RecordState forId(byte id) { - return switch (id) { - case 0 -> AVAILABLE; - case 1 -> ACQUIRED; - case 2 -> ACKNOWLEDGED; - case 4 -> ARCHIVED; - default -> throw new IllegalArgumentException("Unknown record state id: " + id); - }; - } - - public byte id() { - return this.id; - } - } - /** * The group id of the share partition belongs to. */ @@ -227,23 +167,19 @@ public byte id() { */ private final ReadWriteLock lock; - /** - * The find next fetch offset is used to indicate if the next fetch offset should be recomputed. - */ - private final AtomicBoolean findNextFetchOffset; - /** * The lock to ensure that the same share partition does not enter a fetch queue - * while another one is being fetched within the queue. + * while another one is being fetched within the queue. 
The caller's id that acquires the fetch + * lock is utilized for ensuring the above. */ - private final AtomicBoolean fetchLock; + private final AtomicReference fetchLock; /** - * The max in-flight messages is used to limit the number of records that can be in-flight at any - * given time. The max in-flight messages is used to prevent the consumer from fetching too many + * The max in-flight records is used to limit the number of records that can be in-flight at any + * given time. The max in-flight records is used to prevent the consumer from fetching too many * records from the leader and running out of memory. */ - private final int maxInFlightMessages; + private final int maxInFlightRecords; /** * The max delivery count is used to limit the number of times a record can be delivered to the @@ -264,6 +200,11 @@ public byte id() { */ private final int defaultRecordLockDurationMs; + /** + * The find next fetch offset is used to indicate if the next fetch offset should be recomputed. + */ + private boolean findNextFetchOffset; + /** * Timer is used to implement acquisition lock on records that guarantees the movement of records from * acquired to available/archived state upon timeout @@ -295,6 +236,17 @@ public byte id() { */ private final SharePartitionMetrics sharePartitionMetrics; + /** + * The acquisition lock timeout handler is used to handle the acquisition lock timeout for the share partition. + */ + private final AcquisitionLockTimeoutHandler timeoutHandler; + + /** + * The replica manager is used to check to see if any delayed share fetch request can be completed because of data + * availability due to acquisition lock timeout. + */ + private final ReplicaManager replicaManager; + /** * The share partition start offset specifies the partition start offset from which the records * are cached in the cachedState of the sharePartition. @@ -308,16 +260,22 @@ public byte id() { private long endOffset; /** - * The initial read gap offset tracks if there are any gaps in the in-flight batch during initial - * read of the share partition state from the persister. + * The persister read result gap window tracks if there are any gaps in the in-flight batch during + * initial read of the share partition state from the persister. */ - private InitialReadGapOffset initialReadGapOffset; + private GapWindow persisterReadResultGapWindow; /** * We maintain the latest fetch offset and its metadata to estimate the minBytes requirement more efficiently. */ private final OffsetMetadata fetchOffsetMetadata; + /** + * The delayed share fetch key is used to track the delayed share fetch requests that are waiting + * for the respective share partition. + */ + private final DelayedShareFetchKey delayedShareFetchKey; + /** * The state epoch is used to track the version of the state of the share partition. */ @@ -343,17 +301,11 @@ public byte id() { */ private long fetchLockIdleDurationMs; - /** - * The replica manager is used to check to see if any delayed share fetch request can be completed because of data - * availability due to acquisition lock timeout. 
- */ - private final ReplicaManager replicaManager; - SharePartition( String groupId, TopicIdPartition topicIdPartition, int leaderEpoch, - int maxInFlightMessages, + int maxInFlightRecords, int maxDeliveryCount, int defaultRecordLockDurationMs, Timer timer, @@ -363,7 +315,7 @@ public byte id() { GroupConfigManager groupConfigManager, SharePartitionListener listener ) { - this(groupId, topicIdPartition, leaderEpoch, maxInFlightMessages, maxDeliveryCount, defaultRecordLockDurationMs, + this(groupId, topicIdPartition, leaderEpoch, maxInFlightRecords, maxDeliveryCount, defaultRecordLockDurationMs, timer, time, persister, replicaManager, groupConfigManager, SharePartitionState.EMPTY, listener, new SharePartitionMetrics(groupId, topicIdPartition.topic(), topicIdPartition.partition())); } @@ -374,7 +326,7 @@ public byte id() { String groupId, TopicIdPartition topicIdPartition, int leaderEpoch, - int maxInFlightMessages, + int maxInFlightRecords, int maxDeliveryCount, int defaultRecordLockDurationMs, Timer timer, @@ -389,12 +341,12 @@ public byte id() { this.groupId = groupId; this.topicIdPartition = topicIdPartition; this.leaderEpoch = leaderEpoch; - this.maxInFlightMessages = maxInFlightMessages; + this.maxInFlightRecords = maxInFlightRecords; this.maxDeliveryCount = maxDeliveryCount; this.cachedState = new ConcurrentSkipListMap<>(); this.lock = new ReentrantReadWriteLock(); - this.findNextFetchOffset = new AtomicBoolean(false); - this.fetchLock = new AtomicBoolean(false); + this.findNextFetchOffset = false; + this.fetchLock = new AtomicReference<>(null); this.defaultRecordLockDurationMs = defaultRecordLockDurationMs; this.timer = timer; this.time = time; @@ -404,8 +356,10 @@ public byte id() { this.replicaManager = replicaManager; this.groupConfigManager = groupConfigManager; this.fetchOffsetMetadata = new OffsetMetadata(); + this.delayedShareFetchKey = new DelayedShareFetchGroupKey(groupId, topicIdPartition); this.listener = listener; this.sharePartitionMetrics = sharePartitionMetrics; + this.timeoutHandler = releaseAcquisitionLockOnTimeout(); this.registerGaugeMetrics(); } @@ -482,8 +436,8 @@ public CompletableFuture maybeInitialize() { if (partitionData.errorCode() != Errors.NONE.code()) { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); - log.error("Failed to initialize the share partition: {}-{}. Exception occurred: {}.", - groupId, topicIdPartition, partitionData); + maybeLogError(String.format("Failed to initialize the share partition: %s-%s. 
Exception occurred: %s.", + groupId, topicIdPartition, partitionData), Errors.forCode(partitionData.errorCode()), ex); throwable = ex; return; } @@ -509,8 +463,9 @@ public CompletableFuture maybeInitialize() { gapStartOffset = previousBatchLastOffset + 1; } previousBatchLastOffset = stateBatch.lastOffset(); - InFlightBatch inFlightBatch = new InFlightBatch(EMPTY_MEMBER_ID, stateBatch.firstOffset(), - stateBatch.lastOffset(), RecordState.forId(stateBatch.deliveryState()), stateBatch.deliveryCount(), null); + InFlightBatch inFlightBatch = new InFlightBatch(timer, time, EMPTY_MEMBER_ID, stateBatch.firstOffset(), + stateBatch.lastOffset(), RecordState.forId(stateBatch.deliveryState()), stateBatch.deliveryCount(), + null, timeoutHandler, sharePartitionMetrics); cachedState.put(stateBatch.firstOffset(), inFlightBatch); sharePartitionMetrics.recordInFlightBatchMessageCount(stateBatch.lastOffset() - stateBatch.firstOffset() + 1); } @@ -518,11 +473,11 @@ public CompletableFuture maybeInitialize() { if (!cachedState.isEmpty()) { // If the cachedState is not empty, findNextFetchOffset flag is set to true so that any AVAILABLE records // in the cached state are not missed - findNextFetchOffset.set(true); + updateFindNextFetchOffset(true); endOffset = cachedState.lastEntry().getValue().lastOffset(); - // initialReadGapOffset is not required, if there are no gaps in the read state response + // gapWindow is not required, if there are no gaps in the read state response if (gapStartOffset != -1) { - initialReadGapOffset = new InitialReadGapOffset(endOffset, gapStartOffset); + persisterReadResultGapWindow = new GapWindow(endOffset, gapStartOffset); } // In case the persister read state RPC result contains no AVAILABLE records, we can update cached state // and start/end offsets. @@ -541,6 +496,9 @@ public CompletableFuture maybeInitialize() { } // Release the lock. lock.writeLock().unlock(); + // Avoid triggering the listener for waiting share fetch requests in purgatory as the + // share partition manager keeps track of same and will trigger the listener for the + // respective share partition. // Complete the future. if (isFailed) { future.completeExceptionally(throwable); @@ -578,7 +536,7 @@ public long nextFetchOffset() { lock.writeLock().lock(); try { // When none of the records in the cachedState are in the AVAILABLE state, findNextFetchOffset will be false - if (!findNextFetchOffset.get()) { + if (!findNextFetchOffset) { if (cachedState.isEmpty() || startOffset > cachedState.lastEntry().getValue().lastOffset()) { // 1. When cachedState is empty, endOffset is set to the next offset of the last // offset removed from batch, which is the next offset to be fetched. @@ -597,37 +555,46 @@ public long nextFetchOffset() { // If cachedState is empty, there is no need of re-computing next fetch offset in future fetch requests. // Same case when startOffset has moved beyond the in-flight records, startOffset and endOffset point to the LSO // and the cached state is fresh. - findNextFetchOffset.set(false); + updateFindNextFetchOffset(false); log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, endOffset); return endOffset; } long nextFetchOffset = -1; - long gapStartOffset = isInitialReadGapOffsetWindowActive() ? initialReadGapOffset.gapStartOffset() : -1; + long gapStartOffset = isPersisterReadGapWindowActive() ? 
persisterReadResultGapWindow.gapStartOffset() : -1; for (Map.Entry entry : cachedState.entrySet()) { // Check if there exists any gap in the in-flight batch which needs to be fetched. If - // initialReadGapOffset's endOffset is equal to the share partition's endOffset, then + // gapWindow's endOffset is equal to the share partition's endOffset, then // only the initial gaps should be considered. Once share partition's endOffset is past // initial read end offset then all gaps are anyway fetched. - if (isInitialReadGapOffsetWindowActive()) { + if (isPersisterReadGapWindowActive()) { if (entry.getKey() > gapStartOffset) { nextFetchOffset = gapStartOffset; break; } - gapStartOffset = entry.getValue().lastOffset() + 1; + // If the gapStartOffset is already past the last offset of the in-flight batch, + // then do not consider this batch for finding the next fetch offset. For example, + // consider during initialization, the gapWindow is set to 5 and the + // first cached batch is 15-18. First read will happen at offset 5 and say the data + // fetched is [5-6], now next fetch offset should be 7. This works fine but say + // subsequent read returns batch 8-11, and the gapStartOffset will be 12. Without + // the max check, the next fetch offset returned will be 7 which is incorrect. + // The natural gaps for which no data is available shall be considered hence + // take the max of the gapStartOffset and the last offset of the in-flight batch. + gapStartOffset = Math.max(entry.getValue().lastOffset() + 1, gapStartOffset); } // Check if the state is maintained per offset or batch. If the offsetState // is not maintained then the batch state is used to determine the offsets state. if (entry.getValue().offsetState() == null) { - if (entry.getValue().batchState() == RecordState.AVAILABLE) { + if (entry.getValue().batchState() == RecordState.AVAILABLE && !entry.getValue().batchHasOngoingStateTransition()) { nextFetchOffset = entry.getValue().firstOffset(); break; } } else { // The offset state is maintained hence find the next available offset. for (Map.Entry offsetState : entry.getValue().offsetState().entrySet()) { - if (offsetState.getValue().state == RecordState.AVAILABLE) { + if (offsetState.getValue().state() == RecordState.AVAILABLE && !offsetState.getValue().hasOngoingStateTransition()) { nextFetchOffset = offsetState.getKey(); break; } @@ -642,7 +609,7 @@ public long nextFetchOffset() { // If nextFetchOffset is -1, then no AVAILABLE records are found in the cachedState, so there is no need of // re-computing next fetch offset in future fetch requests if (nextFetchOffset == -1) { - findNextFetchOffset.set(false); + updateFindNextFetchOffset(false); nextFetchOffset = endOffset + 1; } log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, nextFetchOffset); @@ -698,7 +665,7 @@ public long nextFetchOffset() { * @param isolationLevel The isolation level for the share fetch request. * @return The acquired records for the share partition. 
*/ - @SuppressWarnings("cyclomaticcomplexity") // Consider refactoring to avoid suppression + @SuppressWarnings({"cyclomaticcomplexity", "methodlength"}) // Consider refactoring to avoid suppression public ShareAcquiredRecords acquire( String memberId, int batchSize, @@ -719,6 +686,16 @@ public ShareAcquiredRecords acquire( return ShareAcquiredRecords.empty(); } + LastOffsetAndMaxRecords lastOffsetAndMaxRecords = lastOffsetAndMaxRecordsToAcquire(fetchOffset, + maxFetchRecords, lastBatch.lastOffset()); + if (lastOffsetAndMaxRecords.maxRecords() <= 0) { + return ShareAcquiredRecords.empty(); + } + // The lastOffsetAndMaxRecords contains the last offset to acquire and the maximum number of records + // to acquire. + int maxRecordsToAcquire = lastOffsetAndMaxRecords.maxRecords(); + long lastOffsetToAcquire = lastOffsetAndMaxRecords.lastOffset(); + // We require the first batch of records to get the base offset. Stop parsing further // batches. RecordBatch firstBatch = fetchPartitionData.records.batches().iterator().next(); @@ -731,16 +708,33 @@ public ShareAcquiredRecords acquire( // Find the floor batch record for the request batch. The request batch could be // for a subset of the in-flight batch i.e. cached batch of offset 10-14 and request batch - // of 12-13. Hence, floor entry is fetched to find the sub-map. + // of 12-13. Hence, floor entry is fetched to find the sub-map. Secondly, when the share + // partition is initialized with persisted state, the start offset might be moved to a later + // offset. In such case, the first batch base offset might be less than the start offset. Map.Entry floorEntry = cachedState.floorEntry(baseOffset); - // We might find a batch with floor entry but not necessarily that batch has an overlap, - // if the request batch base offset is ahead of last offset from floor entry i.e. cached - // batch of 10-14 and request batch of 15-18, though floor entry is found but no overlap. - // Such scenario will be handled in the next step when considering the subMap. However, - // if the floor entry is found and the request batch base offset is within the floor entry - // then adjust the base offset to the floor entry so that acquire method can still work on - // previously cached batch boundaries. - if (floorEntry != null && floorEntry.getValue().lastOffset() >= baseOffset) { + if (floorEntry == null) { + // The initialize method check that there couldn't be any batches prior to the start offset. + // And once share partition starts fetching records, it will always fetch records, at least, + // from the start offset, but there could be cases where the batch base offset is prior + // to the start offset. This can happen when the share partition is initialized with + // partial persisted state and moved start offset i.e. start offset is not the batch's + // first offset. In such case, we need to adjust the base offset to the start offset. + // It's safe to adjust the base offset to the start offset when there isn't any floor + // i.e. no cached batches available prior to the request batch base offset. Hence, + // check for the floor entry and adjust the base offset accordingly. 
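// Illustrative note (editorial sketch, not part of this change; offsets are hypothetical): assume a
// share partition restored from persisted state with startOffset = 100 and no cached batch at or
// before offset 95. A fetched log batch spanning 95-110 then yields floorEntry == null and
// baseOffset (95) < startOffset (100), so the branch below bumps baseOffset to 100 and acquisition
// proceeds from the share partition's start offset rather than from the batch's base offset.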
+ if (baseOffset < startOffset) { + log.info("Adjusting base offset for the fetch as it's prior to start offset: {}-{}" + + "from {} to {}", groupId, topicIdPartition, baseOffset, startOffset); + baseOffset = startOffset; + } + } else if (floorEntry.getValue().lastOffset() >= baseOffset) { + // We might find a batch with floor entry but not necessarily that batch has an overlap, + // if the request batch base offset is ahead of last offset from floor entry i.e. cached + // batch of 10-14 and request batch of 15-18, though floor entry is found but no overlap. + // Such scenario will be handled in the next step when considering the subMap. However, + // if the floor entry is found and the request batch base offset is within the floor entry + // then adjust the base offset to the floor entry so that acquire method can still work on + // previously cached batch boundaries. baseOffset = floorEntry.getKey(); } // Validate if the fetch records are already part of existing batches and if available. @@ -750,8 +744,10 @@ public ShareAcquiredRecords acquire( if (subMap.isEmpty()) { log.trace("No cached data exists for the share partition for requested fetch batch: {}-{}", groupId, topicIdPartition); + // Do not send the lastOffsetToAcquire as when the subMap is empty, it means that + // there isn't any overlap itself. ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), - firstBatch.baseOffset(), lastBatch.lastOffset(), batchSize, maxFetchRecords); + firstBatch.baseOffset(), lastBatch.lastOffset(), batchSize, maxRecordsToAcquire); return maybeFilterAbortedTransactionalAcquiredRecords(fetchPartitionData, isolationLevel, shareAcquiredRecords); } @@ -768,27 +764,28 @@ public ShareAcquiredRecords acquire( // be an exact match, subset or span over multiple already fetched batches. for (Map.Entry entry : subMap.entrySet()) { // If the acquired count is equal to the max fetch records then break the loop. - if (acquiredCount >= maxFetchRecords) { + if (acquiredCount >= maxRecordsToAcquire) { break; } InFlightBatch inFlightBatch = entry.getValue(); - // If the initialReadGapOffset window is active, we need to treat the gaps in between the window as + // If the gapWindow window is active, we need to treat the gaps in between the window as // acquirable. Once the window is inactive (when we have acquired all the gaps inside the window), // the remaining gaps are natural (data does not exist at those offsets) and we need not acquire them. - if (isInitialReadGapOffsetWindowActive()) { + if (isPersisterReadGapWindowActive()) { // If nextBatchStartOffset is less than the key of the entry, this means the fetch happened for a gap in the cachedState. // Thus, a new batch needs to be acquired for the gap. if (maybeGapStartOffset < entry.getKey()) { ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), - maybeGapStartOffset, entry.getKey() - 1, batchSize, maxFetchRecords); + maybeGapStartOffset, entry.getKey() - 1, batchSize, maxRecordsToAcquire); result.addAll(shareAcquiredRecords.acquiredRecords()); acquiredCount += shareAcquiredRecords.count(); } - // Set nextBatchStartOffset as the last offset of the current in-flight batch + 1 + // Set nextBatchStartOffset as the last offset of the current in-flight batch + 1. + // Hence, after the loop iteration the next gap can be considered. 
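// Worked example (editorial, hypothetical offsets): with cached in-flight batches 10-14 and 20-24
// and an active persister read gap window starting at 15, iterating batch 10-14 leaves the gap
// pointer at 15; at batch 20-24 the pointer (15) is below the batch key (20), so offsets 15-19 are
// acquired as a new batch, and the assignment below then advances the pointer to 25.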
maybeGapStartOffset = inFlightBatch.lastOffset() + 1; // If the acquired count is equal to the max fetch records then break the loop. - if (acquiredCount >= maxFetchRecords) { + if (acquiredCount >= maxRecordsToAcquire) { break; } } @@ -820,7 +817,7 @@ public ShareAcquiredRecords acquire( // Do not send max fetch records to acquireSubsetBatchRecords as we want to acquire // all the records from the batch as the batch will anyway be part of the file-records // response batch. - int acquiredSubsetCount = acquireSubsetBatchRecords(memberId, firstBatch.baseOffset(), lastBatch.lastOffset(), inFlightBatch, result); + int acquiredSubsetCount = acquireSubsetBatchRecords(memberId, firstBatch.baseOffset(), lastOffsetToAcquire, inFlightBatch, result); acquiredCount += acquiredSubsetCount; continue; } @@ -832,8 +829,8 @@ public ShareAcquiredRecords acquire( continue; } - InFlightState updateResult = inFlightBatch.tryUpdateBatchState(RecordState.ACQUIRED, true, maxDeliveryCount, memberId); - if (updateResult == null) { + InFlightState updateResult = inFlightBatch.tryUpdateBatchState(RecordState.ACQUIRED, DeliveryCountOps.INCREASE, maxDeliveryCount, memberId); + if (updateResult == null || updateResult.state() != RecordState.ACQUIRED) { log.info("Unable to acquire records for the batch: {} in share partition: {}-{}", inFlightBatch, groupId, topicIdPartition); continue; @@ -852,16 +849,16 @@ public ShareAcquiredRecords acquire( // Some of the request offsets are not found in the fetched batches. Acquire the // missing records as well. - if (acquiredCount < maxFetchRecords && subMap.lastEntry().getValue().lastOffset() < lastBatch.lastOffset()) { + if (acquiredCount < maxRecordsToAcquire && subMap.lastEntry().getValue().lastOffset() < lastOffsetToAcquire) { log.trace("There exists another batch which needs to be acquired as well"); ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), subMap.lastEntry().getValue().lastOffset() + 1, - lastBatch.lastOffset(), batchSize, maxFetchRecords - acquiredCount); + lastOffsetToAcquire, batchSize, maxRecordsToAcquire - acquiredCount); result.addAll(shareAcquiredRecords.acquiredRecords()); acquiredCount += shareAcquiredRecords.count(); } if (!result.isEmpty()) { - maybeUpdateReadGapFetchOffset(result.get(result.size() - 1).lastOffset() + 1); + maybeUpdatePersisterGapWindowStartOffset(result.get(result.size() - 1).lastOffset() + 1); return maybeFilterAbortedTransactionalAcquiredRecords(fetchPartitionData, isolationLevel, new ShareAcquiredRecords(result, acquiredCount)); } return new ShareAcquiredRecords(result, acquiredCount); @@ -887,8 +884,7 @@ public CompletableFuture acknowledge( CompletableFuture future = new CompletableFuture<>(); Throwable throwable = null; - List updatedStates = new ArrayList<>(); - List stateBatches = new ArrayList<>(); + List persisterBatches = new ArrayList<>(); lock.writeLock().lock(); try { // Avoided using enhanced for loop as need to check if the last batch have offsets @@ -928,8 +924,7 @@ public CompletableFuture acknowledge( batch, recordStateMap, subMap, - updatedStates, - stateBatches + persisterBatches ); if (ackThrowable.isPresent()) { @@ -937,14 +932,12 @@ public CompletableFuture acknowledge( break; } } - - // If the acknowledgement is successful then persist state, complete the state transition - // and update the cached state for start offset. Else rollback the state transition. 
- rollbackOrProcessStateUpdates(future, throwable, updatedStates, stateBatches); } finally { lock.writeLock().unlock(); } - + // If the acknowledgement is successful then persist state, complete the state transition + // and update the cached state for start offset. Else rollback the state transition. + rollbackOrProcessStateUpdates(future, throwable, persisterBatches); return future; } @@ -960,8 +953,7 @@ public CompletableFuture releaseAcquiredRecords(String memberId) { CompletableFuture future = new CompletableFuture<>(); Throwable throwable = null; - List updatedStates = new ArrayList<>(); - List stateBatches = new ArrayList<>(); + List persisterBatches = new ArrayList<>(); lock.writeLock().lock(); try { @@ -980,26 +972,25 @@ && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.las } if (inFlightBatch.offsetState() != null) { - Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForPerOffsetBatch(memberId, inFlightBatch, recordState, updatedStates, stateBatches); + Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForPerOffsetBatch(memberId, inFlightBatch, recordState, persisterBatches); if (releaseAcquiredRecordsThrowable.isPresent()) { throwable = releaseAcquiredRecordsThrowable.get(); break; } continue; } - Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForCompleteBatch(memberId, inFlightBatch, recordState, updatedStates, stateBatches); + Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForCompleteBatch(memberId, inFlightBatch, recordState, persisterBatches); if (releaseAcquiredRecordsThrowable.isPresent()) { throwable = releaseAcquiredRecordsThrowable.get(); break; } } - - // If the release acquired records is successful then persist state, complete the state transition - // and update the cached state for start offset. Else rollback the state transition. - rollbackOrProcessStateUpdates(future, throwable, updatedStates, stateBatches); } finally { lock.writeLock().unlock(); } + // If the release acquired records is successful then persist state, complete the state transition + // and update the cached state for start offset. Else rollback the state transition. + rollbackOrProcessStateUpdates(future, throwable, persisterBatches); return future; } @@ -1010,12 +1001,11 @@ long loadStartTimeMs() { private Optional releaseAcquiredRecordsForPerOffsetBatch(String memberId, InFlightBatch inFlightBatch, RecordState recordState, - List updatedStates, - List stateBatches) { + List persisterBatches) { log.trace("Offset tracked batch record found, batch: {} for the share partition: {}-{}", inFlightBatch, groupId, topicIdPartition); - for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { // Check if member id is the owner of the offset. if (!offsetState.getValue().memberId().equals(memberId) && !offsetState.getValue().memberId().equals(EMPTY_MEMBER_ID)) { @@ -1023,10 +1013,11 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe + " partition: {}-{}. Skipping offset.", memberId, offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); return Optional.empty(); } - if (offsetState.getValue().state == RecordState.ACQUIRED) { + if (offsetState.getValue().state() == RecordState.ACQUIRED) { + // These records were fetched but they were not actually delivered to the client. InFlightState updateResult = offsetState.getValue().startStateTransition( offsetState.getKey() < startOffset ? 
RecordState.ARCHIVED : recordState, - false, + DeliveryCountOps.DECREASE, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1037,16 +1028,10 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe return Optional.of(new InvalidRecordStateException("Unable to release acquired records for the offset")); } - // Successfully updated the state of the offset. - updatedStates.add(updateResult); - stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), - updateResult.state.id, (short) updateResult.deliveryCount)); - - // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. - // This should not change the next fetch offset because the record is not available for acquisition - if (updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); - } + // Successfully updated the state of the offset and created a persister state batch for write to persister. + persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(offsetState.getKey(), + offsetState.getKey(), updateResult.state().id(), (short) updateResult.deliveryCount()))); + // Do not update the next fetch offset as the offset has not completed the transition yet. } } return Optional.empty(); @@ -1055,8 +1040,7 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe private Optional releaseAcquiredRecordsForCompleteBatch(String memberId, InFlightBatch inFlightBatch, RecordState recordState, - List updatedStates, - List stateBatches) { + List persisterBatches) { // Check if member id is the owner of the batch. if (!inFlightBatch.batchMemberId().equals(memberId) && !inFlightBatch.batchMemberId().equals(EMPTY_MEMBER_ID)) { @@ -1072,7 +1056,7 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member if (inFlightBatch.batchState() == RecordState.ACQUIRED) { InFlightState updateResult = inFlightBatch.startBatchStateTransition( inFlightBatch.lastOffset() < startOffset ? RecordState.ARCHIVED : recordState, - false, + DeliveryCountOps.DECREASE, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1082,16 +1066,10 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member return Optional.of(new InvalidRecordStateException("Unable to release acquired records for the batch")); } - // Successfully updated the state of the batch. - updatedStates.add(updateResult); - stateBatches.add(new PersisterStateBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset(), - updateResult.state.id, (short) updateResult.deliveryCount)); - - // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. - // This should not change the next fetch offset because the record is not available for acquisition - if (updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); - } + // Successfully updated the state of the batch and created a persister state batch for write to persister. + persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(inFlightBatch.firstOffset(), + inFlightBatch.lastOffset(), updateResult.state().id(), (short) updateResult.deliveryCount()))); + // Do not update the next fetch offset as the batch has not completed the transition yet. } return Optional.empty(); } @@ -1099,10 +1077,24 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member /** * Updates the cached state, start and end offsets of the share partition as per the new log * start offset. 
The method is called when the log start offset is moved for the share partition. + *

+ * This method only archives the available records in the cached state that are before the new log
+ * start offset. It does not persist the archived state batches to the persister; rather, it
+ * updates the cached state and offsets to reflect the new log start offset. The state in the persister
+ * will be updated lazily during the acknowledge/release records API calls or on acquisition lock timeout.
+ *

    + * The AVAILABLE state records can either have ongoing state transition or not. Hence, the archive + * records method will update the state of the records to ARCHIVED and set the terminal state flag + * hence if the transition is rolled back then the state will not be AVAILABLE again. However, + * the ACQUIRED state records will not be archived as they are still in-flight and acknowledge + * method also do not allow the state update for any offsets post the log start offset, hence those + * records will only be archived once acquisition lock timeout occurs. * * @param logStartOffset The new log start offset. */ void updateCacheAndOffsets(long logStartOffset) { + log.debug("Updating cached states for share partition: {}-{} with new log start offset: {}", + groupId, topicIdPartition, logStartOffset); lock.writeLock().lock(); try { if (logStartOffset <= startOffset) { @@ -1125,7 +1117,7 @@ void updateCacheAndOffsets(long logStartOffset) { // If we have transitioned the state of any batch/offset from AVAILABLE to ARCHIVED, // then there is a chance that the next fetch offset can change. if (anyRecordArchived) { - findNextFetchOffset.set(true); + updateFindNextFetchOffset(true); } // The new startOffset will be the log start offset. @@ -1187,7 +1179,7 @@ private void maybeArchiveStaleBatches(long fetchOffset, long baseOffset) { // If we have transitioned the state of any batch/offset from AVAILABLE to ARCHIVED, // then there is a chance that the next fetch offset can change. if (anyRecordArchived) { - findNextFetchOffset.set(true); + updateFindNextFetchOffset(true); } } finally { lock.writeLock().unlock(); @@ -1244,11 +1236,11 @@ private boolean archiveRecords(long startOffset, long endOffset, NavigableMap throw new LeaderNotAvailableException( String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition)); - case FENCED -> throw new FencedStateEpochException( + case FENCED -> throw new LeaderNotAvailableException( String.format("Share partition is fenced %s-%s", groupId, topicIdPartition)); case EMPTY -> // The share partition is not yet initialized. @@ -1467,16 +1462,20 @@ private boolean initializedOrThrowException() { } // Method to reduce the window that tracks gaps in the cachedState - private void maybeUpdateReadGapFetchOffset(long offset) { + private void maybeUpdatePersisterGapWindowStartOffset(long offset) { lock.writeLock().lock(); try { - if (initialReadGapOffset != null) { - if (initialReadGapOffset.endOffset() == endOffset) { - initialReadGapOffset.gapStartOffset(offset); + if (persisterReadResultGapWindow != null) { + // When last cached batch for persister's read gap window is acquired, then endOffset is + // same as the gapWindow's endOffset, but the gap offset to update in the method call + // is endOffset + 1. Hence, do not update the gap start offset if the request offset + // is ahead of the endOffset. + if (persisterReadResultGapWindow.endOffset() == endOffset && offset <= persisterReadResultGapWindow.endOffset()) { + persisterReadResultGapWindow.gapStartOffset(offset); } else { - // The initial read gap offset is not valid anymore as the end offset has moved - // beyond the initial read gap offset. Hence, reset the initial read gap offset. - initialReadGapOffset = null; + // The persister's read gap window is not valid anymore as the end offset has moved + // beyond the read gap window's endOffset. Hence, set the gap window to null. 
+ persisterReadResultGapWindow = null; } } } finally { @@ -1484,6 +1483,55 @@ private void maybeUpdateReadGapFetchOffset(long offset) { } } + /** + * The method calculates the last offset and maximum records to acquire. The adjustment is needed + * to ensure that the records acquired do not exceed the maximum in-flight records limit. + * + * @param fetchOffset The offset from which the records are fetched. + * @param maxFetchRecords The maximum number of records to acquire. + * @param lastOffset The last offset to acquire records to, which is the last offset of the fetched batch. + * @return LastOffsetAndMaxRecords object, containing the last offset to acquire and the maximum records to acquire. + */ + private LastOffsetAndMaxRecords lastOffsetAndMaxRecordsToAcquire(long fetchOffset, int maxFetchRecords, long lastOffset) { + // There can always be records fetched exceeding the max in-flight records limit. Hence, + // we need to check if the share partition has reached the max in-flight records limit + // and only acquire limited records. + int maxRecordsToAcquire; + long lastOffsetToAcquire = lastOffset; + lock.readLock().lock(); + try { + int inFlightRecordsCount = numInFlightRecords(); + // Take minimum of maxFetchRecords and remaining capacity to fill max in-flight records limit. + maxRecordsToAcquire = Math.min(maxFetchRecords, maxInFlightRecords - inFlightRecordsCount); + // If the maxRecordsToAcquire is less than or equal to 0, then ideally (check exists to not + // fetch records for share partitions which are at capacity) the fetch must be happening + // in-between the in-flight batches i.e. some in-flight records have been released (marked + // re-available). In such case, last offset to acquire should be adjusted to the endOffset + // of the share partition, if not adjusted then the records can be acquired post the endOffset. + // For example, if 30 records are already acquired i.e. [0-29] and single offset 20 is released + // then the next fetch request will be at 20. Difference from endOffset will be 10, which + // means that some offset past the endOffset can be acquired (21-29 are already acquired). + // Hence, the lastOffsetToAcquire should be adjusted to the endOffset. + if (maxRecordsToAcquire <= 0) { + if (fetchOffset <= endOffset()) { + // Adjust the max records to acquire to the capacity available to fill the max + // in-flight records limit. This can happen when the fetch is happening in-between + // the in-flight batches and the share partition has reached the max in-flight records limit. + maxRecordsToAcquire = Math.min(maxFetchRecords, (int) (endOffset() - fetchOffset + 1)); + // Adjust the last offset to acquire to the endOffset of the share partition. + lastOffsetToAcquire = endOffset(); + } else { + // The share partition is already at max in-flight records, hence cannot acquire more records. + log.debug("Share partition {}-{} has reached max in-flight records limit: {}. Cannot acquire more records, inflight records count: {}", + groupId, topicIdPartition, maxInFlightRecords, inFlightRecordsCount); + } + } + } finally { + lock.readLock().unlock(); + } + return new LastOffsetAndMaxRecords(lastOffsetToAcquire, maxRecordsToAcquire); + } + private ShareAcquiredRecords acquireNewBatchRecords( String memberId, Iterable batches, @@ -1503,14 +1551,28 @@ private ShareAcquiredRecords acquireNewBatchRecords( firstAcquiredOffset = endOffset; } - // Check how many messages can be acquired from the batch. + // Check how many records can be acquired from the batch. 
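// Worked example (editorial, hypothetical values): with firstAcquiredOffset = 0, lastOffset = 99,
// maxFetchRecords = 10 and underlying log batches of 15 records each (0-14, 15-29, ...), the target
// offset computed below is 0 + 10 - 1 = 9, but acquisition aligns to batch boundaries, so the last
// offset of the batch containing offset 9 is used and offsets 0-14 are acquired; maxFetchRecords
// acts as a soft limit, as the comment below notes.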
long lastAcquiredOffset = lastOffset; if (maxFetchRecords < lastAcquiredOffset - firstAcquiredOffset + 1) { - // The max messages to acquire is less than the complete available batches hence + // The max records to acquire is less than the complete available batches hence // limit the acquired records. The last offset shall be the batches last offset - // which falls under the max messages limit. As the max fetch records is the soft - // limit, the last offset can be higher than the max messages. + // which falls under the max records limit. As the max fetch records is the soft + // limit, the last offset can be higher than the max records. lastAcquiredOffset = lastOffsetFromBatchWithRequestOffset(batches, firstAcquiredOffset + maxFetchRecords - 1); + // If the initial read gap offset window is active then it's not guaranteed that the + // batches align on batch boundaries. Hence, reset to last offset itself if the batch's + // last offset is greater than the last offset for acquisition, else there could be + // a situation where the batch overlaps with the initial read gap offset window batch. + // For example, if the initial read gap offset window is 10-30 i.e. gapWindow's + // startOffset is 10 and endOffset is 30, and the first persister's read batch is 15-30. + // Say first fetched batch from log is 10-30 and maxFetchRecords is 1, then the lastOffset + // in this method call would be 14. As the maxFetchRecords is lesser than the batch, + // hence last batch offset for request offset is fetched. In this example it will + // be 30, hence check if the initial read gap offset window is active and the last acquired + // offset should be adjusted to 14 instead of 30. + if (isPersisterReadGapWindowActive() && lastAcquiredOffset > lastOffset) { + lastAcquiredOffset = lastOffset; + } } // Create batches of acquired records. @@ -1527,7 +1589,7 @@ private ShareAcquiredRecords acquireNewBatchRecords( if (lastAcquiredOffset > endOffset) { endOffset = lastAcquiredOffset; } - maybeUpdateReadGapFetchOffset(lastAcquiredOffset + 1); + maybeUpdatePersisterGapWindowStartOffset(lastAcquiredOffset + 1); return new ShareAcquiredRecords(acquiredRecords, (int) (lastAcquiredOffset - firstAcquiredOffset + 1)); } finally { lock.writeLock().unlock(); @@ -1581,12 +1643,16 @@ private List createBatches( AcquisitionLockTimerTask timerTask = scheduleAcquisitionLockTimeout(memberId, acquiredRecords.firstOffset(), acquiredRecords.lastOffset()); // Add the new batch to the in-flight records along with the acquisition lock timeout task for the batch. cachedState.put(acquiredRecords.firstOffset(), new InFlightBatch( + timer, + time, memberId, acquiredRecords.firstOffset(), acquiredRecords.lastOffset(), RecordState.ACQUIRED, 1, - timerTask)); + timerTask, + timeoutHandler, + sharePartitionMetrics)); // Update the in-flight batch message count metrics for the share partition. sharePartitionMetrics.recordInFlightBatchMessageCount(acquiredRecords.lastOffset() - acquiredRecords.firstOffset() + 1); }); @@ -1606,7 +1672,7 @@ private int acquireSubsetBatchRecords( lock.writeLock().lock(); int acquiredCount = 0; try { - for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { // For the first batch which might have offsets prior to the request base // offset i.e. cached batch of 10-14 offsets and request batch of 12-13. 
if (offsetState.getKey() < requestFirstOffset) { @@ -1618,15 +1684,15 @@ private int acquireSubsetBatchRecords( break; } - if (offsetState.getValue().state != RecordState.AVAILABLE || offsetState.getValue().hasOngoingStateTransition()) { + if (offsetState.getValue().state() != RecordState.AVAILABLE || offsetState.getValue().hasOngoingStateTransition()) { log.trace("The offset {} is not available in share partition: {}-{}, skipping: {}", offsetState.getKey(), groupId, topicIdPartition, inFlightBatch); continue; } - InFlightState updateResult = offsetState.getValue().tryUpdateState(RecordState.ACQUIRED, true, maxDeliveryCount, - memberId); - if (updateResult == null) { + InFlightState updateResult = offsetState.getValue().tryUpdateState(RecordState.ACQUIRED, DeliveryCountOps.INCREASE, + maxDeliveryCount, memberId); + if (updateResult == null || updateResult.state() != RecordState.ACQUIRED) { log.trace("Unable to acquire records for the offset: {} in batch: {}" + " for the share partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); @@ -1641,7 +1707,7 @@ private int acquireSubsetBatchRecords( result.add(new AcquiredRecords() .setFirstOffset(offsetState.getKey()) .setLastOffset(offsetState.getKey()) - .setDeliveryCount((short) offsetState.getValue().deliveryCount)); + .setDeliveryCount((short) offsetState.getValue().deliveryCount())); acquiredCount++; } } finally { @@ -1729,7 +1795,7 @@ private NavigableMap fetchSubMapForAcknowledgementBatch( NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, batch.lastOffset(), true); // Validate if the request batch has the first offset greater than the last offset of the last // fetched cached batch, then there will be no offsets in the request that can be acknowledged. - if (subMap.lastEntry().getValue().lastOffset < batch.firstOffset()) { + if (subMap.lastEntry().getValue().lastOffset() < batch.firstOffset()) { log.debug("Request batch: {} has offsets which are not found for share partition: {}-{}", batch, groupId, topicIdPartition); throw new InvalidRequestException("Batch record not found. The first offset in request is past acquired records."); } @@ -1737,7 +1803,7 @@ private NavigableMap fetchSubMapForAcknowledgementBatch( // Validate if the request batch has the last offset greater than the last offset of // the last fetched cached batch, then there will be offsets in the request than cannot // be found in the fetched batches. - if (batch.lastOffset() > subMap.lastEntry().getValue().lastOffset) { + if (batch.lastOffset() > subMap.lastEntry().getValue().lastOffset()) { log.debug("Request batch: {} has offsets which are not found for share partition: {}-{}", batch, groupId, topicIdPartition); throw new InvalidRequestException("Batch record not found. The last offset in request is past acquired records."); } @@ -1753,8 +1819,7 @@ private Optional acknowledgeBatchRecords( ShareAcknowledgementBatch batch, Map recordStateMap, NavigableMap subMap, - final List updatedStates, - List stateBatches + List persisterBatches ) { Optional throwable; lock.writeLock().lock(); @@ -1778,6 +1843,12 @@ private Optional acknowledgeBatchRecords( if (throwable.isPresent()) { return throwable; } + + if (inFlightBatch.batchHasOngoingStateTransition()) { + log.debug("The batch has on-going transition, batch: {} for the share " + + "partition: {}-{}", inFlightBatch, groupId, topicIdPartition); + return Optional.of(new InvalidRecordStateException("The record state is invalid. 
The acknowledgement of delivery could not be completed.")); + } } // Determine if the in-flight batch is a full match from the request batch. @@ -1810,11 +1881,11 @@ private Optional acknowledgeBatchRecords( } throwable = acknowledgePerOffsetBatchRecords(memberId, batch, inFlightBatch, - recordStateMap, updatedStates, stateBatches); + recordStateMap, persisterBatches); } else { // The in-flight batch is a full match hence change the state of the complete batch. throwable = acknowledgeCompleteBatch(batch, inFlightBatch, - recordStateMap.get(batch.firstOffset()), updatedStates, stateBatches); + recordStateMap.get(batch.firstOffset()), persisterBatches); } if (throwable.isPresent()) { @@ -1851,15 +1922,14 @@ private Optional acknowledgePerOffsetBatchRecords( ShareAcknowledgementBatch batch, InFlightBatch inFlightBatch, Map recordStateMap, - List updatedStates, - List stateBatches + List persisterBatches ) { lock.writeLock().lock(); try { // Fetch the first record state from the map to be used as default record state in case the // offset record state is not provided by client. RecordState recordStateDefault = recordStateMap.get(batch.firstOffset()); - for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { // 1. For the first batch which might have offsets prior to the request base // offset i.e. cached batch of 10-14 offsets and request batch of 12-13. @@ -1873,16 +1943,24 @@ private Optional acknowledgePerOffsetBatchRecords( break; } - if (offsetState.getValue().state != RecordState.ACQUIRED) { + if (offsetState.getValue().state() != RecordState.ACQUIRED) { log.debug("The offset is not acquired, offset: {} batch: {} for the share" + " partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); return Optional.of(new InvalidRecordStateException( - "The batch cannot be acknowledged. The offset is not acquired.")); + "The offset cannot be acknowledged. The offset is not acquired.")); + } + + if (offsetState.getValue().hasOngoingStateTransition()) { + log.debug("The offset has on-going transition, offset: {} batch: {} for the share" + + " partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, + topicIdPartition); + return Optional.of(new InvalidRecordStateException( + "The record state is invalid. The acknowledgement of delivery could not be completed.")); } // Check if member id is the owner of the offset. - if (!offsetState.getValue().memberId.equals(memberId)) { + if (!offsetState.getValue().memberId().equals(memberId)) { log.debug("Member {} is not the owner of offset: {} in batch: {} for the share" + " partition: {}-{}", memberId, offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); @@ -1897,7 +1975,7 @@ private Optional acknowledgePerOffsetBatchRecords( recordStateDefault; InFlightState updateResult = offsetState.getValue().startStateTransition( recordState, - false, + DeliveryCountOps.NO_OP, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1908,16 +1986,10 @@ private Optional acknowledgePerOffsetBatchRecords( return Optional.of(new InvalidRecordStateException( "Unable to acknowledge records for the batch")); } - // Successfully updated the state of the offset. - updatedStates.add(updateResult); - stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), - updateResult.state.id, (short) updateResult.deliveryCount)); - // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. 
- // This should not change the next fetch offset because the record is not available for acquisition - if (recordState == RecordState.AVAILABLE - && updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); - } + // Successfully updated the state of the offset and created a persister state batch for write to persister. + persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(offsetState.getKey(), + offsetState.getKey(), updateResult.state().id(), (short) updateResult.deliveryCount()))); + // Do not update the nextFetchOffset as the offset has not completed the transition yet. } } finally { lock.writeLock().unlock(); @@ -1929,8 +2001,7 @@ private Optional acknowledgeCompleteBatch( ShareAcknowledgementBatch batch, InFlightBatch inFlightBatch, RecordState recordState, - List updatedStates, - List stateBatches + List persisterBatches ) { lock.writeLock().lock(); try { @@ -1950,7 +2021,7 @@ private Optional acknowledgeCompleteBatch( // is only important when the batch is acquired. InFlightState updateResult = inFlightBatch.startBatchStateTransition( recordState, - false, + DeliveryCountOps.NO_OP, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1962,18 +2033,10 @@ private Optional acknowledgeCompleteBatch( new InvalidRecordStateException("Unable to acknowledge records for the batch")); } - // Successfully updated the state of the batch. - updatedStates.add(updateResult); - stateBatches.add( - new PersisterStateBatch(inFlightBatch.firstOffset, inFlightBatch.lastOffset, - updateResult.state.id, (short) updateResult.deliveryCount)); - - // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. - // This should not change the nextFetchOffset because the record is not available for acquisition - if (recordState == RecordState.AVAILABLE - && updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); - } + // Successfully updated the state of the batch and created a persister state batch for write to persister. + persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(inFlightBatch.firstOffset(), + inFlightBatch.lastOffset(), updateResult.state().id(), (short) updateResult.deliveryCount()))); + // Do not update the next fetch offset as the batch has not completed the transition yet. } finally { lock.writeLock().unlock(); } @@ -2014,8 +2077,7 @@ SharePartitionState partitionState() { void rollbackOrProcessStateUpdates( CompletableFuture future, Throwable throwable, - List updatedStates, - List stateBatches + List persisterBatches ) { lock.writeLock().lock(); try { @@ -2023,12 +2085,17 @@ void rollbackOrProcessStateUpdates( // Log in DEBUG to avoid flooding of logs for a faulty client. 
log.debug("Request failed for updating state, rollback any changed state" + " for the share partition: {}-{}", groupId, topicIdPartition); - updatedStates.forEach(state -> state.completeStateTransition(false)); + persisterBatches.forEach(persisterBatch -> { + persisterBatch.updatedState.completeStateTransition(false); + if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { + updateFindNextFetchOffset(true); + } + }); future.completeExceptionally(throwable); return; } - if (stateBatches.isEmpty() && updatedStates.isEmpty()) { + if (persisterBatches.isEmpty()) { future.complete(null); return; } @@ -2036,46 +2103,63 @@ void rollbackOrProcessStateUpdates( lock.writeLock().unlock(); } - writeShareGroupState(stateBatches).whenComplete((result, exception) -> { - lock.writeLock().lock(); - try { - if (exception != null) { - log.error("Failed to write state to persister for the share partition: {}-{}", - groupId, topicIdPartition, exception); - updatedStates.forEach(state -> state.completeStateTransition(false)); - future.completeExceptionally(exception); - return; - } + writeShareGroupState(persisterBatches.stream().map(PersisterBatch::stateBatch).toList()) + .whenComplete((result, exception) -> { + // There can be a pending delayed share fetch requests for the share partition which are waiting + // on the startOffset to move ahead, hence track if the state is updated in the cache. If + // yes, then notify the delayed share fetch purgatory to complete the pending requests. + boolean cacheStateUpdated = false; + lock.writeLock().lock(); + try { + if (exception != null) { + log.debug("Failed to write state to persister for the share partition: {}-{}", + groupId, topicIdPartition, exception); + // In case of failure when transition state is rolled back then it should be rolled + // back to ACQUIRED state, unless acquisition lock for the state has expired. + persisterBatches.forEach(persisterBatch -> { + persisterBatch.updatedState.completeStateTransition(false); + if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { + updateFindNextFetchOffset(true); + } + }); + future.completeExceptionally(exception); + return; + } - log.trace("State change request successful for share partition: {}-{}", - groupId, topicIdPartition); - updatedStates.forEach(state -> { - state.completeStateTransition(true); - // Cancel the acquisition lock timeout task for the state since it is acknowledged/released successfully. - state.cancelAndClearAcquisitionLockTimeoutTask(); - }); - // Update the cached state and start and end offsets after acknowledging/releasing the acquired records. - maybeUpdateCachedStateAndOffsets(); - future.complete(null); - } finally { - lock.writeLock().unlock(); - } - }); + log.trace("State change request successful for share partition: {}-{}", + groupId, topicIdPartition); + persisterBatches.forEach(persisterBatch -> { + persisterBatch.updatedState.completeStateTransition(true); + if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { + updateFindNextFetchOffset(true); + } + }); + // Update the cached state and start and end offsets after acknowledging/releasing the acquired records. + cacheStateUpdated = maybeUpdateCachedStateAndOffsets(); + future.complete(null); + } finally { + lock.writeLock().unlock(); + // Maybe complete the delayed share fetch request if the state has been changed in cache + // which might have moved start offset ahead. Hence, the pending delayed share fetch + // request can be completed. 
The call should be made outside the lock to avoid deadlock. + maybeCompleteDelayedShareFetchRequest(cacheStateUpdated); + } + }); } - private void maybeUpdateCachedStateAndOffsets() { + private boolean maybeUpdateCachedStateAndOffsets() { lock.writeLock().lock(); try { if (!canMoveStartOffset()) { - return; + return false; } // This will help to find the next position for the startOffset. // The new position of startOffset will be lastOffsetAcknowledged + 1 long lastOffsetAcknowledged = findLastOffsetAcknowledged(); - // If lastOffsetAcknowledged is -1, this means we cannot move out startOffset ahead + // If lastOffsetAcknowledged is -1, this means we cannot move startOffset ahead if (lastOffsetAcknowledged == -1) { - return; + return false; } // This is true if all records in the cachedState have been acknowledged (either Accept or Reject). @@ -2086,7 +2170,7 @@ private void maybeUpdateCachedStateAndOffsets() { endOffset = lastCachedOffset + 1; cachedState.clear(); // Nothing further to do. - return; + return true; } /* @@ -2096,7 +2180,7 @@ private void maybeUpdateCachedStateAndOffsets() { a) Only full batches can be removed from the cachedState, For example if there is batch (0-99) and 0-49 records are acknowledged (ACCEPT or REJECT), the first 50 records will not be removed from the cachedState. Instead, the startOffset will be moved to 50, but the batch will only - be removed once all the messages (0-99) are acknowledged (ACCEPT or REJECT). + be removed once all the records (0-99) are acknowledged (ACCEPT or REJECT). */ // Since only a subMap will be removed, we need to find the first and last keys of that subMap @@ -2106,15 +2190,15 @@ be removed once all the messages (0-99) are acknowledged (ACCEPT or REJECT). // If the lastOffsetAcknowledged is equal to the last offset of entry, then the entire batch can potentially be removed. if (lastOffsetAcknowledged == entry.getValue().lastOffset()) { startOffset = cachedState.higherKey(lastOffsetAcknowledged); - if (isInitialReadGapOffsetWindowActive()) { + if (isPersisterReadGapWindowActive()) { // This case will arise if we have a situation where there is an acquirable gap after the lastOffsetAcknowledged. // Ex, the cachedState has following state batches -> {(0, 10), (11, 20), (31,40)} and all these batches are acked. - // There is a gap from 21 to 30. Let the initialReadGapOffset.gapStartOffset be 21. In this case, + // There is a gap from 21 to 30. Let the gapWindow's gapStartOffset be 21. In this case, // lastOffsetAcknowledged will be 20, but we cannot simply move the start offset to the first offset // of next cachedState batch (next cachedState batch is 31 to 40). There is an acquirable gap in between (21 to 30) - // and The startOffset should be at 21. Hence, we set startOffset to the minimum of initialReadGapOffset.gapStartOffset + // and The startOffset should be at 21. Hence, we set startOffset to the minimum of gapWindow.gapStartOffset // and higher key of lastOffsetAcknowledged - startOffset = Math.min(initialReadGapOffset.gapStartOffset(), startOffset); + startOffset = Math.min(persisterReadResultGapWindow.gapStartOffset(), startOffset); } lastKeyToRemove = entry.getKey(); } else { @@ -2133,12 +2217,14 @@ be removed once all the messages (0-99) are acknowledged (ACCEPT or REJECT). if (lastKeyToRemove != -1) { cachedState.subMap(firstKeyToRemove, true, lastKeyToRemove, true).clear(); } + return true; } finally { lock.writeLock().unlock(); } } - private boolean canMoveStartOffset() { + // Visible for testing. 
+ boolean canMoveStartOffset() { // The Share Partition Start Offset may be moved after acknowledge request is complete. // The following conditions need to be met to move the startOffset: // 1. When the cachedState is not empty. @@ -2163,14 +2249,22 @@ private boolean canMoveStartOffset() { "as there is an acquirable gap at the beginning. Cannot move the start offset.", startOffset, groupId, topicIdPartition); return false; } - RecordState startOffsetState = entry.getValue().offsetState == null ? + boolean isBatchState = entry.getValue().offsetState() == null; + boolean isOngoingTransition = isBatchState ? + entry.getValue().batchHasOngoingStateTransition() : + entry.getValue().offsetState().get(startOffset).hasOngoingStateTransition(); + if (isOngoingTransition) { + return false; + } + + RecordState startOffsetState = isBatchState ? entry.getValue().batchState() : entry.getValue().offsetState().get(startOffset).state(); return isRecordStateAcknowledged(startOffsetState); } - private boolean isInitialReadGapOffsetWindowActive() { - return initialReadGapOffset != null && initialReadGapOffset.endOffset() == endOffset; + private boolean isPersisterReadGapWindowActive() { + return persisterReadResultGapWindow != null && persisterReadResultGapWindow.endOffset() == endOffset; } /** @@ -2193,18 +2287,18 @@ long findLastOffsetAcknowledged() { for (NavigableMap.Entry entry : cachedState.entrySet()) { InFlightBatch inFlightBatch = entry.getValue(); - if (isInitialReadGapOffsetWindowActive() && inFlightBatch.lastOffset() >= initialReadGapOffset.gapStartOffset()) { + if (isPersisterReadGapWindowActive() && inFlightBatch.lastOffset() >= persisterReadResultGapWindow.gapStartOffset()) { return lastOffsetAcknowledged; } if (inFlightBatch.offsetState() == null) { - if (!isRecordStateAcknowledged(inFlightBatch.batchState())) { + if (inFlightBatch.batchHasOngoingStateTransition() || !isRecordStateAcknowledged(inFlightBatch.batchState())) { return lastOffsetAcknowledged; } lastOffsetAcknowledged = inFlightBatch.lastOffset(); } else { - for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { - if (!isRecordStateAcknowledged(offsetState.getValue().state())) { + for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { + if (offsetState.getValue().hasOngoingStateTransition() || !isRecordStateAcknowledged(offsetState.getValue().state())) { return lastOffsetAcknowledged; } lastOffsetAcknowledged = offsetState.getKey(); @@ -2252,7 +2346,7 @@ CompletableFuture writeShareGroupState(List stateBatc .setGroupId(this.groupId) .setTopicsData(List.of(new TopicData<>(topicIdPartition.topicId(), List.of(PartitionFactory.newPartitionStateBatchData( - topicIdPartition.partition(), stateEpoch, startOffset, leaderEpoch, stateBatches)))) + topicIdPartition.partition(), stateEpoch, startOffset(), leaderEpoch, stateBatches)))) ).build()).build()) .whenComplete((result, exception) -> { if (exception != null) { @@ -2283,8 +2377,8 @@ CompletableFuture writeShareGroupState(List stateBatc PartitionErrorData partitionData = state.partitions().get(0); if (partitionData.errorCode() != Errors.NONE.code()) { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); - log.error("Failed to write the share group state for share partition: {}-{} due to exception", - groupId, topicIdPartition, ex); + maybeLogError(String.format("Failed to write the share group state for share partition: %s-%s due to exception", + groupId, topicIdPartition), 
Errors.forCode(partitionData.errorCode()), ex); future.completeExceptionally(ex); return; } @@ -2302,9 +2396,7 @@ private KafkaException fetchPersisterError(short errorCode, String errorMessage) new GroupIdNotFoundException(errorMessage); case UNKNOWN_TOPIC_OR_PARTITION -> new UnknownTopicOrPartitionException(errorMessage); - case FENCED_STATE_EPOCH -> - new FencedStateEpochException(errorMessage); - case FENCED_LEADER_EPOCH -> + case FENCED_LEADER_EPOCH, FENCED_STATE_EPOCH -> new NotLeaderOrFollowerException(errorMessage); default -> new UnknownServerException(errorMessage); @@ -2345,44 +2437,56 @@ private AcquisitionLockTimerTask acquisitionLockTimerTask( long lastOffset, long delayMs ) { - return new AcquisitionLockTimerTask(delayMs, memberId, firstOffset, lastOffset); + return new AcquisitionLockTimerTask(time, delayMs, memberId, firstOffset, lastOffset, releaseAcquisitionLockOnTimeout(), sharePartitionMetrics); } - private void releaseAcquisitionLockOnTimeout(String memberId, long firstOffset, long lastOffset) { - List stateBatches; - lock.writeLock().lock(); - try { - Map.Entry floorOffset = cachedState.floorEntry(firstOffset); - if (floorOffset == null) { - log.error("Base offset {} not found for share partition: {}-{}", firstOffset, groupId, topicIdPartition); - return; - } - stateBatches = new ArrayList<>(); - NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, lastOffset, true); - for (Map.Entry entry : subMap.entrySet()) { - InFlightBatch inFlightBatch = entry.getValue(); + private AcquisitionLockTimeoutHandler releaseAcquisitionLockOnTimeout() { + return (memberId, firstOffset, lastOffset, timerTask) -> { + List stateBatches; + lock.writeLock().lock(); + try { + // Check if timer task is already cancelled. This can happen when concurrent requests + // happen to acknowledge in-flight state and timeout handler is waiting for the lock + // but already cancelled. + if (timerTask.isCancelled()) { + log.debug("Timer task is already cancelled, not executing further."); + return; + } - if (inFlightBatch.offsetState() == null + Map.Entry floorOffset = cachedState.floorEntry(firstOffset); + if (floorOffset == null) { + log.error("Base offset {} not found for share partition: {}-{}", firstOffset, groupId, topicIdPartition); + return; + } + stateBatches = new ArrayList<>(); + NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, lastOffset, true); + for (Map.Entry entry : subMap.entrySet()) { + InFlightBatch inFlightBatch = entry.getValue(); + + if (inFlightBatch.offsetState() == null && inFlightBatch.batchState() == RecordState.ACQUIRED && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset())) { - // For the case when batch.firstOffset < start offset <= batch.lastOffset, we will be having some - // acquired records that need to move to archived state despite their delivery count. - inFlightBatch.maybeInitializeOffsetStateUpdate(); - } + // For the case when batch.firstOffset < start offset <= batch.lastOffset, we will be having some + // acquired records that need to move to archived state despite their delivery count. + inFlightBatch.maybeInitializeOffsetStateUpdate(); + } - // Case when the state of complete batch is valid - if (inFlightBatch.offsetState() == null) { - releaseAcquisitionLockOnTimeoutForCompleteBatch(inFlightBatch, stateBatches, memberId); - } else { // Case when batch has a valid offset state map. 
- releaseAcquisitionLockOnTimeoutForPerOffsetBatch(inFlightBatch, stateBatches, memberId, firstOffset, lastOffset); + // Case when the state of complete batch is valid + if (inFlightBatch.offsetState() == null) { + releaseAcquisitionLockOnTimeoutForCompleteBatch(inFlightBatch, stateBatches, memberId); + } else { // Case when batch has a valid offset state map. + releaseAcquisitionLockOnTimeoutForPerOffsetBatch(inFlightBatch, stateBatches, memberId, firstOffset, lastOffset); + } } + } finally { + lock.writeLock().unlock(); } if (!stateBatches.isEmpty()) { writeShareGroupState(stateBatches).whenComplete((result, exception) -> { if (exception != null) { - log.error("Failed to write the share group state on acquisition lock timeout for share partition: {}-{} memberId: {}", + log.debug("Failed to write the share group state on acquisition lock timeout for share partition: {}-{} memberId: {}", groupId, topicIdPartition, memberId, exception); } // Even if write share group state RPC call fails, we will still go ahead with the state transition. @@ -2390,17 +2494,12 @@ && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.las maybeUpdateCachedStateAndOffsets(); }); } - } finally { - lock.writeLock().unlock(); - } - // Skip null check for stateBatches, it should always be initialized if reached here. - if (!stateBatches.isEmpty()) { // If we have an acquisition lock timeout for a share-partition, then we should check if // there is a pending share fetch request for the share-partition and complete it. - DelayedShareFetchKey delayedShareFetchKey = new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()); - replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); - } + // Skip null check for stateBatches, it should always be initialized if reached here. + maybeCompleteDelayedShareFetchRequest(!stateBatches.isEmpty()); + }; } private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFlightBatch, @@ -2409,7 +2508,7 @@ private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFli if (inFlightBatch.batchState() == RecordState.ACQUIRED) { InFlightState updateResult = inFlightBatch.tryUpdateBatchState( inFlightBatch.lastOffset() < startOffset ? RecordState.ARCHIVED : RecordState.AVAILABLE, - false, + DeliveryCountOps.NO_OP, maxDeliveryCount, EMPTY_MEMBER_ID); if (updateResult == null) { @@ -2418,12 +2517,12 @@ private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFli return; } stateBatches.add(new PersisterStateBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset(), - updateResult.state.id, (short) updateResult.deliveryCount)); + updateResult.state().id(), (short) updateResult.deliveryCount())); // Cancel the acquisition lock timeout task for the batch since it is completed now. updateResult.cancelAndClearAcquisitionLockTimeoutTask(); - if (updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); + if (updateResult.state() != RecordState.ARCHIVED) { + updateFindNextFetchOffset(true); } return; } @@ -2447,7 +2546,7 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl // No further offsets to process. 
break; } - if (offsetState.getValue().state != RecordState.ACQUIRED) { + if (offsetState.getValue().state() != RecordState.ACQUIRED) { log.debug("The offset is not in acquired state while release of acquisition lock on timeout, skipping, offset: {} batch: {}" + " for the share partition: {}-{} memberId: {}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition, memberId); @@ -2455,7 +2554,7 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl } InFlightState updateResult = offsetState.getValue().tryUpdateState( offsetState.getKey() < startOffset ? RecordState.ARCHIVED : RecordState.AVAILABLE, - false, + DeliveryCountOps.NO_OP, maxDeliveryCount, EMPTY_MEMBER_ID); if (updateResult == null) { @@ -2465,16 +2564,22 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl continue; } stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), - updateResult.state.id, (short) updateResult.deliveryCount)); + updateResult.state().id(), (short) updateResult.deliveryCount())); // Cancel the acquisition lock timeout task for the offset since it is completed now. updateResult.cancelAndClearAcquisitionLockTimeoutTask(); - if (updateResult.state != RecordState.ARCHIVED) { - findNextFetchOffset.set(true); + if (updateResult.state() != RecordState.ARCHIVED) { + updateFindNextFetchOffset(true); } } } + private void maybeCompleteDelayedShareFetchRequest(boolean shouldComplete) { + if (shouldComplete) { + replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); + } + } + private long startOffsetDuringInitialization(long partitionDataStartOffset) { // Set the state epoch and end offset from the persisted state. if (partitionDataStartOffset != PartitionFactory.UNINITIALIZED_START_OFFSET) { @@ -2531,6 +2636,14 @@ private List filterAbortedTransactionalAcquiredRecords( return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); } + private void maybeLogError(String message, Errors receivedError, Throwable wrappedException) { + if (receivedError == Errors.NETWORK_EXCEPTION) { + log.debug(message, wrappedException); + } else { + log.error(message, wrappedException); + } + } + /** * This function filters out the offsets present in the acquired records list that are also a part of batches that need to be archived. * It follows an iterative refinement of acquired records to eliminate batches to be archived. @@ -2690,12 +2803,22 @@ NavigableMap cachedState() { // Visible for testing. boolean findNextFetchOffset() { - return findNextFetchOffset.get(); + lock.readLock().lock(); + try { + return findNextFetchOffset; + } finally { + lock.readLock().unlock(); + } } - // Visible for testing. Should only be used for testing purposes. - void findNextFetchOffset(boolean findNextOffset) { - findNextFetchOffset.getAndSet(findNextOffset); + // Visible for testing. + void updateFindNextFetchOffset(boolean value) { + lock.writeLock().lock(); + try { + findNextFetchOffset = value; + } finally { + lock.writeLock().unlock(); + } } // Visible for testing @@ -2729,22 +2852,27 @@ Timer timer() { } // Visible for testing - InitialReadGapOffset initialReadGapOffset() { - return initialReadGapOffset; + GapWindow persisterReadResultGapWindow() { + return persisterReadResultGapWindow; + } + + // Visible for testing. 
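// The accessor pair above swaps an atomic-style flag for a plain boolean guarded by the
// partition's read/write lock, so the flag is read and updated inside the same critical sections
// as the rest of the cached state. A minimal sketch of that shape (hypothetical class name):
final class GuardedFlagSketch {
    private final java.util.concurrent.locks.ReentrantReadWriteLock lock =
        new java.util.concurrent.locks.ReentrantReadWriteLock();
    private boolean findNextFetchOffset = false; // guarded by lock

    boolean findNextFetchOffset() {
        lock.readLock().lock();
        try {
            return findNextFetchOffset;
        } finally {
            lock.readLock().unlock();
        }
    }

    void updateFindNextFetchOffset(boolean value) {
        lock.writeLock().lock();
        try {
            findNextFetchOffset = value;
        } finally {
            lock.writeLock().unlock();
        }
    }
}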
+ Uuid fetchLock() { + return fetchLock.get(); } /** - * The InitialReadGapOffset class is used to record the gap start and end offset of the probable gaps + * The GapWindow class is used to record the gap start and end offset of the probable gaps * of available records which are neither known to Persister nor to SharePartition. Share Partition * will use this information to determine the next fetch offset and should try to fetch the records * in the gap. */ // Visible for Testing - static class InitialReadGapOffset { + static class GapWindow { private final long endOffset; private long gapStartOffset; - InitialReadGapOffset(long endOffset, long gapStartOffset) { + GapWindow(long endOffset, long gapStartOffset) { this.endOffset = endOffset; this.gapStartOffset = gapStartOffset; } @@ -2762,317 +2890,6 @@ void gapStartOffset(long gapStartOffset) { } } - // Visible for testing - final class AcquisitionLockTimerTask extends TimerTask { - private final long expirationMs; - private final String memberId; - private final long firstOffset; - private final long lastOffset; - - AcquisitionLockTimerTask(long delayMs, String memberId, long firstOffset, long lastOffset) { - super(delayMs); - this.expirationMs = time.hiResClockMs() + delayMs; - this.memberId = memberId; - this.firstOffset = firstOffset; - this.lastOffset = lastOffset; - } - - long expirationMs() { - return expirationMs; - } - - /** - * The task is executed when the acquisition lock timeout is reached. The task releases the acquired records. - */ - @Override - public void run() { - sharePartitionMetrics.recordAcquisitionLockTimeoutPerSec(lastOffset - firstOffset + 1); - releaseAcquisitionLockOnTimeout(memberId, firstOffset, lastOffset); - } - } - - /** - * The InFlightBatch maintains the in-memory state of the fetched records i.e. in-flight records. - */ - final class InFlightBatch { - // The offset of the first record in the batch that is fetched from the log. - private final long firstOffset; - // The last offset of the batch that is fetched from the log. - private final long lastOffset; - - // The batch state of the fetched records. If the offset state map is empty then batchState - // determines the state of the complete batch else individual offset determines the state of - // the respective records. - private InFlightState batchState; - - // The offset state map is used to track the state of the records per offset. However, the - // offset state map is only required when the state of the offsets within same batch are - // different. The states can be different when explicit offset acknowledgment is done which - // is different from the batch state. - private NavigableMap offsetState; - - InFlightBatch(String memberId, long firstOffset, long lastOffset, RecordState state, - int deliveryCount, AcquisitionLockTimerTask acquisitionLockTimeoutTask - ) { - this.firstOffset = firstOffset; - this.lastOffset = lastOffset; - this.batchState = new InFlightState(state, deliveryCount, memberId, acquisitionLockTimeoutTask); - } - - // Visible for testing. - long firstOffset() { - return firstOffset; - } - - // Visible for testing. - long lastOffset() { - return lastOffset; - } - - // Visible for testing. - RecordState batchState() { - return inFlightState().state; - } - - // Visible for testing. - String batchMemberId() { - if (batchState == null) { - throw new IllegalStateException("The batch member id is not available as the offset state is maintained"); - } - return batchState.memberId; - } - - // Visible for testing. 
- int batchDeliveryCount() { - if (batchState == null) { - throw new IllegalStateException("The batch delivery count is not available as the offset state is maintained"); - } - return batchState.deliveryCount; - } - - // Visible for testing. - AcquisitionLockTimerTask batchAcquisitionLockTimeoutTask() { - return inFlightState().acquisitionLockTimeoutTask; - } - - // Visible for testing. - NavigableMap offsetState() { - return offsetState; - } - - private InFlightState inFlightState() { - if (batchState == null) { - throw new IllegalStateException("The batch state is not available as the offset state is maintained"); - } - return batchState; - } - - private boolean batchHasOngoingStateTransition() { - return inFlightState().hasOngoingStateTransition(); - } - - private void archiveBatch(String newMemberId) { - inFlightState().archive(newMemberId); - } - - private InFlightState tryUpdateBatchState(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { - if (batchState == null) { - throw new IllegalStateException("The batch state update is not available as the offset state is maintained"); - } - return batchState.tryUpdateState(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); - } - - private InFlightState startBatchStateTransition(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, - String newMemberId) { - if (batchState == null) { - throw new IllegalStateException("The batch state update is not available as the offset state is maintained"); - } - return batchState.startStateTransition(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); - } - - private void maybeInitializeOffsetStateUpdate() { - if (offsetState == null) { - offsetState = new ConcurrentSkipListMap<>(); - // The offset state map is not initialized hence initialize the state of the offsets - // from the first offset to the last offset. Mark the batch inflightState to null as - // the state of the records is maintained in the offset state map now. - for (long offset = this.firstOffset; offset <= this.lastOffset; offset++) { - if (batchState.acquisitionLockTimeoutTask != null) { - // The acquisition lock timeout task is already scheduled for the batch, hence we need to schedule - // the acquisition lock timeout task for the offset as well. - long delayMs = batchState.acquisitionLockTimeoutTask.expirationMs() - time.hiResClockMs(); - AcquisitionLockTimerTask timerTask = acquisitionLockTimerTask(batchState.memberId, offset, offset, delayMs); - offsetState.put(offset, new InFlightState(batchState.state, batchState.deliveryCount, batchState.memberId, timerTask)); - timer.add(timerTask); - } else { - offsetState.put(offset, new InFlightState(batchState.state, batchState.deliveryCount, batchState.memberId)); - } - } - // Cancel the acquisition lock timeout task for the batch as the offset state is maintained. - if (batchState.acquisitionLockTimeoutTask != null) { - batchState.cancelAndClearAcquisitionLockTimeoutTask(); - } - batchState = null; - } - } - - private void updateAcquisitionLockTimeout(AcquisitionLockTimerTask acquisitionLockTimeoutTask) { - inFlightState().acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; - } - - @Override - public String toString() { - return "InFlightBatch(" + - "firstOffset=" + firstOffset + - ", lastOffset=" + lastOffset + - ", inFlightState=" + batchState + - ", offsetState=" + ((offsetState == null) ? 
"null" : offsetState) + - ")"; - } - } - - /** - * The InFlightState is used to track the state and delivery count of a record that has been - * fetched from the leader. The state of the record is used to determine if the record should - * be re-deliver or if it can be acknowledged or archived. - */ - static final class InFlightState { - - // The state of the fetch batch records. - private RecordState state; - // The number of times the records has been delivered to the client. - private int deliveryCount; - // The member id of the client that is fetching/acknowledging the record. - private String memberId; - // The state of the records before the transition. In case we need to revert an in-flight state, we revert the above - // attributes of InFlightState to this state, namely - state, deliveryCount and memberId. - private InFlightState rollbackState; - // The timer task for the acquisition lock timeout. - private AcquisitionLockTimerTask acquisitionLockTimeoutTask; - - - InFlightState(RecordState state, int deliveryCount, String memberId) { - this(state, deliveryCount, memberId, null); - } - - InFlightState(RecordState state, int deliveryCount, String memberId, AcquisitionLockTimerTask acquisitionLockTimeoutTask) { - this.state = state; - this.deliveryCount = deliveryCount; - this.memberId = memberId; - this.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; - } - - // Visible for testing. - RecordState state() { - return state; - } - - String memberId() { - return memberId; - } - - // Visible for testing. - TimerTask acquisitionLockTimeoutTask() { - return acquisitionLockTimeoutTask; - } - - void updateAcquisitionLockTimeoutTask(AcquisitionLockTimerTask acquisitionLockTimeoutTask) throws IllegalArgumentException { - if (this.acquisitionLockTimeoutTask != null) { - throw new IllegalArgumentException("Existing acquisition lock timeout exists, cannot override."); - } - this.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; - } - - void cancelAndClearAcquisitionLockTimeoutTask() { - acquisitionLockTimeoutTask.cancel(); - acquisitionLockTimeoutTask = null; - } - - private boolean hasOngoingStateTransition() { - if (rollbackState == null) { - // This case could occur when the batch/offset hasn't transitioned even once or the state transitions have - // been committed. - return false; - } - return rollbackState.state != null; - } - - /** - * Try to update the state of the records. The state of the records can only be updated if the - * new state is allowed to be transitioned from old state. The delivery count is not incremented - * if the state update is unsuccessful. - * - * @param newState The new state of the records. - * @param incrementDeliveryCount Whether to increment the delivery count. - * - * @return {@code InFlightState} if update succeeds, null otherwise. Returning state - * helps update chaining. 
- */ - private InFlightState tryUpdateState(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { - try { - if (newState == RecordState.AVAILABLE && deliveryCount >= maxDeliveryCount) { - newState = RecordState.ARCHIVED; - } - state = state.validateTransition(newState); - if (incrementDeliveryCount && newState != RecordState.ARCHIVED) { - deliveryCount++; - } - memberId = newMemberId; - return this; - } catch (IllegalStateException e) { - log.error("Failed to update state of the records", e); - return null; - } - } - - private void archive(String newMemberId) { - state = RecordState.ARCHIVED; - memberId = newMemberId; - } - - private InFlightState startStateTransition(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { - rollbackState = new InFlightState(state, deliveryCount, memberId, acquisitionLockTimeoutTask); - return tryUpdateState(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); - } - - private void completeStateTransition(boolean commit) { - if (commit) { - rollbackState = null; - return; - } - state = rollbackState.state; - deliveryCount = rollbackState.deliveryCount; - memberId = rollbackState.memberId; - rollbackState = null; - } - - @Override - public int hashCode() { - return Objects.hash(state, deliveryCount, memberId); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - InFlightState that = (InFlightState) o; - return state == that.state && deliveryCount == that.deliveryCount && memberId.equals(that.memberId); - } - - @Override - public String toString() { - return "InFlightState(" + - "state=" + state.toString() + - ", deliveryCount=" + deliveryCount + - ", memberId=" + memberId + - ")"; - } - } - /** * FetchOffsetMetadata class is used to cache offset and its log metadata. */ @@ -3098,4 +2915,22 @@ void updateOffsetMetadata(long offset, LogOffsetMetadata offsetMetadata) { this.offsetMetadata = offsetMetadata; } } + + /** + * PersisterBatch class is used to record the state updates for a batch or an offset. + * It contains the updated in-flight state and the persister state batch to be sent to persister. + */ + private record PersisterBatch( + InFlightState updatedState, + PersisterStateBatch stateBatch + ) { } + + /** + * LastOffsetAndMaxRecords class is used to track the last offset to acquire and the maximum number + * of records that can be acquired in a fetch request. + */ + private record LastOffsetAndMaxRecords( + long lastOffset, + int maxRecords + ) { } } diff --git a/core/src/main/java/kafka/server/share/SharePartitionCache.java b/core/src/main/java/kafka/server/share/SharePartitionCache.java new file mode 100644 index 0000000000000..d2913029e4c70 --- /dev/null +++ b/core/src/main/java/kafka/server/share/SharePartitionCache.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server.share; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.server.share.SharePartitionKey; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; + +/** + * The SharePartitionCache is used to cache the SharePartition objects. The cache is thread-safe. + */ +public class SharePartitionCache { + + /** + * The map to store the share group id and the set of topic-partitions for that group. + */ + private final Map> groups; + + /** + * The map is used to store the SharePartition objects for each share group topic-partition. + */ + private final Map partitions; + + SharePartitionCache() { + this.groups = new HashMap<>(); + this.partitions = new ConcurrentHashMap<>(); + } + + /** + * Returns the share partition for the given key. + * + * @param partitionKey The key to get the share partition for. + * @return The share partition for the key or null if not found. + */ + public SharePartition get(SharePartitionKey partitionKey) { + return partitions.get(partitionKey); + } + + /** + * Returns the set of topic-partitions for the given group id. + * + * @param groupId The group id to get the topic-partitions for. + * @return The set of topic-partitions for the group id. + */ + public synchronized Set topicIdPartitionsForGroup(String groupId) { + return groups.containsKey(groupId) ? Set.copyOf(groups.get(groupId)) : Set.of(); + } + + /** + * Removes the share partition from the cache. The method also removes the topic-partition from + * the group map. + * + * @param partitionKey The key to remove. + * @return The removed value or null if not found. + */ + public synchronized SharePartition remove(SharePartitionKey partitionKey) { + groups.computeIfPresent(partitionKey.groupId(), (k, v) -> { + v.remove(partitionKey.topicIdPartition()); + return v.isEmpty() ? null : v; + }); + return partitions.remove(partitionKey); + } + + /** + * Computes the value for the given key if it is not already present in the cache. Method also + * updates the group map with the topic-partition for the group id. + * + * @param partitionKey The key to compute the value for. + * @param mappingFunction The function to compute the value. + * @return The computed or existing value. + */ + public synchronized SharePartition computeIfAbsent(SharePartitionKey partitionKey, Function mappingFunction) { + groups.computeIfAbsent(partitionKey.groupId(), k -> new HashSet<>()).add(partitionKey.topicIdPartition()); + return partitions.computeIfAbsent(partitionKey, mappingFunction); + } + + /** + * Returns the set of all share partition keys in the cache. As the cache can't be cleaned without + * marking the share partitions fenced and detaching the partition listener in the replica manager, + * hence rather providing a method to clean the cache directly, this method is provided to fetch + * all the keys in the cache. + * + * @return The set of all share partition keys. 
+ */ + public Set cachedSharePartitionKeys() { + return partitions.keySet(); + } + + // Visible for testing. Should not be used outside the test classes. + void put(SharePartitionKey partitionKey, SharePartition sharePartition) { + partitions.put(partitionKey, sharePartition); + } + + // Visible for testing. + int size() { + return partitions.size(); + } + + // Visible for testing. + boolean containsKey(SharePartitionKey partitionKey) { + return partitions.containsKey(partitionKey); + } + + // Visible for testing. + boolean isEmpty() { + return partitions.isEmpty(); + } + + // Visible for testing. + synchronized Map> groups() { + return Map.copyOf(groups); + } +} diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 1b50a70c18ca6..3c9c727c53a64 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -34,7 +34,9 @@ import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfigManager; +import org.apache.kafka.server.common.ShareVersion; import org.apache.kafka.server.share.CachedSharePartition; +import org.apache.kafka.server.share.ShareGroupListener; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; import org.apache.kafka.server.share.context.FinalContext; @@ -56,6 +58,7 @@ import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.slf4j.Logger; @@ -71,7 +74,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -84,9 +86,9 @@ public class SharePartitionManager implements AutoCloseable { private static final Logger log = LoggerFactory.getLogger(SharePartitionManager.class); /** - * The partition cache map is used to store the SharePartition objects for each share group topic-partition. + * The partition cache is used to store the SharePartition objects for each share group topic-partition. */ - private final Map partitionCacheMap; + private final SharePartitionCache partitionCache; /** * The replica manager is used to fetch messages from the log. @@ -120,14 +122,18 @@ public class SharePartitionManager implements AutoCloseable { private final Timer timer; /** - * The max in flight messages is the maximum number of messages that can be in flight at any one time per share-partition. + * The max in flight records is the maximum number of records that can be in flight at any one time per share-partition. */ - private final int maxInFlightMessages; + private final int maxInFlightRecords; /** * The max delivery count is the maximum number of times a message can be delivered before it is considered to be archived. */ private final int maxDeliveryCount; + /** + * The max wait time for a share fetch request having remote storage fetch. + */ + private final long remoteFetchMaxWaitMs; /** * The persister is used to persist the share partition state. 
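// SharePartitionCache above pairs a ConcurrentHashMap (lock-free reads) with a synchronized
// per-group index, and keeps the two consistent inside its synchronized mutators. A simplified
// sketch of the same shape, assuming plain String keys instead of the real key types:
final class GroupedCacheSketch<V> {
    private final java.util.Map<String, java.util.Set<String>> groups = new java.util.HashMap<>();
    private final java.util.Map<String, V> entries = new java.util.concurrent.ConcurrentHashMap<>();

    V get(String key) {
        return entries.get(key); // hot read path, no synchronization needed
    }

    synchronized V computeIfAbsent(String groupId, String key, java.util.function.Function<String, V> fn) {
        groups.computeIfAbsent(groupId, g -> new java.util.HashSet<>()).add(key);
        return entries.computeIfAbsent(key, fn);
    }

    synchronized V remove(String groupId, String key) {
        groups.computeIfPresent(groupId, (g, keys) -> {
            keys.remove(key);
            return keys.isEmpty() ? null : keys; // drop the group once its last entry is gone
        });
        return entries.remove(key);
    }
}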
@@ -150,7 +156,8 @@ public SharePartitionManager( ShareSessionCache cache, int defaultRecordLockDurationMs, int maxDeliveryCount, - int maxInFlightMessages, + int maxInFlightRecords, + long remoteFetchMaxWaitMs, Persister persister, GroupConfigManager groupConfigManager, BrokerTopicStats brokerTopicStats @@ -158,10 +165,11 @@ public SharePartitionManager( this(replicaManager, time, cache, - new ConcurrentHashMap<>(), + new SharePartitionCache(), defaultRecordLockDurationMs, maxDeliveryCount, - maxInFlightMessages, + maxInFlightRecords, + remoteFetchMaxWaitMs, persister, groupConfigManager, new ShareGroupMetrics(time), @@ -173,10 +181,11 @@ private SharePartitionManager( ReplicaManager replicaManager, Time time, ShareSessionCache cache, - Map partitionCacheMap, + SharePartitionCache partitionCache, int defaultRecordLockDurationMs, int maxDeliveryCount, - int maxInFlightMessages, + int maxInFlightRecords, + long remoteFetchMaxWaitMs, Persister persister, GroupConfigManager groupConfigManager, ShareGroupMetrics shareGroupMetrics, @@ -185,12 +194,13 @@ private SharePartitionManager( this(replicaManager, time, cache, - partitionCacheMap, + partitionCache, defaultRecordLockDurationMs, new SystemTimerReaper("share-group-lock-timeout-reaper", new SystemTimer("share-group-lock-timeout")), maxDeliveryCount, - maxInFlightMessages, + maxInFlightRecords, + remoteFetchMaxWaitMs, persister, groupConfigManager, shareGroupMetrics, @@ -203,11 +213,12 @@ private SharePartitionManager( ReplicaManager replicaManager, Time time, ShareSessionCache cache, - Map partitionCacheMap, + SharePartitionCache partitionCache, int defaultRecordLockDurationMs, Timer timer, int maxDeliveryCount, - int maxInFlightMessages, + int maxInFlightRecords, + long remoteFetchMaxWaitMs, Persister persister, GroupConfigManager groupConfigManager, ShareGroupMetrics shareGroupMetrics, @@ -216,15 +227,17 @@ private SharePartitionManager( this.replicaManager = replicaManager; this.time = time; this.cache = cache; - this.partitionCacheMap = partitionCacheMap; + this.partitionCache = partitionCache; this.defaultRecordLockDurationMs = defaultRecordLockDurationMs; this.timer = timer; this.maxDeliveryCount = maxDeliveryCount; - this.maxInFlightMessages = maxInFlightMessages; + this.maxInFlightRecords = maxInFlightRecords; + this.remoteFetchMaxWaitMs = remoteFetchMaxWaitMs; this.persister = persister; this.groupConfigManager = groupConfigManager; this.shareGroupMetrics = shareGroupMetrics; this.brokerTopicStats = brokerTopicStats; + this.cache.registerShareGroupListener(new ShareGroupListenerImpl()); } /** @@ -286,7 +299,7 @@ public CompletableFuture { topics.add(topicIdPartition.topic()); SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); - SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); + SharePartition sharePartition = partitionCache.get(sharePartitionKey); if (sharePartition != null) { CompletableFuture future = new CompletableFuture<>(); sharePartition.acknowledge(memberId, acknowledgePartitionBatches).whenComplete((result, throwable) -> { @@ -362,7 +375,7 @@ public CompletableFuture> futuresMap = new HashMap<>(); topicIdPartitions.forEach(topicIdPartition -> { SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); - SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); + SharePartition sharePartition = partitionCache.get(sharePartitionKey); if (sharePartition == null) { log.error("No share partition found for groupId {} topicPartition 
{} while releasing acquired topic partitions", groupId, topicIdPartition); futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION.exception())); @@ -388,6 +401,21 @@ public CompletableFuture createIdleShareFetchTimerTask(long maxWaitMs) { + CompletableFuture future = new CompletableFuture<>(); + TimerTask idleShareFetchTimerTask = new IdleShareFetchTimerTask(maxWaitMs, future); + replicaManager.addShareFetchTimerRequest(idleShareFetchTimerTask); + return future; + } + private CompletableFuture> mapAcknowledgementFutures( Map> futuresMap, Optional>> failedMetricsHandler @@ -420,11 +448,18 @@ private CompletableFuture shareFetchData, - List toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) { + public ShareFetchContext newContext( + String groupId, + List shareFetchData, + List toForget, + ShareRequestMetadata reqMetadata, + Boolean isAcknowledgeDataPresent, + String clientConnectionId + ) { ShareFetchContext context; // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases. @@ -449,10 +484,10 @@ public ShareFetchContext newContext(String groupId, List share shareFetchData.forEach(topicIdPartition -> cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, false))); ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(), - time.milliseconds(), cachedSharePartitions); + cachedSharePartitions, clientConnectionId); if (responseShareSessionKey == null) { log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId()); - throw Errors.SHARE_SESSION_NOT_FOUND.exception(); + throw Errors.SHARE_SESSION_LIMIT_REACHED.exception(); } context = new ShareSessionContext(reqMetadata, shareFetchData); @@ -476,7 +511,7 @@ public ShareFetchContext newContext(String groupId, List share } Map> modifiedTopicIdPartitions = shareSession.update( shareFetchData, toForget); - cache.touch(shareSession, time.milliseconds()); + cache.updateNumPartitions(shareSession); shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch); log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " + "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch, @@ -517,12 +552,30 @@ public void acknowledgeSessionUpdate(String groupId, ShareRequestMetadata reqMet shareSession.epoch, reqMetadata.epoch()); throw Errors.INVALID_SHARE_SESSION_EPOCH.exception(); } - cache.touch(shareSession, time.milliseconds()); + cache.updateNumPartitions(shareSession); shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch); } } } + /** + * The handler for share version feature metadata changes. + * @param shareVersion the new share version feature + * @param isEnabledFromConfig whether the share version feature is enabled from config + */ + public void onShareVersionToggle(ShareVersion shareVersion, boolean isEnabledFromConfig) { + // Clear the cache and remove all share partitions from the cache if the share version does + // not support share groups. + if (!shareVersion.supportsShareGroups() && !isEnabledFromConfig) { + cache.removeAllSessions(); + Set sharePartitionKeys = partitionCache.cachedSharePartitionKeys(); + // Remove all share partitions from partition cache. 
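// createIdleShareFetchTimerTask() above parks a share fetch by handing back a CompletableFuture
// that a timer task completes after maxWaitMs. A minimal sketch of that delay pattern, assuming
// a plain ScheduledExecutorService in place of the broker's Timer/TimerTask machinery:
final class IdleWaitSketch {
    private final java.util.concurrent.ScheduledExecutorService scheduler =
        java.util.concurrent.Executors.newSingleThreadScheduledExecutor();

    // The caller chains the delayed work onto the returned future.
    java.util.concurrent.CompletableFuture<Void> delayFor(long maxWaitMs) {
        java.util.concurrent.CompletableFuture<Void> future = new java.util.concurrent.CompletableFuture<>();
        scheduler.schedule(() -> { future.complete(null); }, maxWaitMs, java.util.concurrent.TimeUnit.MILLISECONDS);
        return future;
    }
}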
+ sharePartitionKeys.forEach(sharePartitionKey -> + removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager) + ); + } + } + /** * The cachedTopicIdPartitionsInShareSession method is used to get the cached topic-partitions in the share session. * @@ -640,24 +693,24 @@ void processShareFetch(ShareFetch shareFetch) { // Add the share fetch to the delayed share fetch purgatory to process the fetch request. // The request will be added irrespective of whether the share partition is initialized or not. // Once the share partition is initialized, the delayed share fetch will be completed. - addDelayedShareFetch(new DelayedShareFetch(shareFetch, replicaManager, fencedSharePartitionHandler(), sharePartitions, shareGroupMetrics, time), delayedShareFetchWatchKeys); + addDelayedShareFetch(new DelayedShareFetch(shareFetch, replicaManager, fencedSharePartitionHandler(), sharePartitions, shareGroupMetrics, time, remoteFetchMaxWaitMs), delayedShareFetchWatchKeys); } private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitionKey) { - return partitionCacheMap.computeIfAbsent(sharePartitionKey, + return partitionCache.computeIfAbsent(sharePartitionKey, k -> { int leaderEpoch = ShareFetchUtils.leaderEpoch(replicaManager, sharePartitionKey.topicIdPartition().topicPartition()); // Attach listener to Partition which shall invoke partition change handlers. // However, as there could be multiple share partitions (per group name) for a single topic-partition, // hence create separate listeners per share partition which holds the share partition key // to identify the respective share partition. - SharePartitionListener listener = new SharePartitionListener(sharePartitionKey, replicaManager, partitionCacheMap); + SharePartitionListener listener = new SharePartitionListener(sharePartitionKey, replicaManager, partitionCache); replicaManager.maybeAddListener(sharePartitionKey.topicIdPartition().topicPartition(), listener); return new SharePartition( sharePartitionKey.groupId(), sharePartitionKey.topicIdPartition(), leaderEpoch, - maxInFlightMessages, + maxInFlightRecords, maxDeliveryCount, defaultRecordLockDurationMs, timer, @@ -683,7 +736,7 @@ private void handleInitializationException( } // Remove the partition from the cache as it's failed to initialize. - removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); // The partition initialization failed, so add the partition to the erroneous partitions. log.debug("Error initializing share partition with key {}", sharePartitionKey, throwable); shareFetch.addErroneous(sharePartitionKey.topicIdPartition(), throwable); @@ -703,7 +756,7 @@ private BiConsumer fencedSharePartitionHandler() { // The share partition is fenced hence remove the partition from map and let the client retry. // But surface the error to the client so client might take some action i.e. re-fetch // the metadata and retry the fetch on new leader. 
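// The fenced-partition path below removes the entry from the cache, marks it fenced, detaches its
// partition listener and completes any delayed share fetch keyed on it, presumably so parked
// fetches fail fast and the client retries after refreshing metadata. The cleanup sequence in
// isolation, with hypothetical interfaces standing in for the real broker types:
final class FencedCleanupSketch {
    interface Cache { Partition remove(String key); }
    interface Partition { void markFenced(); Object listener(); }
    interface Replicas {
        void removeListener(String topicPartition, Object listener);
        void completeDelayedFetch(String key);
    }

    static void removeFromCache(String key, String topicPartition, Cache cache, Replicas replicas) {
        Partition partition = cache.remove(key);
        if (partition != null) {          // only clean up if it was actually cached
            partition.markFenced();       // reject any in-flight acquire/acknowledge attempts
            replicas.removeListener(topicPartition, partition.listener());
            replicas.completeDelayedFetch(key); // wake up fetches waiting on this partition
        }
    }
}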
- removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); } }; } @@ -714,13 +767,14 @@ private SharePartitionKey sharePartitionKey(String groupId, TopicIdPartition top private static void removeSharePartitionFromCache( SharePartitionKey sharePartitionKey, - Map map, + SharePartitionCache partitionCache, ReplicaManager replicaManager ) { - SharePartition sharePartition = map.remove(sharePartitionKey); + SharePartition sharePartition = partitionCache.remove(sharePartitionKey); if (sharePartition != null) { sharePartition.markFenced(); replicaManager.removeListener(sharePartitionKey.topicIdPartition().topicPartition(), sharePartition.listener()); + replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(sharePartitionKey.groupId(), sharePartitionKey.topicIdPartition())); } } @@ -751,16 +805,16 @@ static class SharePartitionListener implements PartitionListener { private final SharePartitionKey sharePartitionKey; private final ReplicaManager replicaManager; - private final Map partitionCacheMap; + private final SharePartitionCache partitionCache; SharePartitionListener( SharePartitionKey sharePartitionKey, ReplicaManager replicaManager, - Map partitionCacheMap + SharePartitionCache partitionCache ) { this.sharePartitionKey = sharePartitionKey; this.replicaManager = replicaManager; - this.partitionCacheMap = partitionCacheMap; + this.partitionCache = partitionCache; } @Override @@ -790,7 +844,65 @@ private void onUpdate(TopicPartition topicPartition) { topicPartition, sharePartitionKey); return; } - removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); + } + } + + /** + * The ShareGroupListenerImpl is used to listen for group events. The share group is associated + * with the group id, need to handle the group events for the share group. + */ + private class ShareGroupListenerImpl implements ShareGroupListener { + + @Override + public void onMemberLeave(String groupId, Uuid memberId) { + releaseSession(groupId, memberId.toString()); + } + + @Override + public void onGroupEmpty(String groupId) { + // Remove all share partitions from the cache. Instead of defining an API in SharePartitionCache + // for removing all share partitions for a group, share partitions are removed after fetching + // associated topic-partitions from the cache. This is done to mark the share partitions fenced + // and remove the listeners from the replica manager. + Set topicIdPartitions = partitionCache.topicIdPartitionsForGroup(groupId); + if (topicIdPartitions != null) { + // Remove all share partitions from partition cache. + topicIdPartitions.forEach(topicIdPartition -> + removeSharePartitionFromCache(new SharePartitionKey(groupId, topicIdPartition), partitionCache, replicaManager) + ); + } } } + + /** + * The IdleShareFetchTimerTask creates a timer task for a share fetch request which tries to initialize a new share + * session when the share session cache is full. Such a request is delayed for maxWaitMs by passing the corresponding + * IdleShareFetchTimerTask to {@link ReplicaManager#delayedShareFetchTimer}. + */ + private static class IdleShareFetchTimerTask extends TimerTask { + + /** + * This future is used to complete the share fetch request when the timer task is completed. 
+ */ + private final CompletableFuture future; + + public IdleShareFetchTimerTask( + long delayMs, + CompletableFuture future + ) { + super(delayMs); + this.future = future; + } + + /** + * The run method which is executed when the timer task expires. This completes the future indicating that the + * delay for the corresponding share fetch request is over. + */ + @Override + public void run() { + future.complete(null); + } + } + } diff --git a/core/src/main/scala/kafka/admin/ConfigCommand.scala b/core/src/main/scala/kafka/admin/ConfigCommand.scala index 0f6b71d8092bd..f004b9956c86e 100644 --- a/core/src/main/scala/kafka/admin/ConfigCommand.scala +++ b/core/src/main/scala/kafka/admin/ConfigCommand.scala @@ -6,7 +6,7 @@ * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +21,7 @@ import joptsimple._ import kafka.server.DynamicConfig import kafka.utils.Implicits._ import kafka.utils.Logging -import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListGroupsOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} +import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListConfigResourcesOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.{InvalidConfigurationException, UnsupportedVersionException} import org.apache.kafka.common.internals.Topic @@ -86,13 +86,13 @@ object ConfigCommand extends Logging { opts.checkArgs() processCommand(opts) } catch { - case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) => - logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e) + case e: UnsupportedVersionException => + logger.debug(s"Unsupported API encountered in server when executing config command with args '${args.mkString(" ")}'") System.err.println(e.getMessage) Exit.exit(1) - case e: UnsupportedVersionException => - logger.debug(s"Unsupported API encountered in server when executing config command with args '${args.mkString(" ")}'") + case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) => + logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e) System.err.println(e.getMessage) Exit.exit(1) @@ -342,6 +342,42 @@ object ConfigCommand extends Logging { } private def describeResourceConfig(adminClient: Admin, entityType: String, entityName: Option[String], describeAll: Boolean): Unit = { + if (!describeAll) { + entityName.foreach { name => + entityType match { + case TopicType => + Topic.validate(name) + if (!adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names.get.contains(name)) { + System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") + return + } + case BrokerType | BrokerLoggerConfigType => + if 
(adminClient.describeCluster.nodes.get.stream.anyMatch(_.idString == name)) { + // valid broker id + } else if (name == BrokerDefaultEntityName) { + // default broker configs + } else { + System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") + return + } + case ClientMetricsType => + if (adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions).all.get + .stream.noneMatch(_.name == name)) { + System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") + return + } + case GroupType => + if (adminClient.listGroups().all.get.stream.noneMatch(_.groupId() == name) && + adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions).all.get + .stream.noneMatch(_.name == name)) { + System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") + return + } + case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") + } + } + } + val entities = entityName .map(name => List(name)) .getOrElse(entityType match { @@ -350,9 +386,10 @@ object ConfigCommand extends Logging { case BrokerType | BrokerLoggerConfigType => adminClient.describeCluster(new DescribeClusterOptions()).nodes().get().asScala.map(_.idString).toSeq :+ BrokerDefaultEntityName case ClientMetricsType => - adminClient.listClientMetricsResources().all().get().asScala.map(_.name).toSeq + adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions).all().get().asScala.map(_.name).toSeq case GroupType => - adminClient.listGroups(ListGroupsOptions.forConsumerGroups()).all.get.asScala.map(_.groupId).toSeq + adminClient.listGroups().all.get.asScala.map(_.groupId).toSet ++ + adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions).all().get().asScala.map(_.name).toSet case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") }) @@ -529,7 +566,7 @@ object ConfigCommand extends Logging { private val nl: String = System.lineSeparator() val addConfig: OptionSpec[String] = parser.accepts("add-config", "Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: 'k1=v1,k2=[v1,v2,v2],k3=v3'. 
The following is a list of valid configurations: " + - "For entity-type '" + TopicType + "': " + LogConfig.configNames.asScala.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + TopicType + "': " + LogConfig.nonInternalConfigNames.asScala.map("\t" + _).mkString(nl, nl, nl) + "For entity-type '" + BrokerType + "': " + DynamicConfig.Broker.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + "For entity-type '" + UserType + "': " + QuotaConfig.scramMechanismsPlusUserAndClientQuotaConfigs().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + "For entity-type '" + ClientType + "': " + QuotaConfig.userAndClientQuotaConfigs().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + diff --git a/core/src/main/scala/kafka/cluster/Partition.scala b/core/src/main/scala/kafka/cluster/Partition.scala index f0b3b3ed43fc3..53228873e5d90 100755 --- a/core/src/main/scala/kafka/cluster/Partition.scala +++ b/core/src/main/scala/kafka/cluster/Partition.scala @@ -19,8 +19,7 @@ package kafka.cluster import java.lang.{Long => JLong} import java.util.concurrent.locks.ReentrantReadWriteLock import java.util.Optional -import java.util.concurrent.{CompletableFuture, CopyOnWriteArrayList} -import kafka.controller.StateChangeLogger +import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, CopyOnWriteArrayList} import kafka.log._ import kafka.server._ import kafka.server.share.DelayedShareFetch @@ -37,13 +36,15 @@ import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch} import org.apache.kafka.common.requests._ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache} +import org.apache.kafka.logger.StateChangeLogger +import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, PartitionRegistration} import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.log.remote.TopicPartitionLog import org.apache.kafka.server.log.remote.storage.RemoteLogManager -import org.apache.kafka.storage.internals.log.{AppendOrigin, AsyncOffsetReader, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, AsyncOffsetReader, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetsListener, LogOffsetSnapshot, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, UnifiedLog, VerificationGuard} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, TopicPartitionOperationKey} +import org.apache.kafka.server.replica.Replica import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, UnexpectedAppendOffsetException} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints @@ -115,7 +116,10 @@ class DelayedOperations(topicId: Option[Uuid], } object Partition { - private val metricsGroup = new KafkaMetricsGroup(classOf[Partition]) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.cluster" + private val metricsClassName = "Partition" + 
private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) def apply(topicIdPartition: TopicIdPartition, time: Time, @@ -321,8 +325,8 @@ class Partition(val topicPartition: TopicPartition, def topic: String = topicPartition.topic def partitionId: Int = topicPartition.partition - private val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None) - private val remoteReplicasMap = new Pool[Int, Replica] + private val stateChangeLogger = new StateChangeLogger(localBrokerId) + private val remoteReplicasMap = new ConcurrentHashMap[Int, Replica] // The read lock is only required when multiple reads are executed and needs to be in a consistent manner private val leaderIsrUpdateLock = new ReentrantReadWriteLock @@ -604,7 +608,7 @@ class Partition(val topicPartition: TopicPartition, // remoteReplicas will be called in the hot path, and must be inexpensive def remoteReplicas: Iterable[Replica] = - remoteReplicasMap.values + remoteReplicasMap.values.asScala def futureReplicaDirChanged(newDestinationDir: String): Boolean = { inReadLock(leaderIsrUpdateLock) { @@ -729,7 +733,8 @@ class Partition(val topicPartition: TopicPartition, * from the time when this broker was the leader last time) and setting the new leader and ISR. * If the leader replica id does not change, return false to indicate the replica manager. */ - def makeLeader(partitionState: LeaderAndIsrRequest.PartitionState, + def makeLeader(partitionRegistration: PartitionRegistration, + isNew: Boolean, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetDirectoryId: Option[Uuid] = None): Boolean = { @@ -737,23 +742,23 @@ class Partition(val topicPartition: TopicPartition, // Partition state changes are expected to have a partition epoch larger or equal // to the current partition epoch. The latter is allowed because the partition epoch // is also updated by the AlterPartition response so the new epoch might be known - // before a LeaderAndIsr request is received or before an update is received via + // before a partitionRegistration is received or before an update is received via // the metadata log. 
- if (partitionState.partitionEpoch < partitionEpoch) { - stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId " + - s"and partition state $partitionState since the leader is already at a newer partition epoch $partitionEpoch.") + if (partitionRegistration.partitionEpoch < partitionEpoch) { + stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId, " + + s"partition registration $partitionRegistration and isNew=$isNew since the leader is already at a newer partition epoch $partitionEpoch.") return false } val currentTimeMs = time.milliseconds val isNewLeader = !isLeader - val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch - val replicas = partitionState.replicas.asScala.map(_.toInt) - val isr = partitionState.isr.asScala.map(_.toInt).toSet - val addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt) - val removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt) + val isNewLeaderEpoch = partitionRegistration.leaderEpoch > leaderEpoch + val replicas = partitionRegistration.replicas + val isr = partitionRegistration.isr.toSet + val addingReplicas = partitionRegistration.addingReplicas + val removingReplicas = partitionRegistration.removingReplicas - if (partitionState.leaderRecoveryState == LeaderRecoveryState.RECOVERING.value) { + if (partitionRegistration.leaderRecoveryState == LeaderRecoveryState.RECOVERING) { stateChangeLogger.info(s"The topic partition $topicPartition was marked as RECOVERING. " + "Marking the topic partition as RECOVERED.") } @@ -769,7 +774,7 @@ class Partition(val topicPartition: TopicPartition, LeaderRecoveryState.RECOVERED ) - createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetDirectoryId) + createLogInAssignedDirectoryId(isNew, highWatermarkCheckpoints, topicId, targetDirectoryId) val leaderLog = localLogOrException @@ -778,8 +783,8 @@ class Partition(val topicPartition: TopicPartition, if (isNewLeaderEpoch) { val leaderEpochStartOffset = leaderLog.logEndOffset stateChangeLogger.info(s"Leader $topicPartition with topic id $topicId starts at " + - s"leader epoch ${partitionState.leaderEpoch} from offset $leaderEpochStartOffset " + - s"with partition epoch ${partitionState.partitionEpoch}, high watermark ${leaderLog.highWatermark}, " + + s"leader epoch ${partitionRegistration.leaderEpoch} from offset $leaderEpochStartOffset " + + s"with partition epoch ${partitionRegistration.partitionEpoch}, high watermark ${leaderLog.highWatermark}, " + s"ISR ${isr.mkString("[", ",", "]")}, adding replicas ${addingReplicas.mkString("[", ",", "]")} and " + s"removing replicas ${removingReplicas.mkString("[", ",", "]")} ${if (isUnderMinIsr) "(under-min-isr)" else ""}. " + s"Previous leader $leaderReplicaIdOpt and previous leader epoch was $leaderEpoch.") @@ -789,32 +794,32 @@ class Partition(val topicPartition: TopicPartition, // to ensure that these followers can truncate to the right offset, we must cache the new // leader epoch and the start offset since it should be larger than any epoch that a follower // would try to query. - leaderLog.assignEpochStartOffset(partitionState.leaderEpoch, leaderEpochStartOffset) + leaderLog.assignEpochStartOffset(partitionRegistration.leaderEpoch, leaderEpochStartOffset) // Initialize lastCaughtUpTime of replicas as well as their lastFetchTimeMs and // lastFetchLeaderLogEndOffset. 
remoteReplicas.foreach { replica => replica.resetReplicaState( - currentTimeMs = currentTimeMs, - leaderEndOffset = leaderEpochStartOffset, - isNewLeader = isNewLeader, - isFollowerInSync = partitionState.isr.contains(replica.brokerId) + currentTimeMs, + leaderEpochStartOffset, + isNewLeader, + isr.contains(replica.brokerId) ) } // We update the leader epoch and the leader epoch start offset iff the // leader epoch changed. - leaderEpoch = partitionState.leaderEpoch + leaderEpoch = partitionRegistration.leaderEpoch leaderEpochStartOffsetOpt = Some(leaderEpochStartOffset) } else { - stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId " + - s"and partition state $partitionState since it is already the leader with leader epoch $leaderEpoch. " + + stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId, " + + s"partition registration $partitionRegistration and isNew=$isNew since it is already the leader with leader epoch $leaderEpoch. " + s"Current high watermark ${leaderLog.highWatermark}, ISR ${isr.mkString("[", ",", "]")}, " + s"adding replicas ${addingReplicas.mkString("[", ",", "]")} and " + s"removing replicas ${removingReplicas.mkString("[", ",", "]")}.") } - partitionEpoch = partitionState.partitionEpoch + partitionEpoch = partitionRegistration.partitionEpoch leaderReplicaIdOpt = Some(localBrokerId) // We may need to increment high watermark since ISR could be down to 1. @@ -835,46 +840,49 @@ class Partition(val topicPartition: TopicPartition, * replica manager that state is already correct and the become-follower steps can * be skipped. */ - def makeFollower(partitionState: LeaderAndIsrRequest.PartitionState, + def makeFollower(partitionRegistration: PartitionRegistration, + isNew: Boolean, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid] = None): Boolean = { inWriteLock(leaderIsrUpdateLock) { - if (partitionState.partitionEpoch < partitionEpoch) { - stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId " + - s"and partition state $partitionState since the follower is already at a newer partition epoch $partitionEpoch.") + if (partitionRegistration.partitionEpoch < partitionEpoch) { + stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId, " + + s"partition registration $partitionRegistration and isNew=$isNew since the follower is already at a newer partition epoch $partitionEpoch.") return false } - val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch + val isNewLeaderEpoch = partitionRegistration.leaderEpoch > leaderEpoch // The leader should be updated before updateAssignmentAndIsr where we clear the ISR. Or it is possible to meet // the under min isr condition during the makeFollower process and emits the wrong metric. 
- leaderReplicaIdOpt = Option(partitionState.leader) - leaderEpoch = partitionState.leaderEpoch + val prevLeaderReplicaIdOpt = leaderReplicaIdOpt + val prevLeaderEpoch = leaderEpoch + leaderReplicaIdOpt = Option(partitionRegistration.leader) + leaderEpoch = partitionRegistration.leaderEpoch leaderEpochStartOffsetOpt = None - partitionEpoch = partitionState.partitionEpoch + partitionEpoch = partitionRegistration.partitionEpoch updateAssignmentAndIsr( - replicas = partitionState.replicas.asScala.iterator.map(_.toInt).toSeq, + replicas = partitionRegistration.replicas, isLeader = false, isr = Set.empty, - addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt), - removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt), - LeaderRecoveryState.of(partitionState.leaderRecoveryState) + addingReplicas = partitionRegistration.addingReplicas, + removingReplicas = partitionRegistration.removingReplicas, + partitionRegistration.leaderRecoveryState ) - createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetLogDirectoryId) + createLogInAssignedDirectoryId(isNew, highWatermarkCheckpoints, topicId, targetLogDirectoryId) val followerLog = localLogOrException if (isNewLeaderEpoch) { val leaderEpochEndOffset = followerLog.logEndOffset - stateChangeLogger.info(s"Follower $topicPartition starts at leader epoch ${partitionState.leaderEpoch} from " + - s"offset $leaderEpochEndOffset with partition epoch ${partitionState.partitionEpoch} and " + - s"high watermark ${followerLog.highWatermark}. Current leader is ${partitionState.leader}. " + - s"Previous leader $leaderReplicaIdOpt and previous leader epoch was $leaderEpoch.") + stateChangeLogger.info(s"Follower $topicPartition starts at leader epoch ${partitionRegistration.leaderEpoch} from " + + s"offset $leaderEpochEndOffset with partition epoch ${partitionRegistration.partitionEpoch} and " + + s"high watermark ${followerLog.highWatermark}. Current leader is ${partitionRegistration.leader}. 
" + + s"Previous leader $prevLeaderReplicaIdOpt and previous leader epoch was $prevLeaderEpoch.") } else { - stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId " + - s"and partition state $partitionState since it is already a follower with leader epoch $leaderEpoch.") + stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId, " + + s"partition registration $partitionRegistration and isNew=$isNew since it is already a follower with leader epoch $leaderEpoch.") } // We must restart the fetchers when the leader epoch changed regardless of @@ -883,11 +891,11 @@ class Partition(val topicPartition: TopicPartition, } } - private def createLogInAssignedDirectoryId(partitionState: LeaderAndIsrRequest.PartitionState, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid]): Unit = { + private def createLogInAssignedDirectoryId(isNew: Boolean, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid]): Unit = { targetLogDirectoryId match { case Some(directoryId) => if (logManager.onlineLogDirId(directoryId) || !logManager.hasOfflineLogDirs() || directoryId == DirectoryId.UNASSIGNED) { - createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId, targetLogDirectoryId) + createLogIfNotExists(isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId, targetLogDirectoryId) } else { warn(s"Skipping creation of log because there are potentially offline log " + s"directories and log may already exist there. directoryId=$directoryId, " + @@ -895,7 +903,7 @@ class Partition(val topicPartition: TopicPartition, } case None => - createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId) + createLogIfNotExists(isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId) } } @@ -983,12 +991,11 @@ class Partition(val topicPartition: TopicPartition, ): Unit = { if (isLeader) { val followers = replicas.filter(_ != localBrokerId) - val removedReplicas = remoteReplicasMap.keys.filterNot(followers.contains(_)) // Due to code paths accessing remoteReplicasMap without a lock, // first add the new replicas and then remove the old ones. 
- followers.foreach(id => remoteReplicasMap.getAndMaybePut(id, new Replica(id, topicPartition, metadataCache))) - remoteReplicasMap.removeAll(removedReplicas) + followers.foreach(id => remoteReplicasMap.computeIfAbsent(id, _ => new Replica(id, topicPartition, metadataCache))) + remoteReplicasMap.keySet.removeIf(replica => !followers.contains(replica)) } else { remoteReplicasMap.clear() } @@ -1073,9 +1080,9 @@ class Partition(val topicPartition: TopicPartition, isBrokerEpochIsrEligible(storedBrokerEpoch, cachedBrokerEpoch) } - private def isBrokerEpochIsrEligible(storedBrokerEpoch: Option[Long], cachedBrokerEpoch: Optional[java.lang.Long]): Boolean = { - storedBrokerEpoch.isDefined && cachedBrokerEpoch.isPresent() && - (storedBrokerEpoch.get == -1 || storedBrokerEpoch.get == cachedBrokerEpoch.get()) + private def isBrokerEpochIsrEligible(storedBrokerEpoch: Optional[java.lang.Long], cachedBrokerEpoch: Optional[java.lang.Long]): Boolean = { + storedBrokerEpoch.isPresent && cachedBrokerEpoch.isPresent && + (storedBrokerEpoch.get == -1 || storedBrokerEpoch.get == cachedBrokerEpoch.get) } /* @@ -1158,7 +1165,7 @@ class Partition(val topicPartition: TopicPartition, // avoid unnecessary collection generation val leaderLogEndOffset = leaderLog.logEndOffsetMetadata var newHighWatermark = leaderLogEndOffset - remoteReplicasMap.values.foreach { replica => + remoteReplicasMap.forEach { (_, replica) => val replicaState = replica.stateSnapshot def shouldWaitForReplicaToJoinIsr: Boolean = { @@ -1656,7 +1663,7 @@ class Partition(val topicPartition: TopicPartition, def deleteRecordsOnLeader(offset: Long): LogDeleteRecordsResult = inReadLock(leaderIsrUpdateLock) { leaderLogIfLocal match { case Some(leaderLog) => - if (!leaderLog.config.delete) + if (!leaderLog.config.delete && leaderLog.config.compact) throw new PolicyViolationException(s"Records of partition $topicPartition can not be deleted due to the configured policy") val convertedOffset = if (offset == DeleteRecordsRequest.HIGH_WATERMARK) @@ -1731,7 +1738,7 @@ class Partition(val topicPartition: TopicPartition, case Some(epochAndOffset) => new EpochEndOffset() .setPartition(partitionId) .setErrorCode(Errors.NONE.code) - .setLeaderEpoch(epochAndOffset.leaderEpoch) + .setLeaderEpoch(epochAndOffset.epoch()) .setEndOffset(epochAndOffset.offset) case None => new EpochEndOffset() .setPartition(partitionId) @@ -1803,7 +1810,7 @@ class Partition(val topicPartition: TopicPartition, brokerState.setBrokerEpoch(localBrokerEpochSupplier()) } else { val replica = remoteReplicasMap.get(brokerId) - val brokerEpoch = if (replica == null) Option.empty else replica.stateSnapshot.brokerEpoch + val brokerEpoch = if (replica == null) Optional.empty else replica.stateSnapshot.brokerEpoch if (brokerEpoch.isEmpty) { // There are two cases where the broker epoch can be missing: // 1. During ISR expansion, we already held lock for the partition and did the broker epoch check, so the new diff --git a/core/src/main/scala/kafka/cluster/Replica.scala b/core/src/main/scala/kafka/cluster/Replica.scala deleted file mode 100644 index 1ca63ac11b8c0..0000000000000 --- a/core/src/main/scala/kafka/cluster/Replica.scala +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.cluster - -import kafka.utils.Logging -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.NotLeaderOrFollowerException -import org.apache.kafka.metadata.MetadataCache -import org.apache.kafka.storage.internals.log.{LogOffsetMetadata, UnifiedLog} - -import java.util.concurrent.atomic.AtomicReference - -case class ReplicaState( - // The log start offset value, kept in all replicas; for local replica it is the - // log's start offset, for remote replicas its value is only updated by follower fetch. - logStartOffset: Long, - - // The log end offset value, kept in all replicas; for local replica it is the - // log's end offset, for remote replicas its value is only updated by follower fetch. - logEndOffsetMetadata: LogOffsetMetadata, - - // The log end offset value at the time the leader received the last FetchRequest from this follower. - // This is used to determine the lastCaughtUpTimeMs of the follower. It is reset by the leader - // when a LeaderAndIsr request is received and might be reset when the leader appends a record - // to its log. - lastFetchLeaderLogEndOffset: Long, - - // The time when the leader received the last FetchRequest from this follower. - // This is used to determine the lastCaughtUpTimeMs of the follower. - lastFetchTimeMs: Long, - - // lastCaughtUpTimeMs is the largest time t such that the offset of most recent FetchRequest from this follower >= - // the LEO of leader at time t. This is used to determine the lag of this follower and ISR of this partition. - lastCaughtUpTimeMs: Long, - - // The brokerEpoch is the epoch from the Fetch request. - brokerEpoch: Option[Long] -) { - /** - * Returns the current log end offset of the replica. - */ - def logEndOffset: Long = logEndOffsetMetadata.messageOffset - - /** - * Returns true when the replica is considered as "caught-up". A replica is - * considered "caught-up" when its log end offset is equals to the log end - * offset of the leader OR when its last caught up time minus the current - * time is smaller than the max replica lag. 
- */ - def isCaughtUp( - leaderEndOffset: Long, - currentTimeMs: Long, - replicaMaxLagMs: Long - ): Boolean = { - leaderEndOffset == logEndOffset || currentTimeMs - lastCaughtUpTimeMs <= replicaMaxLagMs - } -} - -object ReplicaState { - val Empty: ReplicaState = ReplicaState( - logEndOffsetMetadata = LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - lastFetchLeaderLogEndOffset = 0L, - lastFetchTimeMs = 0L, - lastCaughtUpTimeMs = 0L, - brokerEpoch = None : Option[Long], - ) -} - -class Replica(val brokerId: Int, val topicPartition: TopicPartition, val metadataCache: MetadataCache) extends Logging { - private val replicaState = new AtomicReference[ReplicaState](ReplicaState.Empty) - - def stateSnapshot: ReplicaState = replicaState.get - - /** - * Update the replica's fetch state only if the broker epoch is -1 or it is larger or equal to the current broker - * epoch. Otherwise, NOT_LEADER_OR_FOLLOWER exception will be thrown. This can fence fetch state update from a - * stale request. - * - * If the FetchRequest reads up to the log end offset of the leader when the current fetch request is received, - * set `lastCaughtUpTimeMs` to the time when the current fetch request was received. - * - * Else if the FetchRequest reads up to the log end offset of the leader when the previous fetch request was received, - * set `lastCaughtUpTimeMs` to the time when the previous fetch request was received. - * - * This is needed to enforce the semantics of ISR, i.e. a replica is in ISR if and only if it lags behind leader's LEO - * by at most `replicaLagTimeMaxMs`. These semantics allow a follower to be added to the ISR even if the offset of its - * fetch request is always smaller than the leader's LEO, which can happen if small produce requests are received at - * high frequency. - */ - def updateFetchStateOrThrow( - followerFetchOffsetMetadata: LogOffsetMetadata, - followerStartOffset: Long, - followerFetchTimeMs: Long, - leaderEndOffset: Long, - brokerEpoch: Long - ): Unit = { - replicaState.updateAndGet { currentReplicaState => - val cachedBrokerEpoch = metadataCache.getAliveBrokerEpoch(brokerId) - // Fence the update if it provides a stale broker epoch. - if (brokerEpoch != -1 && cachedBrokerEpoch.filter(_ > brokerEpoch).isPresent()) { - throw new NotLeaderOrFollowerException(s"Received stale fetch state update. broker epoch=$brokerEpoch " + - s"vs expected=${currentReplicaState.brokerEpoch.get}") - } - - val lastCaughtUpTime = if (followerFetchOffsetMetadata.messageOffset >= leaderEndOffset) { - math.max(currentReplicaState.lastCaughtUpTimeMs, followerFetchTimeMs) - } else if (followerFetchOffsetMetadata.messageOffset >= currentReplicaState.lastFetchLeaderLogEndOffset) { - math.max(currentReplicaState.lastCaughtUpTimeMs, currentReplicaState.lastFetchTimeMs) - } else { - currentReplicaState.lastCaughtUpTimeMs - } - - ReplicaState( - logStartOffset = followerStartOffset, - logEndOffsetMetadata = followerFetchOffsetMetadata, - lastFetchLeaderLogEndOffset = math.max(leaderEndOffset, currentReplicaState.lastFetchLeaderLogEndOffset), - lastFetchTimeMs = followerFetchTimeMs, - lastCaughtUpTimeMs = lastCaughtUpTime, - brokerEpoch = Option(brokerEpoch) - ) - } - } - - /** - * When the leader is elected or re-elected, the state of the follower is reinitialized - * accordingly. 
- */ - def resetReplicaState( - currentTimeMs: Long, - leaderEndOffset: Long, - isNewLeader: Boolean, - isFollowerInSync: Boolean - ): Unit = { - replicaState.updateAndGet { currentReplicaState => - // When the leader is elected or re-elected, the follower's last caught up time - // is set to the current time if the follower is in the ISR, else to 0. The latter - // is done to ensure that the high watermark is not hold back unnecessarily for - // a follower which is not in the ISR anymore. - val lastCaughtUpTimeMs = if (isFollowerInSync) currentTimeMs else 0L - - if (isNewLeader) { - ReplicaState( - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffsetMetadata = LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, - lastFetchLeaderLogEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastFetchTimeMs = 0L, - lastCaughtUpTimeMs = lastCaughtUpTimeMs, - brokerEpoch = Option.empty - ) - } else { - ReplicaState( - logStartOffset = currentReplicaState.logStartOffset, - logEndOffsetMetadata = currentReplicaState.logEndOffsetMetadata, - lastFetchLeaderLogEndOffset = leaderEndOffset, - // When the leader is re-elected, the follower's last fetch time is - // set to the current time if the follower is in the ISR, else to 0. - // The latter is done to ensure that the follower is not brought back - // into the ISR before a fetch is received. - lastFetchTimeMs = if (isFollowerInSync) currentTimeMs else 0L, - lastCaughtUpTimeMs = lastCaughtUpTimeMs, - brokerEpoch = currentReplicaState.brokerEpoch - ) - } - } - trace(s"Reset state of replica to $this") - } - - override def toString: String = { - val replicaState = this.replicaState.get - val replicaString = new StringBuilder - replicaString.append(s"Replica(replicaId=$brokerId") - replicaString.append(s", topic=${topicPartition.topic}") - replicaString.append(s", partition=${topicPartition.partition}") - replicaString.append(s", lastCaughtUpTimeMs=${replicaState.lastCaughtUpTimeMs}") - replicaString.append(s", logStartOffset=${replicaState.logStartOffset}") - replicaString.append(s", logEndOffset=${replicaState.logEndOffsetMetadata.messageOffset}") - replicaString.append(s", logEndOffsetMetadata=${replicaState.logEndOffsetMetadata}") - replicaString.append(s", lastFetchLeaderLogEndOffset=${replicaState.lastFetchLeaderLogEndOffset}") - replicaString.append(s", brokerEpoch=${replicaState.brokerEpoch.getOrElse(-2L)}") - replicaString.append(s", lastFetchTimeMs=${replicaState.lastFetchTimeMs}") - replicaString.append(")") - replicaString.toString - } - - override def equals(that: Any): Boolean = that match { - case other: Replica => brokerId == other.brokerId && topicPartition == other.topicPartition - case _ => false - } - - override def hashCode: Int = 31 + topicPartition.hashCode + 17 * brokerId -} diff --git a/core/src/main/scala/kafka/controller/StateChangeLogger.scala b/core/src/main/scala/kafka/controller/StateChangeLogger.scala deleted file mode 100644 index 9f188fe33b74a..0000000000000 --- a/core/src/main/scala/kafka/controller/StateChangeLogger.scala +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.controller - -import com.typesafe.scalalogging.Logger -import kafka.utils.Logging - -object StateChangeLogger { - private val logger = Logger("state.change.logger") -} - -/** - * Simple class that sets `logIdent` appropriately depending on whether the state change logger is being used in the - * context of the KafkaController or not (e.g. ReplicaManager and MetadataCache log to the state change logger - * irrespective of whether the broker is the Controller). - */ -class StateChangeLogger(brokerId: Int, inControllerContext: Boolean, controllerEpoch: Option[Int]) extends Logging { - - if (controllerEpoch.isDefined && !inControllerContext) - throw new IllegalArgumentException("Controller epoch should only be defined if inControllerContext is true") - - override lazy val logger: Logger = StateChangeLogger.logger - - locally { - val prefix = if (inControllerContext) "Controller" else "Broker" - val epochEntry = controllerEpoch.fold("")(epoch => s" epoch=$epoch") - logIdent = s"[$prefix id=$brokerId$epochEntry] " - } - -} diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala deleted file mode 100644 index 44a9b9565841d..0000000000000 --- a/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.coordinator.group - -import kafka.server.ReplicaManager -import kafka.utils.Logging -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.NotLeaderOrFollowerException -import org.apache.kafka.common.record.{ControlRecordType, FileRecords, MemoryRecords} -import org.apache.kafka.common.requests.TransactionResult -import org.apache.kafka.common.utils.Time -import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader.LoadSummary -import org.apache.kafka.coordinator.common.runtime.Deserializer.UnknownRecordTypeException -import org.apache.kafka.coordinator.common.runtime.{CoordinatorLoader, CoordinatorPlayback, Deserializer} -import org.apache.kafka.server.storage.log.FetchIsolation -import org.apache.kafka.server.util.KafkaScheduler - -import java.nio.ByteBuffer -import java.util.concurrent.CompletableFuture -import java.util.concurrent.atomic.AtomicBoolean -import scala.jdk.CollectionConverters._ - -/** - * Coordinator loader which reads records from a partition and replays them - * to a group coordinator. - * - * @param replicaManager The replica manager. - * @param deserializer The deserializer to use. - * @param loadBufferSize The load buffer size. - * @tparam T The record type. - */ -class CoordinatorLoaderImpl[T]( - time: Time, - replicaManager: ReplicaManager, - deserializer: Deserializer[T], - loadBufferSize: Int -) extends CoordinatorLoader[T] with Logging { - private val isRunning = new AtomicBoolean(true) - private val scheduler = new KafkaScheduler(1) - scheduler.startup() - - /** - * Loads the coordinator by reading all the records from the TopicPartition - * and applying them to the Replayable object. - * - * @param tp The TopicPartition to read from. - * @param coordinator The object to apply records to. - */ - override def load( - tp: TopicPartition, - coordinator: CoordinatorPlayback[T] -): CompletableFuture[LoadSummary] = { - val future = new CompletableFuture[LoadSummary]() - val startTimeMs = time.milliseconds() - val result = scheduler.scheduleOnce(s"Load coordinator from $tp", - () => doLoad(tp, coordinator, future, startTimeMs)) - if (result.isCancelled) { - future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")) - } - future - } - - private def doLoad( - tp: TopicPartition, - coordinator: CoordinatorPlayback[T], - future: CompletableFuture[LoadSummary], - startTimeMs: Long - ): Unit = { - val schedulerQueueTimeMs = time.milliseconds() - startTimeMs - try { - replicaManager.getLog(tp) match { - case None => - future.completeExceptionally(new NotLeaderOrFollowerException( - s"Could not load records from $tp because the log does not exist.")) - - case Some(log) => - def logEndOffset: Long = replicaManager.getLogEndOffset(tp).getOrElse(-1L) - - // Buffer may not be needed if records are read from memory. - var buffer = ByteBuffer.allocate(0) - // Loop breaks if leader changes at any time during the load, since logEndOffset is -1. - var currentOffset = log.logStartOffset - // Loop breaks if no records have been read, since the end of the log has been reached. - // This is to ensure that the loop breaks even if the current offset remains smaller than - // the log end offset but the log is empty. This could happen with compacted topics. 
- var readAtLeastOneRecord = true - - var previousHighWatermark = -1L - var numRecords = 0L - var numBytes = 0L - while (currentOffset < logEndOffset && readAtLeastOneRecord && isRunning.get) { - val fetchDataInfo = log.read(currentOffset, loadBufferSize, FetchIsolation.LOG_END, true) - - readAtLeastOneRecord = fetchDataInfo.records.sizeInBytes > 0 - - val memoryRecords = (fetchDataInfo.records: @unchecked) match { - case records: MemoryRecords => - records - - case fileRecords: FileRecords => - val sizeInBytes = fileRecords.sizeInBytes - val bytesNeeded = Math.max(loadBufferSize, sizeInBytes) - - // "minOneMessage = true in the above log.read() means that the buffer may need to - // be grown to ensure progress can be made. - if (buffer.capacity < bytesNeeded) { - if (loadBufferSize < bytesNeeded) - warn(s"Loaded metadata from $tp with buffer larger ($bytesNeeded bytes) than " + - s"configured buffer size ($loadBufferSize bytes).") - - buffer = ByteBuffer.allocate(bytesNeeded) - } else { - buffer.clear() - } - - fileRecords.readInto(buffer, 0) - MemoryRecords.readableRecords(buffer) - } - - memoryRecords.batches.forEach { batch => - if (batch.isControlBatch) { - batch.asScala.foreach { record => - val controlRecord = ControlRecordType.parse(record.key) - if (controlRecord == ControlRecordType.COMMIT) { - if (isTraceEnabled) { - trace(s"Replaying end transaction marker from $tp at offset ${record.offset} to commit transaction " + - s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") - } - coordinator.replayEndTransactionMarker( - batch.producerId, - batch.producerEpoch, - TransactionResult.COMMIT - ) - } else if (controlRecord == ControlRecordType.ABORT) { - if (isTraceEnabled) { - trace(s"Replaying end transaction marker from $tp at offset ${record.offset} to abort transaction " + - s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") - } - coordinator.replayEndTransactionMarker( - batch.producerId, - batch.producerEpoch, - TransactionResult.ABORT - ) - } - } - } else { - batch.asScala.foreach { record => - numRecords = numRecords + 1 - - val coordinatorRecordOpt = { - try { - Some(deserializer.deserialize(record.key, record.value)) - } catch { - case ex: UnknownRecordTypeException => - warn(s"Unknown record type ${ex.unknownType} while loading offsets and group metadata " + - s"from $tp. Ignoring it. It could be a left over from an aborted upgrade.") - None - case ex: RuntimeException => - val msg = s"Deserializing record $record from $tp failed due to: ${ex.getMessage}" - error(s"$msg.") - throw new RuntimeException(msg, ex) - } - } - - coordinatorRecordOpt.foreach { coordinatorRecord => - try { - if (isTraceEnabled) { - trace(s"Replaying record $coordinatorRecord from $tp at offset ${record.offset()} " + - s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") - } - coordinator.replay( - record.offset(), - batch.producerId, - batch.producerEpoch, - coordinatorRecord - ) - } catch { - case ex: RuntimeException => - val msg = s"Replaying record $coordinatorRecord from $tp at offset ${record.offset()} " + - s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch} " + - s"failed due to: ${ex.getMessage}" - error(s"$msg.") - throw new RuntimeException(msg, ex) - } - } - } - } - - // Note that the high watermark can be greater than the current offset but as we load more records - // the current offset will eventually surpass the high watermark. 
Also note that the high watermark - // will continue to advance while loading. - currentOffset = batch.nextOffset - val currentHighWatermark = log.highWatermark - if (currentOffset >= currentHighWatermark) { - coordinator.updateLastWrittenOffset(currentOffset) - - if (currentHighWatermark > previousHighWatermark) { - coordinator.updateLastCommittedOffset(currentHighWatermark) - previousHighWatermark = currentHighWatermark - } - } - } - numBytes = numBytes + memoryRecords.sizeInBytes() - } - - val endTimeMs = time.milliseconds() - - if (logEndOffset == -1L) { - future.completeExceptionally(new NotLeaderOrFollowerException( - s"Stopped loading records from $tp because the partition is not online or is no longer the leader." - )) - } else if (isRunning.get) { - future.complete(new LoadSummary(startTimeMs, endTimeMs, schedulerQueueTimeMs, numRecords, numBytes)) - } else { - future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")) - } - } - } catch { - case ex: Throwable => - future.completeExceptionally(ex) - } - } - - /** - * Closes the loader. - */ - override def close(): Unit = { - if (!isRunning.compareAndSet(true, false)) { - warn("Coordinator loader is already shutting down.") - return - } - scheduler.shutdown() - } -} diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala index d6ea17fe7946b..dbbdbb09868e8 100644 --- a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala +++ b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala @@ -17,13 +17,14 @@ package kafka.coordinator.group import kafka.cluster.PartitionListener -import kafka.server.{AddPartitionsToTxnManager, ReplicaManager} -import org.apache.kafka.common.TopicPartition +import kafka.server.ReplicaManager +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} import org.apache.kafka.coordinator.common.runtime.PartitionWriter import org.apache.kafka.server.ActionQueue import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import java.util.concurrent.CompletableFuture @@ -139,11 +140,12 @@ class CoordinatorPartitionWriter( records: MemoryRecords ): Long = { // We write synchronously to the leader replica without waiting on replication. 
+ val topicIdPartition: TopicIdPartition = replicaManager.topicIdPartition(tp) val appendResults = replicaManager.appendRecordsToLeader( requiredAcks = 1, internalTopicsAllowed = true, origin = AppendOrigin.COORDINATOR, - entriesPerPartition = Map(tp -> records), + entriesPerPartition = Map(topicIdPartition -> records), requestLocal = RequestLocal.noCaching, verificationGuards = Map(tp -> verificationGuard), // We can directly complete the purgatories here because we don't hold @@ -151,7 +153,7 @@ class CoordinatorPartitionWriter( actionQueue = directActionQueue ) - val partitionResult = appendResults.getOrElse(tp, + val partitionResult = appendResults.getOrElse(topicIdPartition, throw new IllegalStateException(s"Append status $appendResults should have partition $tp.")) if (partitionResult.error != Errors.NONE) { diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala index 7130d39136e9f..1e348b19b3e1b 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala @@ -27,14 +27,15 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{AddPartitionsToTxnResponse, TransactionResult} import org.apache.kafka.common.utils.{LogContext, ProducerIdAndEpoch, Time} -import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionLogConfig, TransactionStateManagerConfig} +import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionLogConfig, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{RequestLocal, TransactionVersion} import org.apache.kafka.server.util.Scheduler +import java.util import java.util.Properties import java.util.concurrent.atomic.AtomicBoolean -import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ object TransactionCoordinator { @@ -147,17 +148,18 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val coordinatorEpochAndMetadata = txnManager.getTransactionState(transactionalId).flatMap { case None => try { - val createdMetadata = new TransactionMetadata(transactionalId = transactionalId, - producerId = producerIdManager.generateProducerId(), - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = resolvedTxnTimeoutMs, - state = Empty, - topicPartitions = collection.mutable.Set.empty[TopicPartition], - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TransactionVersion.TV_0) + val createdMetadata = new TransactionMetadata(transactionalId, + producerIdManager.generateProducerId(), + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + RecordBatch.NO_PRODUCER_EPOCH, + resolvedTxnTimeoutMs, + TransactionState.EMPTY, + util.Set.of(), + -1, + time.milliseconds(), + TransactionVersion.TV_0) txnManager.putTransactionStateIfNotExists(createdMetadata) } catch { case e: Exception => Left(Errors.forException(e)) @@ -171,10 +173,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val coordinatorEpoch = existingEpochAndMetadata.coordinatorEpoch val txnMetadata = 
existingEpochAndMetadata.transactionMetadata - txnMetadata.inLock { + txnMetadata.inLock(() => prepareInitProducerIdTransit(transactionalId, resolvedTxnTimeoutMs, coordinatorEpoch, txnMetadata, expectedProducerIdAndEpoch) - } + ) } result match { @@ -182,7 +184,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, responseCallback(initTransactionError(error)) case Right((coordinatorEpoch, newMetadata)) => - if (newMetadata.txnState == PrepareEpochFence) { + if (newMetadata.txnState == TransactionState.PREPARE_EPOCH_FENCE) { // abort the ongoing transaction and then return CONCURRENT_TRANSACTIONS to let client wait and retry def sendRetriableErrorCallback(error: Errors, newProducerId: Long, newProducerEpoch: Short): Unit = { if (error != Errors.NONE) { @@ -249,24 +251,23 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } else { // caller should have synchronized on txnMetadata already txnMetadata.state match { - case PrepareAbort | PrepareCommit => + case TransactionState.PREPARE_ABORT | TransactionState.PREPARE_COMMIT => // reply to client and let it backoff and retry Left(Errors.CONCURRENT_TRANSACTIONS) - case CompleteAbort | CompleteCommit | Empty => + case TransactionState.COMPLETE_ABORT | TransactionState.COMPLETE_COMMIT | TransactionState.EMPTY => val transitMetadataResult = // If the epoch is exhausted and the expected epoch (if provided) matches it, generate a new producer ID - if (txnMetadata.isProducerEpochExhausted && - expectedProducerIdAndEpoch.forall(_.epoch == txnMetadata.producerEpoch)) { - try { + try { + if (txnMetadata.isProducerEpochExhausted && + expectedProducerIdAndEpoch.forall(_.epoch == txnMetadata.producerEpoch)) Right(txnMetadata.prepareProducerIdRotation(producerIdManager.generateProducerId(), transactionTimeoutMs, time.milliseconds(), expectedProducerIdAndEpoch.isDefined)) - } catch { - case e: Exception => Left(Errors.forException(e)) - } - } else { - txnMetadata.prepareIncrementProducerEpoch(transactionTimeoutMs, expectedProducerIdAndEpoch.map(_.epoch), - time.milliseconds()) + else + Right(txnMetadata.prepareIncrementProducerEpoch(transactionTimeoutMs, expectedProducerIdAndEpoch.map(e => Short.box(e.epoch)).toJava, + time.milliseconds())) + } catch { + case e: Exception => Left(Errors.forException(e)) } transitMetadataResult match { @@ -274,7 +275,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Left(err) => Left(err) } - case Ongoing => + case TransactionState.ONGOING => // indicate to abort the current ongoing txn first. Note that this epoch is never returned to the // user. We will abort the ongoing transaction and return CONCURRENT_TRANSACTIONS to the client. // This forces the client to retry, which will ensure that the epoch is bumped a second time. In @@ -282,7 +283,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // then when the client retries, we will generate a new producerId. Right(coordinatorEpoch, txnMetadata.prepareFenceProducerEpoch()) - case Dead | PrepareEpochFence => + case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." 
fatal(errorMsg) @@ -294,12 +295,13 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleListTransactions( filteredProducerIds: Set[Long], filteredStates: Set[String], - filteredDuration: Long = -1L + filteredDuration: Long = -1L, + filteredTransactionalIdPattern: String = null ): ListTransactionsResponseData = { if (!isActive.get()) { new ListTransactionsResponseData().setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code) } else { - txnManager.listTransactionStates(filteredProducerIds, filteredStates, filteredDuration) + txnManager.listTransactionStates(filteredProducerIds, filteredStates, filteredDuration, filteredTransactionalIdPattern) } } @@ -325,12 +327,12 @@ class TransactionCoordinator(txnConfig: TransactionConfig, transactionState.setErrorCode(Errors.TRANSACTIONAL_ID_NOT_FOUND.code) case Right(Some(coordinatorEpochAndMetadata)) => val txnMetadata = coordinatorEpochAndMetadata.transactionMetadata - txnMetadata.inLock { - if (txnMetadata.state == Dead) { + txnMetadata.inLock(() => { + if (txnMetadata.state == TransactionState.DEAD) { // The transaction state is being expired, so ignore it transactionState.setErrorCode(Errors.TRANSACTIONAL_ID_NOT_FOUND.code) } else { - txnMetadata.topicPartitions.foreach { topicPartition => + txnMetadata.topicPartitions.forEach(topicPartition => { var topicData = transactionState.topics.find(topicPartition.topic) if (topicData == null) { topicData = new DescribeTransactionsResponseData.TopicData() @@ -338,17 +340,17 @@ class TransactionCoordinator(txnConfig: TransactionConfig, transactionState.topics.add(topicData) } topicData.partitions.add(topicPartition.partition) - } + }) transactionState .setErrorCode(Errors.NONE.code) .setProducerId(txnMetadata.producerId) .setProducerEpoch(txnMetadata.producerEpoch) - .setTransactionState(txnMetadata.state.name) + .setTransactionState(txnMetadata.state.stateName) .setTransactionTimeoutMs(txnMetadata.txnTimeoutMs) .setTransactionStartTimeMs(txnMetadata.txnStartTimestamp) } - } + }) } } } @@ -356,13 +358,15 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleVerifyPartitionsInTransaction(transactionalId: String, producerId: Long, producerEpoch: Short, - partitions: collection.Set[TopicPartition], + partitions: util.Set[TopicPartition], responseCallback: VerifyPartitionsCallback): Unit = { if (transactionalId == null || transactionalId.isEmpty) { debug(s"Returning ${Errors.INVALID_REQUEST} error code to client for $transactionalId's AddPartitions request for verification") - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitions.map(_ -> Errors.INVALID_REQUEST).toMap.asJava)) + val errors = new util.HashMap[TopicPartition, Errors]() + partitions.forEach(partition => errors.put(partition, Errors.INVALID_REQUEST)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) } else { - val result: ApiResult[Map[TopicPartition, Errors]] = + val result: ApiResult[util.Map[TopicPartition, Errors]] = txnManager.getTransactionState(transactionalId).flatMap { case None => Left(Errors.INVALID_PRODUCER_ID_MAPPING) @@ -372,31 +376,35 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // Given the txnMetadata is valid, we check if the partitions are in the transaction. // Pending state is not checked since there is a final validation on the append to the log. // Partitions are added to metadata when the add partitions state is persisted, and removed when the end marker is persisted. 
- txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.producerId != producerId) { Left(Errors.INVALID_PRODUCER_ID_MAPPING) } else if (txnMetadata.producerEpoch != producerEpoch) { Left(Errors.PRODUCER_FENCED) - } else if (txnMetadata.state == PrepareCommit || txnMetadata.state == PrepareAbort) { + } else if (txnMetadata.state == TransactionState.PREPARE_COMMIT || txnMetadata.state == TransactionState.PREPARE_ABORT) { Left(Errors.CONCURRENT_TRANSACTIONS) } else { - Right(partitions.map { part => + val errors = new util.HashMap[TopicPartition, Errors]() + partitions.forEach(part => { if (txnMetadata.topicPartitions.contains(part)) - (part, Errors.NONE) + errors.put(part, Errors.NONE) else - (part, Errors.TRANSACTION_ABORTABLE) - }.toMap) + errors.put(part, Errors.TRANSACTION_ABORTABLE) + }) + Right(errors) } - } + }) } result match { case Left(err) => debug(s"Returning $err error code to client for $transactionalId's AddPartitions request for verification") - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitions.map(_ -> err).toMap.asJava)) + val errors = new util.HashMap[TopicPartition, Errors]() + partitions.forEach(partition => errors.put(partition, err)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) case Right(errors) => - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors.asJava)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) } } @@ -405,7 +413,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleAddPartitionsToTransaction(transactionalId: String, producerId: Long, producerEpoch: Short, - partitions: collection.Set[TopicPartition], + partitions: util.Set[TopicPartition], responseCallback: AddPartitionsCallback, clientTransactionVersion: TransactionVersion, requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { @@ -423,7 +431,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndMetadata.transactionMetadata // generate the new transaction metadata with added partitions - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.pendingTransitionInProgress) { // return a retriable exception to let the client backoff and retry // This check is performed first so that the pending transition can complete before subsequent checks. 
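Every call site in this file changes from txnMetadata.inLock { ... } to txnMetadata.inLock(() => { ... }). The likely reason, assumed here since the new TransactionMetadata class itself is outside these hunks, is that inLock is now defined on the Java side and takes a functional interface such as java.util.function.Supplier rather than a Scala by-name parameter, so Scala callers must spell out a zero-argument lambda. A minimal sketch of the two calling conventions, using an invented LockedMetadata stand-in:

import java.util.concurrent.locks.ReentrantLock
import java.util.function.Supplier

object InLockSketch {
  // Invented stand-in; only the shape of the lock helper matters here.
  final class LockedMetadata {
    private val lock = new ReentrantLock()
    private var state: String = "EMPTY"

    // Old Scala-style helper: a by-name parameter, so callers write inLockByName { ... }.
    def inLockByName[T](fun: => T): T = {
      lock.lock()
      try fun finally lock.unlock()
    }

    // Java-interop style assumed for the migrated class: an explicit Supplier,
    // which Scala callers satisfy with a () => ... lambda, as in the hunks above.
    def inLockSupplier[T](fun: Supplier[T]): T = {
      lock.lock()
      try fun.get() finally lock.unlock()
    }

    def setState(s: String): Unit = state = s
    def currentState: String = state
  }

  def main(args: Array[String]): Unit = {
    val md = new LockedMetadata
    val a = md.inLockByName { md.setState("ONGOING"); md.currentState }
    val b = md.inLockSupplier(() => { md.setState("PREPARE_COMMIT"); md.currentState })
    println(s"$a then $b")
  }
}

Both forms give the same mutual exclusion; only the parameter type differs, which is why the patch touches every inLock call site without changing any locking behaviour.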
@@ -434,15 +442,15 @@ class TransactionCoordinator(txnConfig: TransactionConfig, Left(Errors.INVALID_PRODUCER_ID_MAPPING) } else if (txnMetadata.producerEpoch != producerEpoch) { Left(Errors.PRODUCER_FENCED) - } else if (txnMetadata.state == PrepareCommit || txnMetadata.state == PrepareAbort) { + } else if (txnMetadata.state == TransactionState.PREPARE_COMMIT || txnMetadata.state == TransactionState.PREPARE_ABORT) { Left(Errors.CONCURRENT_TRANSACTIONS) - } else if (txnMetadata.state == Ongoing && partitions.subsetOf(txnMetadata.topicPartitions)) { + } else if (txnMetadata.state == TransactionState.ONGOING && txnMetadata.topicPartitions.containsAll(partitions)) { // this is an optimization: if the partitions are already in the metadata reply OK immediately Left(Errors.NONE) } else { - Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions.toSet, time.milliseconds(), clientTransactionVersion)) + Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions, time.milliseconds(), clientTransactionVersion)) } - } + }) } result match { @@ -548,60 +556,60 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndTxnMetadata.transactionMetadata val coordinatorEpoch = epochAndTxnMetadata.coordinatorEpoch - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) // Strict equality is enforced on the client side requests, as they shouldn't bump the producer epoch. else if ((isFromClient && producerEpoch != txnMetadata.producerEpoch) || producerEpoch < txnMetadata.producerEpoch) Left(Errors.PRODUCER_FENCED) - else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) + else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != TransactionState.PREPARE_EPOCH_FENCE) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case Ongoing => + case TransactionState.ONGOING => val nextState = if (txnMarkerResult == TransactionResult.COMMIT) - PrepareCommit + TransactionState.PREPARE_COMMIT else - PrepareAbort + TransactionState.PREPARE_ABORT - if (nextState == PrepareAbort && txnMetadata.pendingState.contains(PrepareEpochFence)) { + if (nextState == TransactionState.PREPARE_ABORT && txnMetadata.pendingState.filter(s => s == TransactionState.PREPARE_EPOCH_FENCE).isPresent) { // We should clear the pending state to make way for the transition to PrepareAbort and also bump // the epoch in the transaction metadata we are about to append. 
isEpochFence = true - txnMetadata.pendingState = None - txnMetadata.producerEpoch = producerEpoch - txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH + txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.setProducerEpoch(producerEpoch) + txnMetadata.setLastProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) } Right(coordinatorEpoch, txnMetadata.prepareAbortOrCommit(nextState, TransactionVersion.fromFeatureLevel(0), RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)) - case CompleteCommit => + case TransactionState.COMPLETE_COMMIT => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.NONE) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case CompleteAbort => + case TransactionState.COMPLETE_ABORT => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.NONE) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case PrepareCommit => + case TransactionState.PREPARE_COMMIT => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case PrepareAbort => + case TransactionState.PREPARE_ABORT => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case Empty => + case TransactionState.EMPTY => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case Dead | PrepareEpochFence => + case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." 
fatal(errorMsg) throw new IllegalStateException(errorMsg) } - } + }) } preAppendResult match { @@ -622,7 +630,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) else if (txnMetadata.producerEpoch != producerEpoch) @@ -630,25 +638,25 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (txnMetadata.pendingTransitionInProgress) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case Empty| Ongoing | CompleteCommit | CompleteAbort => + case TransactionState.EMPTY| TransactionState.ONGOING | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case PrepareCommit => + case TransactionState.PREPARE_COMMIT => if (txnMarkerResult != TransactionResult.COMMIT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case PrepareAbort => + case TransactionState.PREPARE_ABORT => if (txnMarkerResult != TransactionResult.ABORT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case Dead | PrepareEpochFence => + case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - } + }) } else { debug(s"The transaction coordinator epoch has changed to ${epochAndMetadata.coordinatorEpoch} after $txnMarkerResult was " + s"successfully appended to the log for $transactionalId with old epoch $coordinatorEpoch") @@ -681,7 +689,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { // This was attempted epoch fence that failed, so mark this state on the metadata - epochAndMetadata.transactionMetadata.hasFailedEpochFence = true + epochAndMetadata.transactionMetadata.hasFailedEpochFence(true) warn(s"The coordinator failed to write an epoch fence transition for producer $transactionalId to the transaction log " + s"with error $error. The epoch was increased to ${newMetadata.producerEpoch} but not returned to the client") } @@ -770,12 +778,12 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndTxnMetadata.transactionMetadata val coordinatorEpoch = epochAndTxnMetadata.coordinatorEpoch - txnMetadata.inLock { + txnMetadata.inLock(() => { producerIdCopy = txnMetadata.producerId producerEpochCopy = txnMetadata.producerEpoch // PrepareEpochFence has slightly different epoch bumping logic so don't include it here. // Note that, it can only happen when the current state is Ongoing. 
- isEpochFence = txnMetadata.pendingState.contains(PrepareEpochFence) + isEpochFence = txnMetadata.pendingState.filter(s => s == TransactionState.PREPARE_EPOCH_FENCE).isPresent // True if the client retried a request that had overflowed the epoch, and a new producer ID is stored in the txnMetadata val retryOnOverflow = !isEpochFence && txnMetadata.prevProducerId == producerId && producerEpoch == Short.MaxValue - 1 && txnMetadata.producerEpoch == 0 @@ -789,11 +797,11 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // Return producer fenced even in the cases where the epoch is higher and could indicate an invalid state transition. // Use the following criteria to determine if a v2 retry is valid: txnMetadata.state match { - case Ongoing | Empty | Dead | PrepareEpochFence => + case TransactionState.ONGOING | TransactionState.EMPTY | TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => producerEpoch == txnMetadata.producerEpoch - case PrepareCommit | PrepareAbort => + case TransactionState.PREPARE_COMMIT | TransactionState.PREPARE_ABORT => retryOnEpochBump - case CompleteCommit | CompleteAbort => + case TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => retryOnEpochBump || retryOnOverflow || producerEpoch == txnMetadata.producerEpoch } } else { @@ -817,12 +825,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, Right(RecordBatch.NO_PRODUCER_ID) } - if (nextState == PrepareAbort && isEpochFence) { - // We should clear the pending state to make way for the transition to PrepareAbort and also bump - // the epoch in the transaction metadata we are about to append. - txnMetadata.pendingState = None - txnMetadata.producerEpoch = producerEpoch - txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH + if (nextState == TransactionState.PREPARE_ABORT && isEpochFence) { + // We should clear the pending state to make way for the transition to PrepareAbort + txnMetadata.pendingState(util.Optional.empty()) + // For TV2+, don't manually set the epoch - let prepareAbortOrCommit handle it naturally. } nextProducerIdOrErrors.flatMap { @@ -831,7 +837,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } } - if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) { + if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != TransactionState.PREPARE_EPOCH_FENCE) { // This check is performed first so that the pending transition can complete before the next checks. // With TV2, we may be transitioning over a producer epoch overflow, and the producer may be using the // new producer ID that is still only in pending state. 
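In the hunks above, checks on pendingState move from Scala's Option (pendingState.contains(PrepareEpochFence)) to java.util.Optional, which has no contains method; filter(...).isPresent is the closest equivalent and is the shape the new code uses. A tiny side-by-side sketch with placeholder states (not part of the patch):

import java.util.Optional

object PendingStateSketch {
  sealed trait State
  case object PrepareEpochFence extends State
  case object PrepareAbort extends State

  def main(args: Array[String]): Unit = {
    // Scala Option offers `contains` directly.
    val pendingScala: Option[State] = Some(PrepareEpochFence)
    val fencedScala = pendingScala.contains(PrepareEpochFence)

    // java.util.Optional expresses the same check as filter(...).isPresent;
    // it is false both for an empty Optional and for a different pending state.
    val pendingJava: Optional[State] = Optional.of[State](PrepareEpochFence)
    val fencedJava = pendingJava.filter(s => s == PrepareEpochFence).isPresent

    val otherPending: Optional[State] = Optional.of[State](PrepareAbort)
    val fencedOther = otherPending.filter(s => s == PrepareEpochFence).isPresent

    println(s"$fencedScala $fencedJava $fencedOther") // true true false
  }
}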
@@ -841,14 +847,14 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (!isValidEpoch) Left(Errors.PRODUCER_FENCED) else txnMetadata.state match { - case Ongoing => + case TransactionState.ONGOING => val nextState = if (txnMarkerResult == TransactionResult.COMMIT) - PrepareCommit + TransactionState.PREPARE_COMMIT else - PrepareAbort + TransactionState.PREPARE_ABORT generateTxnTransitMetadataForTxnCompletion(nextState, false) - case CompleteCommit => + case TransactionState.COMPLETE_COMMIT => if (txnMarkerResult == TransactionResult.COMMIT) { if (isRetry) Left(Errors.NONE) @@ -859,42 +865,42 @@ class TransactionCoordinator(txnConfig: TransactionConfig, if (isRetry) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else - generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) } - case CompleteAbort => + case TransactionState.COMPLETE_ABORT => if (txnMarkerResult == TransactionResult.ABORT) { if (isRetry) Left(Errors.NONE) else - generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) } else { // Commit. logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) } - case PrepareCommit => + case TransactionState.PREPARE_COMMIT => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case PrepareAbort => + case TransactionState.PREPARE_ABORT => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case Empty => + case TransactionState.EMPTY => if (txnMarkerResult == TransactionResult.ABORT) { - generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) } else { logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) } - case Dead | PrepareEpochFence => + case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." 
fatal(errorMsg) throw new IllegalStateException(errorMsg) } - } + }) } preAppendResult match { @@ -919,7 +925,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) else if (txnMetadata.producerEpoch != producerEpoch && txnMetadata.producerEpoch != producerEpoch + 1) @@ -927,26 +933,26 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (txnMetadata.pendingTransitionInProgress) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case Empty| Ongoing | CompleteCommit | CompleteAbort => + case TransactionState.EMPTY | TransactionState.ONGOING | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case PrepareCommit => + case TransactionState.PREPARE_COMMIT => if (txnMarkerResult != TransactionResult.COMMIT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case PrepareAbort => + case TransactionState.PREPARE_ABORT => if (txnMarkerResult != TransactionResult.ABORT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case Dead | PrepareEpochFence => + case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - } + }) } else { debug(s"The transaction coordinator epoch has changed to ${epochAndMetadata.coordinatorEpoch} after $txnMarkerResult was " + s"successfully appended to the log for $transactionalId with old epoch $coordinatorEpoch") @@ -978,10 +984,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { - // This was attempted epoch fence that failed, so mark this state on the metadata - epochAndMetadata.transactionMetadata.hasFailedEpochFence = true + // For TV2, we allow re-bumping the epoch on retry, since we don't complete the epoch bump. + // Therefore, we don't set hasFailedEpochFence = true. warn(s"The coordinator failed to write an epoch fence transition for producer $transactionalId to the transaction log " + - s"with error $error. The epoch was increased to ${newMetadata.producerEpoch} but not returned to the client") + s"with error $error") } } } @@ -1027,7 +1033,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndTxnMetadata) => val txnMetadata = epochAndTxnMetadata.transactionMetadata - val transitMetadataOpt = txnMetadata.inLock { + val transitMetadataOpt = txnMetadata.inLock(() => { if (txnMetadata.producerId != txnIdAndPidEpoch.producerId) { error(s"Found incorrect producerId when expiring transactionalId: ${txnIdAndPidEpoch.transactionalId}. " + s"Expected producerId: ${txnIdAndPidEpoch.producerId}. 
Found producerId: " + @@ -1040,7 +1046,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } else { Some(txnMetadata.prepareFenceProducerEpoch()) } - } + }) transitMetadataOpt.foreach { txnTransitMetadata => endTransaction(txnMetadata.transactionalId, diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala index 5972418d0c1e2..f024e88aa8e2b 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala @@ -21,10 +21,12 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.TopicPartition +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} import org.apache.kafka.coordinator.transaction.generated.{CoordinatorRecordType, TransactionLogKey, TransactionLogValue} import org.apache.kafka.server.common.TransactionVersion -import scala.collection.mutable +import java.util + import scala.jdk.CollectionConverters._ /** @@ -50,7 +52,7 @@ object TransactionLog { * * @return key bytes */ - private[transaction] def keyToBytes(transactionalId: String): Array[Byte] = { + def keyToBytes(transactionalId: String): Array[Byte] = { MessageUtil.toCoordinatorTypePrefixedBytes(new TransactionLogKey().setTransactionalId(transactionalId)) } @@ -59,13 +61,13 @@ object TransactionLog { * * @return value payload bytes */ - private[transaction] def valueToBytes(txnMetadata: TxnTransitMetadata, + def valueToBytes(txnMetadata: TxnTransitMetadata, transactionVersionLevel: TransactionVersion): Array[Byte] = { - if (txnMetadata.txnState == Empty && txnMetadata.topicPartitions.nonEmpty) + if (txnMetadata.txnState == TransactionState.EMPTY && !txnMetadata.topicPartitions.isEmpty) throw new IllegalStateException(s"Transaction is not expected to have any partitions since its state is ${txnMetadata.txnState}: $txnMetadata") - val transactionPartitions = if (txnMetadata.txnState == Empty) null - else txnMetadata.topicPartitions + val transactionPartitions = if (txnMetadata.txnState == TransactionState.EMPTY) null + else txnMetadata.topicPartitions.asScala .groupBy(_.topic) .map { case (topic, partitions) => new TransactionLogValue.PartitionsSchema() @@ -113,28 +115,25 @@ object TransactionLog { val version = buffer.getShort if (version >= TransactionLogValue.LOWEST_SUPPORTED_VERSION && version <= TransactionLogValue.HIGHEST_SUPPORTED_VERSION) { val value = new TransactionLogValue(new ByteBufferAccessor(buffer), version) - val transactionMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = value.producerId, - prevProducerId = value.previousProducerId, - nextProducerId = value.nextProducerId, - producerEpoch = value.producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = value.transactionTimeoutMs, - state = TransactionState.fromId(value.transactionStatus), - topicPartitions = mutable.Set.empty[TopicPartition], - txnStartTimestamp = value.transactionStartTimestampMs, - txnLastUpdateTimestamp = value.transactionLastUpdateTimestampMs, - clientTransactionVersion = TransactionVersion.fromFeatureLevel(value.clientTransactionVersion)) - - if (!transactionMetadata.state.equals(Empty)) - value.transactionPartitions.forEach(partitionsSchema => - 
transactionMetadata.addPartitions(partitionsSchema.partitionIds - .asScala - .map(partitionId => new TopicPartition(partitionsSchema.topic, partitionId)) - .toSet) - ) - Some(transactionMetadata) + val state = TransactionState.fromId(value.transactionStatus) + val tps: util.Set[TopicPartition] = new util.HashSet[TopicPartition]() + if (!state.equals(TransactionState.EMPTY)) + value.transactionPartitions.forEach(partitionsSchema => { + partitionsSchema.partitionIds.forEach(partitionId => tps.add(new TopicPartition(partitionsSchema.topic, partitionId.intValue()))) + }) + Some(new TransactionMetadata( + transactionalId, + value.producerId, + value.previousProducerId, + value.nextProducerId, + value.producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + value.transactionTimeoutMs, + state, + tps, + value.transactionStartTimestampMs, + value.transactionLastUpdateTimestampMs, + TransactionVersion.fromFeatureLevel(value.clientTransactionVersion))) } else throw new IllegalStateException(s"Unknown version $version from the transaction log message value") } } diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala index 67d9cc6805de5..227eb9881a297 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala @@ -32,6 +32,7 @@ import org.apache.kafka.common.requests.{TransactionResult, WriteTxnMarkersReque import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{Node, Reconfigurable, TopicPartition} +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TxnTransitMetadata} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.metrics.KafkaMetricsGroup @@ -165,8 +166,10 @@ class TransactionMarkerChannelManager( time: Time ) extends InterBrokerSendThread("TxnMarkerSenderThread-" + config.brokerId, networkClient, config.requestTimeoutMs, time) with Logging { - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.coordinator.transaction" + private val metricsClassName = "TransactionMarkerChannelManager" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) this.logIdent = "[Transaction Marker Channel Manager " + config.brokerId + "]: " @@ -325,16 +328,16 @@ class TransactionMarkerChannelManager( info(s"Replaced an existing pending complete txn $prev with $pendingCompleteTxn while adding markers to send.") } addTxnMarkersToBrokerQueue(txnMetadata.producerId, - txnMetadata.producerEpoch, txnResult, pendingCompleteTxn, txnMetadata.topicPartitions.toSet) + txnMetadata.producerEpoch, txnResult, pendingCompleteTxn, txnMetadata.topicPartitions.asScala.toSet) maybeWriteTxnCompletion(transactionalId) } def numTxnsWithPendingMarkers: Int = transactionsWithPendingMarkers.size private def hasPendingMarkersToWrite(txnMetadata: TransactionMetadata): Boolean = { - txnMetadata.inLock { - txnMetadata.topicPartitions.nonEmpty - } + txnMetadata.inLock(() => + !txnMetadata.topicPartitions.isEmpty + ) } def maybeWriteTxnCompletion(transactionalId: String): Unit = { @@ -421,9 +424,9 @@ class 
TransactionMarkerChannelManager( val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock { + txnMetadata.inLock(() => topicPartitions.foreach(txnMetadata.removePartition) - } + ) maybeWriteTxnCompletion(transactionalId) } diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala index d95dabab6c356..63990fda9853b 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala @@ -131,7 +131,7 @@ class TransactionMarkerRequestCompletionHandler(brokerId: Int, txnMarkerChannelManager.removeMarkersForTxn(pendingCompleteTxn) abortSending = true } else { - txnMetadata.inLock { + txnMetadata.inLock(() => { for ((topicPartition, error) <- errors.asScala) { error match { case Errors.NONE => @@ -178,7 +178,7 @@ class TransactionMarkerRequestCompletionHandler(brokerId: Int, throw new IllegalStateException(s"Unexpected error ${other.exceptionName} while sending txn marker for $transactionalId") } } - } + }) } if (!abortSending) { diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala deleted file mode 100644 index aff6874951386..0000000000000 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala +++ /dev/null @@ -1,659 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.coordinator.transaction - -import java.util.concurrent.locks.ReentrantLock -import kafka.utils.{CoreUtils, Logging, nonthreadsafe} -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.record.RecordBatch -import org.apache.kafka.server.common.TransactionVersion - -import scala.collection.{immutable, mutable} - - -object TransactionState { - val AllStates: Set[TransactionState] = Set( - Empty, - Ongoing, - PrepareCommit, - PrepareAbort, - CompleteCommit, - CompleteAbort, - Dead, - PrepareEpochFence - ) - - def fromName(name: String): Option[TransactionState] = { - AllStates.find(_.name == name) - } - - def fromId(id: Byte): TransactionState = { - id match { - case 0 => Empty - case 1 => Ongoing - case 2 => PrepareCommit - case 3 => PrepareAbort - case 4 => CompleteCommit - case 5 => CompleteAbort - case 6 => Dead - case 7 => PrepareEpochFence - case _ => throw new IllegalStateException(s"Unknown transaction state id $id from the transaction status message") - } - } -} - -private[transaction] sealed trait TransactionState { - def id: Byte - - /** - * Get the name of this state. This is exposed through the `DescribeTransactions` API. - */ - def name: String - - def validPreviousStates: Set[TransactionState] - - def isExpirationAllowed: Boolean = false -} - -/** - * Transaction has not existed yet - * - * transition: received AddPartitionsToTxnRequest => Ongoing - * received AddOffsetsToTxnRequest => Ongoing - * received EndTxnRequest with abort and TransactionV2 enabled => PrepareAbort - */ -private[transaction] case object Empty extends TransactionState { - val id: Byte = 0 - val name: String = "Empty" - val validPreviousStates: Set[TransactionState] = Set(Empty, CompleteCommit, CompleteAbort) - override def isExpirationAllowed: Boolean = true -} - -/** - * Transaction has started and ongoing - * - * transition: received EndTxnRequest with commit => PrepareCommit - * received EndTxnRequest with abort => PrepareAbort - * received AddPartitionsToTxnRequest => Ongoing - * received AddOffsetsToTxnRequest => Ongoing - */ -private[transaction] case object Ongoing extends TransactionState { - val id: Byte = 1 - val name: String = "Ongoing" - val validPreviousStates: Set[TransactionState] = Set(Ongoing, Empty, CompleteCommit, CompleteAbort) -} - -/** - * Group is preparing to commit - * - * transition: received acks from all partitions => CompleteCommit - */ -private[transaction] case object PrepareCommit extends TransactionState { - val id: Byte = 2 - val name: String = "PrepareCommit" - val validPreviousStates: Set[TransactionState] = Set(Ongoing) -} - -/** - * Group is preparing to abort - * - * transition: received acks from all partitions => CompleteAbort - * - * Note, In transaction v2, we allow Empty, CompleteCommit, CompleteAbort to transition to PrepareAbort. because the - * client may not know the txn state on the server side, it needs to send endTxn request when uncertain. 
- */ -private[transaction] case object PrepareAbort extends TransactionState { - val id: Byte = 3 - val name: String = "PrepareAbort" - val validPreviousStates: Set[TransactionState] = Set(Ongoing, PrepareEpochFence, Empty, CompleteCommit, CompleteAbort) -} - -/** - * Group has completed commit - * - * Will soon be removed from the ongoing transaction cache - */ -private[transaction] case object CompleteCommit extends TransactionState { - val id: Byte = 4 - val name: String = "CompleteCommit" - val validPreviousStates: Set[TransactionState] = Set(PrepareCommit) - override def isExpirationAllowed: Boolean = true -} - -/** - * Group has completed abort - * - * Will soon be removed from the ongoing transaction cache - */ -private[transaction] case object CompleteAbort extends TransactionState { - val id: Byte = 5 - val name: String = "CompleteAbort" - val validPreviousStates: Set[TransactionState] = Set(PrepareAbort) - override def isExpirationAllowed: Boolean = true -} - -/** - * TransactionalId has expired and is about to be removed from the transaction cache - */ -private[transaction] case object Dead extends TransactionState { - val id: Byte = 6 - val name: String = "Dead" - val validPreviousStates: Set[TransactionState] = Set(Empty, CompleteAbort, CompleteCommit) -} - -/** - * We are in the middle of bumping the epoch and fencing out older producers. - */ - -private[transaction] case object PrepareEpochFence extends TransactionState { - val id: Byte = 7 - val name: String = "PrepareEpochFence" - val validPreviousStates: Set[TransactionState] = Set(Ongoing) -} - -private[transaction] object TransactionMetadata { - def isEpochExhausted(producerEpoch: Short): Boolean = producerEpoch >= Short.MaxValue - 1 -} - -// this is a immutable object representing the target transition of the transaction metadata -private[transaction] case class TxnTransitMetadata(producerId: Long, - prevProducerId: Long, - nextProducerId: Long, - producerEpoch: Short, - lastProducerEpoch: Short, - txnTimeoutMs: Int, - txnState: TransactionState, - topicPartitions: mutable.Set[TopicPartition], - txnStartTimestamp: Long, - txnLastUpdateTimestamp: Long, - clientTransactionVersion: TransactionVersion) { - override def toString: String = { - "TxnTransitMetadata(" + - s"producerId=$producerId, " + - s"previousProducerId=$prevProducerId, " + - s"nextProducerId=$nextProducerId, " + - s"producerEpoch=$producerEpoch, " + - s"lastProducerEpoch=$lastProducerEpoch, " + - s"txnTimeoutMs=$txnTimeoutMs, " + - s"txnState=$txnState, " + - s"topicPartitions=$topicPartitions, " + - s"txnStartTimestamp=$txnStartTimestamp, " + - s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp, " + - s"clientTransactionVersion=$clientTransactionVersion)" - } -} - -/** - * - * @param producerId producer id - * @param prevProducerId producer id for the last committed transaction with this transactional ID - * @param nextProducerId Latest producer ID sent to the producer for the given transactional ID - * @param producerEpoch current epoch of the producer - * @param lastProducerEpoch last epoch of the producer - * @param txnTimeoutMs timeout to be used to abort long running transactions - * @param state current state of the transaction - * @param topicPartitions current set of partitions that are part of this transaction - * @param txnStartTimestamp time the transaction was started, i.e., when first partition is added - * @param txnLastUpdateTimestamp updated when any operation updates the TransactionMetadata. 
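Editor's note: the validPreviousStates sets above are the whole transition rule in this (now removed) Scala file: prepareTransitionTo, further down, only accepts a target state whose set contains the current state, and the Java TransactionState enum this patch switches to presumably encodes the same table. A minimal standalone sketch of that gate, with illustrative names and the table copied from the case objects above:

// Standalone sketch of the validPreviousStates gate; names are illustrative and
// the real check is performed by TransactionState/TransactionMetadata.
object TransitionSketch {
  sealed trait State
  case object Empty extends State
  case object Ongoing extends State
  case object PrepareCommit extends State
  case object PrepareAbort extends State
  case object CompleteCommit extends State
  case object CompleteAbort extends State
  case object Dead extends State
  case object PrepareEpochFence extends State

  // Table copied from the validPreviousStates sets of the case objects above.
  private val validPrevious: Map[State, Set[State]] = Map(
    Empty             -> Set(Empty, CompleteCommit, CompleteAbort),
    Ongoing           -> Set(Ongoing, Empty, CompleteCommit, CompleteAbort),
    PrepareCommit     -> Set(Ongoing),
    PrepareAbort      -> Set(Ongoing, PrepareEpochFence, Empty, CompleteCommit, CompleteAbort),
    CompleteCommit    -> Set(PrepareCommit),
    CompleteAbort     -> Set(PrepareAbort),
    Dead              -> Set(Empty, CompleteAbort, CompleteCommit),
    PrepareEpochFence -> Set(Ongoing)
  )

  // A transition to `target` is legal only if the current state appears in the
  // target state's set of valid previous states.
  def canTransition(current: State, target: State): Boolean =
    validPrevious(target).contains(current)
}

For example, canTransition(Ongoing, PrepareCommit) holds while canTransition(CompleteCommit, PrepareCommit) does not; the extra entries in PrepareAbort's row are what let a TV2 client abort from Empty, CompleteCommit or CompleteAbort when it is unsure of the server-side state.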
To be used for expiration - * @param clientTransactionVersion TransactionVersion used by the client when the state was transitioned - */ -@nonthreadsafe -private[transaction] class TransactionMetadata(val transactionalId: String, - var producerId: Long, - var prevProducerId: Long, - var nextProducerId: Long, - var producerEpoch: Short, - var lastProducerEpoch: Short, - var txnTimeoutMs: Int, - var state: TransactionState, - var topicPartitions: mutable.Set[TopicPartition], - @volatile var txnStartTimestamp: Long = -1, - @volatile var txnLastUpdateTimestamp: Long, - var clientTransactionVersion: TransactionVersion) extends Logging { - - // pending state is used to indicate the state that this transaction is going to - // transit to, and for blocking future attempts to transit it again if it is not legal; - // initialized as the same as the current state - var pendingState: Option[TransactionState] = None - - // Indicates that during a previous attempt to fence a producer, the bumped epoch may not have been - // successfully written to the log. If this is true, we will not bump the epoch again when fencing - var hasFailedEpochFence: Boolean = false - - private[transaction] val lock = new ReentrantLock - - def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun) - - def addPartitions(partitions: collection.Set[TopicPartition]): Unit = { - topicPartitions ++= partitions - } - - def removePartition(topicPartition: TopicPartition): Unit = { - if (state != PrepareCommit && state != PrepareAbort) - throw new IllegalStateException(s"Transaction metadata's current state is $state, and its pending state is $pendingState " + - s"while trying to remove partitions whose txn marker has been sent, this is not expected") - - topicPartitions -= topicPartition - } - - // this is visible for test only - def prepareNoTransit(): TxnTransitMetadata = { - // do not call transitTo as it will set the pending state, a follow-up call to abort the transaction will set its pending state - TxnTransitMetadata(producerId, prevProducerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, topicPartitions.clone(), - txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) - } - - def prepareFenceProducerEpoch(): TxnTransitMetadata = { - if (producerEpoch == Short.MaxValue) - throw new IllegalStateException(s"Cannot fence producer with epoch equal to Short.MaxValue since this would overflow") - - // If we've already failed to fence an epoch (because the write to the log failed), we don't increase it again. 
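Editor's note: a small sketch of the bump decision described in this comment, together with the TV2 change earlier in this patch (which stops setting hasFailedEpochFence, so a retried fence simply bumps again); the parameters are simplified stand-ins for TransactionMetadata fields:

// Sketch of the fence-epoch decision; simplified stand-in for the real fields.
def fencingEpoch(producerEpoch: Short, hasFailedEpochFence: Boolean): Short = {
  if (producerEpoch == Short.MaxValue)
    throw new IllegalStateException("Cannot fence producer with epoch Short.MaxValue since this would overflow")
  // A previously failed fence write keeps the already-bumped epoch; otherwise bump
  // by one. Under TV2 this patch never sets hasFailedEpochFence, so a retried
  // fence bumps the epoch again.
  if (hasFailedEpochFence) producerEpoch else (producerEpoch + 1).toShort
}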
- // This is safe because we never return the epoch to client if we fail to fence the epoch - val bumpedEpoch = if (hasFailedEpochFence) producerEpoch else (producerEpoch + 1).toShort - - prepareTransitionTo( - state = PrepareEpochFence, - producerEpoch = bumpedEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH - ) - } - - def prepareIncrementProducerEpoch(newTxnTimeoutMs: Int, - expectedProducerEpoch: Option[Short], - updateTimestamp: Long): Either[Errors, TxnTransitMetadata] = { - if (isProducerEpochExhausted) - throw new IllegalStateException(s"Cannot allocate any more producer epochs for producerId $producerId") - - val bumpedEpoch = (producerEpoch + 1).toShort - val epochBumpResult: Either[Errors, (Short, Short)] = expectedProducerEpoch match { - case None => - // If no expected epoch was provided by the producer, bump the current epoch and set the last epoch to -1 - // In the case of a new producer, producerEpoch will be -1 and bumpedEpoch will be 0 - Right(bumpedEpoch, RecordBatch.NO_PRODUCER_EPOCH) - - case Some(expectedEpoch) => - if (producerEpoch == RecordBatch.NO_PRODUCER_EPOCH || expectedEpoch == producerEpoch) - // If the expected epoch matches the current epoch, or if there is no current epoch, the producer is attempting - // to continue after an error and no other producer has been initialized. Bump the current and last epochs. - // The no current epoch case means this is a new producer; producerEpoch will be -1 and bumpedEpoch will be 0 - Right(bumpedEpoch, producerEpoch) - else if (expectedEpoch == lastProducerEpoch) - // If the expected epoch matches the previous epoch, it is a retry of a successful call, so just return the - // current epoch without bumping. There is no danger of this producer being fenced, because a new producer - // calling InitProducerId would have caused the last epoch to be set to -1. - // Note that if the IBP is prior to 2.4.IV1, the lastProducerId and lastProducerEpoch will not be written to - // the transaction log, so a retry that spans a coordinator change will fail. We expect this to be a rare case. 
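Editor's note: the retry and bump rules spelled out in these comments reduce to a small decision over the expected epoch. A standalone sketch, where NoProducerEpoch stands in for RecordBatch.NO_PRODUCER_EPOCH and the Left value for Errors.PRODUCER_FENCED:

object InitPidEpochSketch {
  val NoProducerEpoch: Short = -1

  def nextEpochs(producerEpoch: Short,
                 lastProducerEpoch: Short,
                 expectedProducerEpoch: Option[Short]): Either[String, (Short, Short)] = {
    val bumped = (producerEpoch + 1).toShort
    expectedProducerEpoch match {
      case None =>
        // No expected epoch supplied: bump, and do not record a previous epoch.
        Right((bumped, NoProducerEpoch))
      case Some(expected) if producerEpoch == NoProducerEpoch || expected == producerEpoch =>
        // Brand-new producer, or a producer continuing after an error: bump and
        // remember the epoch it was bumped from.
        Right((bumped, producerEpoch))
      case Some(expected) if expected == lastProducerEpoch =>
        // Retry of a bump that already succeeded: return the current epoch unchanged.
        Right((producerEpoch, lastProducerEpoch))
      case Some(_) =>
        // Neither the current nor the previous epoch matches: the producer is fenced.
        Left("PRODUCER_FENCED")
    }
  }
}

For instance, nextEpochs(5, 4, Some(5)) yields Right((6, 5)) (a normal bump), nextEpochs(5, 4, Some(4)) yields Right((5, 4)) (a retry keeps the already-bumped epoch), and nextEpochs(5, 4, Some(3)) is fenced.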
- Right(producerEpoch, lastProducerEpoch) - else { - // Otherwise, the producer has a fenced epoch and should receive an PRODUCER_FENCED error - info(s"Expected producer epoch $expectedEpoch does not match current " + - s"producer epoch $producerEpoch or previous producer epoch $lastProducerEpoch") - Left(Errors.PRODUCER_FENCED) - } - } - - epochBumpResult match { - case Right((nextEpoch, lastEpoch)) => Right(prepareTransitionTo( - state = Empty, - producerEpoch = nextEpoch, - lastProducerEpoch = lastEpoch, - txnTimeoutMs = newTxnTimeoutMs, - topicPartitions = mutable.Set.empty[TopicPartition], - txnStartTimestamp = -1, - txnLastUpdateTimestamp = updateTimestamp - )) - - case Left(err) => Left(err) - } - } - - def prepareProducerIdRotation(newProducerId: Long, - newTxnTimeoutMs: Int, - updateTimestamp: Long, - recordLastEpoch: Boolean): TxnTransitMetadata = { - if (hasPendingTransaction) - throw new IllegalStateException("Cannot rotate producer ids while a transaction is still pending") - - prepareTransitionTo( - state = Empty, - producerId = newProducerId, - producerEpoch = 0, - lastProducerEpoch = if (recordLastEpoch) producerEpoch else RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = newTxnTimeoutMs, - topicPartitions = mutable.Set.empty[TopicPartition], - txnStartTimestamp = -1, - txnLastUpdateTimestamp = updateTimestamp - ) - } - - def prepareAddPartitions(addedTopicPartitions: immutable.Set[TopicPartition], updateTimestamp: Long, clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { - val newTxnStartTimestamp = state match { - case Empty | CompleteAbort | CompleteCommit => updateTimestamp - case _ => txnStartTimestamp - } - - prepareTransitionTo( - state = Ongoing, - topicPartitions = (topicPartitions ++ addedTopicPartitions), - txnStartTimestamp = newTxnStartTimestamp, - txnLastUpdateTimestamp = updateTimestamp, - clientTransactionVersion = clientTransactionVersion - ) - } - - def prepareAbortOrCommit(newState: TransactionState, clientTransactionVersion: TransactionVersion, nextProducerId: Long, updateTimestamp: Long, noPartitionAdded: Boolean): TxnTransitMetadata = { - val (updatedProducerEpoch, updatedLastProducerEpoch) = if (clientTransactionVersion.supportsEpochBump()) { - // We already ensured that we do not overflow here. MAX_SHORT is the highest possible value. - ((producerEpoch + 1).toShort, producerEpoch) - } else { - (producerEpoch, lastProducerEpoch) - } - - // With transaction V2, it is allowed to abort the transaction without adding any partitions. Then, the transaction - // start time is uncertain but it is still required. So we can use the update time as the transaction start time. 
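Editor's note: the PrepareCommit/PrepareAbort transition here picks its epoch and start-timestamp values from two flags. A condensed sketch of just that selection, with plain booleans standing in for clientTransactionVersion.supportsEpochBump() and the no-partition-added case:

// Sketch of the values chosen when entering PrepareCommit/PrepareAbort.
def prepareEndTxnValues(producerEpoch: Short,
                        lastProducerEpoch: Short,
                        txnStartTimestamp: Long,
                        updateTimestamp: Long,
                        supportsEpochBump: Boolean,
                        noPartitionAdded: Boolean): (Short, Short, Long) = {
  // TV2 bumps the epoch on every end-txn; overflow was already ruled out by the caller.
  val (newEpoch, newLastEpoch) =
    if (supportsEpochBump) ((producerEpoch + 1).toShort, producerEpoch)
    else (producerEpoch, lastProducerEpoch)
  // A TV2 abort may arrive before any partition was added, so there is no real
  // start time; fall back to the update time.
  val newStartTimestamp = if (noPartitionAdded) updateTimestamp else txnStartTimestamp
  (newEpoch, newLastEpoch, newStartTimestamp)
}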
- val newTxnStartTimestamp = if (noPartitionAdded) updateTimestamp else txnStartTimestamp - prepareTransitionTo( - state = newState, - nextProducerId = nextProducerId, - producerEpoch = updatedProducerEpoch, - lastProducerEpoch = updatedLastProducerEpoch, - txnStartTimestamp = newTxnStartTimestamp, - txnLastUpdateTimestamp = updateTimestamp, - clientTransactionVersion = clientTransactionVersion - ) - } - - def prepareComplete(updateTimestamp: Long): TxnTransitMetadata = { - val newState = if (state == PrepareCommit) CompleteCommit else CompleteAbort - - // Since the state change was successfully written to the log, unset the flag for a failed epoch fence - hasFailedEpochFence = false - val (updatedProducerId, updatedProducerEpoch) = - // In the prepareComplete transition for the overflow case, the lastProducerEpoch is kept at MAX-1, - // which is the last epoch visible to the client. - // Internally, however, during the transition between prepareAbort/prepareCommit and prepareComplete, the producer epoch - // reaches MAX but the client only sees the transition as MAX-1 followed by 0. - // When an epoch overflow occurs, we set the producerId to nextProducerId and reset the epoch to 0, - // but lastProducerEpoch remains MAX-1 to maintain consistency with what the client last saw. - if (clientTransactionVersion.supportsEpochBump() && nextProducerId != RecordBatch.NO_PRODUCER_ID) { - (nextProducerId, 0.toShort) - } else { - (producerId, producerEpoch) - } - - prepareTransitionTo( - state = newState, - producerId = updatedProducerId, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = updatedProducerEpoch, - topicPartitions = mutable.Set.empty[TopicPartition], - txnLastUpdateTimestamp = updateTimestamp - ) - } - - def prepareDead(): TxnTransitMetadata = { - prepareTransitionTo( - state = Dead, - topicPartitions = mutable.Set.empty[TopicPartition] - ) - } - - /** - * Check if the epochs have been exhausted for the current producerId. We do not allow the client to use an - * epoch equal to Short.MaxValue to ensure that the coordinator will always be able to fence an existing producer. - */ - def isProducerEpochExhausted: Boolean = TransactionMetadata.isEpochExhausted(producerEpoch) - - /** - * Check if this is a distributed two phase commit transaction. - * Such transactions have no timeout (identified by maximum value for timeout). 
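Editor's note: the expiration scan later in this patch relies on exactly this marker: an ongoing transaction whose timeout is Int.MaxValue is treated as a distributed two-phase-commit transaction and is never timed out by the coordinator. A condensed sketch of that predicate, with simplified parameter names:

// Sketch of the timeout check applied to ongoing transactions by the expiration scan.
def shouldTimeOutOngoingTxn(txnTimeoutMs: Int,
                            txnStartTimestamp: Long,
                            nowMs: Long): Boolean = {
  // A timeout of Int.MaxValue marks a distributed two-phase-commit transaction.
  val isDistributedTwoPhaseCommit = txnTimeoutMs == Int.MaxValue
  !isDistributedTwoPhaseCommit && txnStartTimestamp + txnTimeoutMs < nowMs
}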
- */ - def isDistributedTwoPhaseCommitTxn: Boolean = txnTimeoutMs == Int.MaxValue - - private def hasPendingTransaction: Boolean = { - state match { - case Ongoing | PrepareAbort | PrepareCommit => true - case _ => false - } - } - - private def prepareTransitionTo(state: TransactionState, - producerId: Long = this.producerId, - nextProducerId: Long = this.nextProducerId, - producerEpoch: Short = this.producerEpoch, - lastProducerEpoch: Short = this.lastProducerEpoch, - txnTimeoutMs: Int = this.txnTimeoutMs, - topicPartitions: mutable.Set[TopicPartition] = this.topicPartitions, - txnStartTimestamp: Long = this.txnStartTimestamp, - txnLastUpdateTimestamp: Long = this.txnLastUpdateTimestamp, - clientTransactionVersion: TransactionVersion = this.clientTransactionVersion): TxnTransitMetadata = { - if (pendingState.isDefined) - throw new IllegalStateException(s"Preparing transaction state transition to $state " + - s"while it already a pending state ${pendingState.get}") - - if (producerId < 0) - throw new IllegalArgumentException(s"Illegal new producer id $producerId") - - // The epoch is initialized to NO_PRODUCER_EPOCH when the TransactionMetadata - // is created for the first time and it could stay like this until transitioning - // to Dead. - if (state != Dead && producerEpoch < 0) - throw new IllegalArgumentException(s"Illegal new producer epoch $producerEpoch") - - // check that the new state transition is valid and update the pending state if necessary - if (state.validPreviousStates.contains(this.state)) { - val transitMetadata = TxnTransitMetadata(producerId, this.producerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, - topicPartitions, txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) - debug(s"TransactionalId ${this.transactionalId} prepare transition from ${this.state} to $transitMetadata") - pendingState = Some(state) - transitMetadata - } else { - throw new IllegalStateException(s"Preparing transaction state transition to $state failed since the target state" + - s" $state is not a valid previous state of the current state ${this.state}") - } - } - - def completeTransitionTo(transitMetadata: TxnTransitMetadata): Unit = { - // metadata transition is valid only if all the following conditions are met: - // - // 1. the new state is already indicated in the pending state. - // 2. the epoch should be either the same value, the old value + 1, or 0 if we have a new producerId. - // 3. the last update time is no smaller than the old value. - // 4. the old partitions set is a subset of the new partitions set. 
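Editor's note: the real completeTransitionTo specializes these checks per target state; as a rough standalone sketch of the four conditions just listed (simplified types, illustrative names only):

// Sketch of the four documented validity conditions for completing a transition.
def transitionLooksValid(pendingState: Option[String],
                         targetState: String,
                         currentEpoch: Short, newEpoch: Short,
                         currentProducerId: Long, newProducerId: Long,
                         currentUpdateTs: Long, newUpdateTs: Long,
                         currentPartitions: Set[String], newPartitions: Set[String]): Boolean = {
  val statePending = pendingState.contains(targetState)                      // (1)
  val epochOk      = newEpoch == currentEpoch || newEpoch == currentEpoch + 1 ||
                     (newEpoch == 0 && newProducerId != currentProducerId)   // (2)
  val timestampOk  = newUpdateTs >= currentUpdateTs                          // (3)
  val partitionsOk = currentPartitions.subsetOf(newPartitions)               // (4)
  statePending && epochOk && timestampOk && partitionsOk
}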
- // - // plus, we should only try to update the metadata after the corresponding log entry has been successfully - // written and replicated (see TransactionStateManager#appendTransactionToLog) - // - // if valid, transition is done via overwriting the whole object to ensure synchronization - - val toState = pendingState.getOrElse { - fatal(s"$this's transition to $transitMetadata failed since pendingState is not defined: this should not happen") - - throw new IllegalStateException(s"TransactionalId $transactionalId " + - "completing transaction state transition while it does not have a pending state") - } - - if (toState != transitMetadata.txnState) { - throwStateTransitionFailure(transitMetadata) - } else { - toState match { - case Empty => // from initPid - if ((producerEpoch != transitMetadata.producerEpoch && !validProducerEpochBump(transitMetadata)) || - transitMetadata.topicPartitions.nonEmpty || - transitMetadata.txnStartTimestamp != -1) { - - throwStateTransitionFailure(transitMetadata) - } - - case Ongoing => // from addPartitions - if (!validProducerEpoch(transitMetadata) || - !topicPartitions.subsetOf(transitMetadata.topicPartitions) || - txnTimeoutMs != transitMetadata.txnTimeoutMs) { - - throwStateTransitionFailure(transitMetadata) - } - - case PrepareAbort | PrepareCommit => // from endTxn - // In V2, we allow state transits from Empty, CompleteCommit and CompleteAbort to PrepareAbort. It is possible - // their updated start time is not equal to the current start time. - val allowedEmptyAbort = toState == PrepareAbort && transitMetadata.clientTransactionVersion.supportsEpochBump() && - (state == Empty || state == CompleteCommit || state == CompleteAbort) - val validTimestamp = txnStartTimestamp == transitMetadata.txnStartTimestamp || allowedEmptyAbort - if (!validProducerEpoch(transitMetadata) || - !topicPartitions.equals(transitMetadata.topicPartitions) || - txnTimeoutMs != transitMetadata.txnTimeoutMs || !validTimestamp) { - - throwStateTransitionFailure(transitMetadata) - } - - case CompleteAbort | CompleteCommit => // from write markers - if (!validProducerEpoch(transitMetadata) || - txnTimeoutMs != transitMetadata.txnTimeoutMs || - transitMetadata.txnStartTimestamp == -1) { - throwStateTransitionFailure(transitMetadata) - } - - case PrepareEpochFence => - // We should never get here, since once we prepare to fence the epoch, we immediately set the pending state - // to PrepareAbort, and then consequently to CompleteAbort after the markers are written.. So we should never - // ever try to complete a transition to PrepareEpochFence, as it is not a valid previous state for any other state, and hence - // can never be transitioned out of. - throwStateTransitionFailure(transitMetadata) - - - case Dead => - // The transactionalId was being expired. The completion of the operation should result in removal of the - // the metadata from the cache, so we should never realistically transition to the dead state. - throw new IllegalStateException(s"TransactionalId $transactionalId is trying to complete a transition to " + - s"$toState. 
This means that the transactionalId was being expired, and the only acceptable completion of " + - s"this operation is to remove the transaction metadata from the cache, not to persist the $toState in the log.") - } - - debug(s"TransactionalId $transactionalId complete transition from $state to $transitMetadata") - producerId = transitMetadata.producerId - prevProducerId = transitMetadata.prevProducerId - nextProducerId = transitMetadata.nextProducerId - producerEpoch = transitMetadata.producerEpoch - lastProducerEpoch = transitMetadata.lastProducerEpoch - txnTimeoutMs = transitMetadata.txnTimeoutMs - topicPartitions = transitMetadata.topicPartitions - txnStartTimestamp = transitMetadata.txnStartTimestamp - txnLastUpdateTimestamp = transitMetadata.txnLastUpdateTimestamp - clientTransactionVersion = transitMetadata.clientTransactionVersion - - pendingState = None - state = toState - } - } - - /** - * Validates the producer epoch and ID based on transaction state and version. - * - * Logic: - * * 1. **Overflow Case in Transactions V2:** - * * - During overflow (epoch reset to 0), we compare both `lastProducerEpoch` values since it - * * does not change during completion. - * * - For PrepareComplete, the producer ID has been updated. We ensure that the `prevProducerID` - * * in the transit metadata matches the current producer ID, confirming the change. - * * - * * 2. **Epoch Bump Case in Transactions V2:** - * * - For PrepareCommit or PrepareAbort, the producer epoch has been bumped. We ensure the `lastProducerEpoch` - * * in transit metadata matches the current producer epoch, confirming the bump. - * * - We also verify that the producer ID remains the same. - * * - * * 3. **Other Cases:** - * * - For other states and versions, check if the producer epoch and ID match the current values. - * - * @param transitMetadata The transaction transition metadata containing state, producer epoch, and ID. - * @return true if the producer epoch and ID are valid; false otherwise. 
- */ - private def validProducerEpoch(transitMetadata: TxnTransitMetadata): Boolean = { - val isAtLeastTransactionsV2 = transitMetadata.clientTransactionVersion.supportsEpochBump() - val txnState = transitMetadata.txnState - val transitProducerEpoch = transitMetadata.producerEpoch - val transitProducerId = transitMetadata.producerId - val transitLastProducerEpoch = transitMetadata.lastProducerEpoch - - (isAtLeastTransactionsV2, txnState, transitProducerEpoch) match { - case (true, CompleteCommit | CompleteAbort, epoch) if epoch == 0.toShort => - transitLastProducerEpoch == lastProducerEpoch && - transitMetadata.prevProducerId == producerId - - case (true, PrepareCommit | PrepareAbort, _) => - transitLastProducerEpoch == producerEpoch && - transitProducerId == producerId - - case _ => - transitProducerEpoch == producerEpoch && - transitProducerId == producerId - } - } - - private def validProducerEpochBump(transitMetadata: TxnTransitMetadata): Boolean = { - val transitEpoch = transitMetadata.producerEpoch - val transitProducerId = transitMetadata.producerId - transitEpoch == producerEpoch + 1 || (transitEpoch == 0 && transitProducerId != producerId) - } - - private def throwStateTransitionFailure(txnTransitMetadata: TxnTransitMetadata): Unit = { - fatal(s"${this.toString}'s transition to $txnTransitMetadata failed: this should not happen") - - throw new IllegalStateException(s"TransactionalId $transactionalId failed transition to state $txnTransitMetadata " + - "due to unexpected metadata") - } - - def pendingTransitionInProgress: Boolean = pendingState.isDefined - - override def toString: String = { - "TransactionMetadata(" + - s"transactionalId=$transactionalId, " + - s"producerId=$producerId, " + - s"prevProducerId=$prevProducerId, " + - s"nextProducerId=$nextProducerId, " + - s"producerEpoch=$producerEpoch, " + - s"lastProducerEpoch=$lastProducerEpoch, " + - s"txnTimeoutMs=$txnTimeoutMs, " + - s"state=$state, " + - s"pendingState=$pendingState, " + - s"topicPartitions=$topicPartitions, " + - s"txnStartTimestamp=$txnStartTimestamp, " + - s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp, " + - s"clientTransactionVersion=$clientTransactionVersion)" - } - - override def equals(that: Any): Boolean = that match { - case other: TransactionMetadata => - transactionalId == other.transactionalId && - producerId == other.producerId && - producerEpoch == other.producerEpoch && - lastProducerEpoch == other.lastProducerEpoch && - txnTimeoutMs == other.txnTimeoutMs && - state.equals(other.state) && - topicPartitions.equals(other.topicPartitions) && - txnStartTimestamp == other.txnStartTimestamp && - txnLastUpdateTimestamp == other.txnLastUpdateTimestamp && - clientTransactionVersion == other.clientTransactionVersion - case _ => false - } - - override def hashCode(): Int = { - val fields = Seq(transactionalId, producerId, producerEpoch, txnTimeoutMs, state, topicPartitions, - txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) - fields.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b) - } -} diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala index 0c6391af6dbd0..82b960c5ba799 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala @@ -18,11 +18,12 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer import java.util.Properties 
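Editor's note: one mechanical change repeated throughout this patch is the inLock call shape. The removed Scala class took a by-name block (inLock { ... }), while the Java TransactionMetadata that replaces it is called as inLock(() => ...) from Scala, which points at a functional-interface parameter (Supplier or similar). A hedged sketch of that shape; the class and signature below are assumptions for illustration, not the real Java API:

import java.util.concurrent.locks.ReentrantLock
import java.util.function.Supplier

class LockedMetadataSketch {
  private val lock = new ReentrantLock()

  // Assumed shape of the Java-side helper: run the supplier while holding the lock.
  def inLock[T](supplier: Supplier[T]): T = {
    lock.lock()
    try {
      supplier.get()
    } finally {
      lock.unlock()
    }
  }
}

// From Scala 2.12 onward, a lambda such as `() => !txnMetadata.topicPartitions.isEmpty`
// is SAM-converted to Supplier, which is why the call sites in this patch change from
// `txnMetadata.inLock { ... }` to `txnMetadata.inLock(() => { ... })`.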
+import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.ReentrantReadWriteLock import kafka.server.ReplicaManager import kafka.utils.CoreUtils.{inReadLock, inWriteLock} -import kafka.utils.{Logging, Pool} +import kafka.utils.Logging import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.ListTransactionsResponseData @@ -33,8 +34,8 @@ import org.apache.kafka.common.record.{FileRecords, MemoryRecords, MemoryRecords import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.TransactionResult import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{KafkaException, TopicPartition} -import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} +import org.apache.kafka.common.{KafkaException, TopicIdPartition, TopicPartition} +import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{RequestLocal, TransactionVersion} import org.apache.kafka.server.config.ServerConfigs @@ -42,6 +43,10 @@ import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler import org.apache.kafka.storage.internals.log.AppendOrigin +import com.google.re2j.{Pattern, PatternSyntaxException} +import org.apache.kafka.common.errors.InvalidRegularExpression + +import java.util.Optional import scala.jdk.CollectionConverters._ import scala.collection.mutable @@ -126,12 +131,12 @@ class TransactionStateManager(brokerId: Int, val now = time.milliseconds() inReadLock(stateLock) { transactionMetadataCache.flatMap { case (_, entry) => - entry.metadataPerTransactionalId.filter { case (_, txnMetadata) => + entry.metadataPerTransactionalId.asScala.filter { case (_, txnMetadata) => if (txnMetadata.pendingTransitionInProgress) { false } else { txnMetadata.state match { - case Ongoing => + case TransactionState.ONGOING => // Do not apply timeout to distributed two phase commit transactions. 
(!txnMetadata.isDistributedTwoPhaseCommitTxn) && (txnMetadata.txnStartTimestamp + txnMetadata.txnTimeoutMs < now) @@ -156,7 +161,7 @@ class TransactionStateManager(brokerId: Int, val maxBatchSize = logConfig.maxMessageSize val expired = mutable.ListBuffer.empty[TransactionalIdCoordinatorEpochAndMetadata] var recordsBuilder: MemoryRecordsBuilder = null - val stateEntries = txnMetadataCacheEntry.metadataPerTransactionalId.values.iterator.buffered + val stateEntries = txnMetadataCacheEntry.metadataPerTransactionalId.values.asScala.iterator.buffered def flushRecordsBuilder(): Unit = { writeTombstonesForExpiredTransactionalIds( @@ -173,7 +178,7 @@ class TransactionStateManager(brokerId: Int, val transactionalId = txnMetadata.transactionalId var fullBatch = false - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadata.pendingState.isEmpty && shouldExpire(txnMetadata, currentTimeMs)) { if (recordsBuilder == null) { recordsBuilder = MemoryRecords.builder( @@ -196,7 +201,7 @@ class TransactionStateManager(brokerId: Int, fullBatch = true } } - } + }) if (fullBatch) { flushRecordsBuilder() @@ -253,16 +258,16 @@ class TransactionStateManager(brokerId: Int, expiredForPartition: Iterable[TransactionalIdCoordinatorEpochAndMetadata], tombstoneRecords: MemoryRecords ): Unit = { - def removeFromCacheCallback(responses: collection.Map[TopicPartition, PartitionResponse]): Unit = { + def removeFromCacheCallback(responses: collection.Map[TopicIdPartition, PartitionResponse]): Unit = { responses.foreachEntry { (topicPartition, response) => inReadLock(stateLock) { transactionMetadataCache.get(topicPartition.partition).foreach { txnMetadataCacheEntry => expiredForPartition.foreach { idCoordinatorEpochAndMetadata => val transactionalId = idCoordinatorEpochAndMetadata.transactionalId val txnMetadata = txnMetadataCacheEntry.metadataPerTransactionalId.get(transactionalId) - txnMetadata.inLock { + txnMetadata.inLock(() => { if (txnMetadataCacheEntry.coordinatorEpoch == idCoordinatorEpochAndMetadata.coordinatorEpoch - && txnMetadata.pendingState.contains(Dead) + && txnMetadata.pendingState.filter(s => s == TransactionState.DEAD).isPresent && txnMetadata.producerEpoch == idCoordinatorEpochAndMetadata.transitMetadata.producerEpoch && response.error == Errors.NONE) { txnMetadataCacheEntry.metadataPerTransactionalId.remove(transactionalId) @@ -273,9 +278,9 @@ class TransactionStateManager(brokerId: Int, s" expected producerEpoch: ${idCoordinatorEpochAndMetadata.transitMetadata.producerEpoch}," + s" coordinatorEpoch: ${txnMetadataCacheEntry.coordinatorEpoch}, expected coordinatorEpoch: " + s"${idCoordinatorEpochAndMetadata.coordinatorEpoch}") - txnMetadata.pendingState = None + txnMetadata.pendingState(Optional.empty()) } - } + }) } } } @@ -288,7 +293,7 @@ class TransactionStateManager(brokerId: Int, requiredAcks = TransactionLog.EnforcedRequiredAcks, internalTopicsAllowed = true, origin = AppendOrigin.COORDINATOR, - entriesPerPartition = Map(transactionPartition -> tombstoneRecords), + entriesPerPartition = Map(replicaManager.topicIdPartition(transactionPartition) -> tombstoneRecords), responseCallback = removeFromCacheCallback, requestLocal = RequestLocal.noCaching) } @@ -315,7 +320,8 @@ class TransactionStateManager(brokerId: Int, def listTransactionStates( filterProducerIds: Set[Long], filterStateNames: Set[String], - filterDurationMs: Long + filterDurationMs: Long, + filterTransactionalIdPattern: String ): ListTransactionsResponseData = { inReadLock(stateLock) { val response = new 
ListTransactionsResponseData() @@ -324,15 +330,15 @@ class TransactionStateManager(brokerId: Int, } else { val filterStates = mutable.Set.empty[TransactionState] filterStateNames.foreach { stateName => - TransactionState.fromName(stateName) match { - case Some(state) => filterStates += state - case None => response.unknownStateFilters.add(stateName) - } + TransactionState.fromName(stateName).ifPresentOrElse( + state => filterStates += state, + () => response.unknownStateFilters.add(stateName) + ) } val now : Long = time.milliseconds() - def shouldInclude(txnMetadata: TransactionMetadata): Boolean = { - if (txnMetadata.state == Dead) { + def shouldInclude(txnMetadata: TransactionMetadata, pattern: Pattern): Boolean = { + if (txnMetadata.state == TransactionState.DEAD) { // We filter the `Dead` state since it is a transient state which // indicates that the transactionalId and its metadata are in the // process of expiration and removal. @@ -343,23 +349,34 @@ class TransactionStateManager(brokerId: Int, false } else if (filterDurationMs >= 0 && (now - txnMetadata.txnStartTimestamp) <= filterDurationMs) { false + } else if (pattern != null) { + pattern.matcher(txnMetadata.transactionalId).matches() } else { true } } val states = new java.util.ArrayList[ListTransactionsResponseData.TransactionState] + val pattern = if (filterTransactionalIdPattern != null && filterTransactionalIdPattern.nonEmpty) { + try { + Pattern.compile(filterTransactionalIdPattern) + } + catch { + case e: PatternSyntaxException => + throw new InvalidRegularExpression(String.format("Transaction ID pattern `%s` is not a valid regular expression: %s.", filterTransactionalIdPattern, e.getMessage)) + } + } else null transactionMetadataCache.foreachEntry { (_, cache) => - cache.metadataPerTransactionalId.values.foreach { txnMetadata => - txnMetadata.inLock { - if (shouldInclude(txnMetadata)) { + cache.metadataPerTransactionalId.forEach { (_, txnMetadata) => + txnMetadata.inLock(() => { + if (shouldInclude(txnMetadata, pattern)) { states.add(new ListTransactionsResponseData.TransactionState() .setTransactionalId(txnMetadata.transactionalId) .setProducerId(txnMetadata.producerId) - .setTransactionState(txnMetadata.state.name) + .setTransactionState(txnMetadata.state.stateName) ) } - } + }) } } response.setErrorCode(Errors.NONE.code) @@ -386,7 +403,7 @@ class TransactionStateManager(brokerId: Int, case Some(cacheEntry) => val txnMetadata = Option(cacheEntry.metadataPerTransactionalId.get(transactionalId)).orElse { createdTxnMetadataOpt.map { createdTxnMetadata => - Option(cacheEntry.metadataPerTransactionalId.putIfNotExists(transactionalId, createdTxnMetadata)) + Option(cacheEntry.metadataPerTransactionalId.putIfAbsent(transactionalId, createdTxnMetadata)) .getOrElse(createdTxnMetadata) } } @@ -428,10 +445,10 @@ class TransactionStateManager(brokerId: Int, def partitionFor(transactionalId: String): Int = Utils.abs(transactionalId.hashCode) % transactionTopicPartitionCount - private def loadTransactionMetadata(topicPartition: TopicPartition, coordinatorEpoch: Int): Pool[String, TransactionMetadata] = { + private def loadTransactionMetadata(topicPartition: TopicPartition, coordinatorEpoch: Int): ConcurrentMap[String, TransactionMetadata] = { def logEndOffset = replicaManager.getLogEndOffset(topicPartition).getOrElse(-1L) - val loadedTransactions = new Pool[String, TransactionMetadata] + val loadedTransactions = new ConcurrentHashMap[String, TransactionMetadata] replicaManager.getLog(topicPartition) match { case None => @@ -509,7 
+526,7 @@ class TransactionStateManager(brokerId: Int, */ private[transaction] def addLoadedTransactionsToCache(txnTopicPartition: Int, coordinatorEpoch: Int, - loadedTransactions: Pool[String, TransactionMetadata]): Unit = { + loadedTransactions: ConcurrentMap[String, TransactionMetadata]): Unit = { val txnMetadataCacheEntry = TxnMetadataCacheEntry(coordinatorEpoch, loadedTransactions) val previousTxnMetadataCacheEntryOpt = transactionMetadataCache.put(txnTopicPartition, txnMetadataCacheEntry) @@ -549,22 +566,21 @@ class TransactionStateManager(brokerId: Int, addLoadedTransactionsToCache(topicPartition.partition, coordinatorEpoch, loadedTransactions) val transactionsPendingForCompletion = new mutable.ListBuffer[TransactionalIdCoordinatorEpochAndTransitMetadata] - loadedTransactions.foreach { - case (transactionalId, txnMetadata) => - txnMetadata.inLock { - // if state is PrepareCommit or PrepareAbort we need to complete the transaction - txnMetadata.state match { - case PrepareAbort => - transactionsPendingForCompletion += - TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.ABORT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case PrepareCommit => - transactionsPendingForCompletion += - TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.COMMIT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case _ => - // nothing needs to be done - } + loadedTransactions.forEach((transactionalId, txnMetadata) => { + txnMetadata.inLock(() => { + // if state is PrepareCommit or PrepareAbort we need to complete the transaction + txnMetadata.state match { + case TransactionState.PREPARE_ABORT => + transactionsPendingForCompletion += + TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.ABORT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case TransactionState.PREPARE_COMMIT => + transactionsPendingForCompletion += + TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.COMMIT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case _ => + // nothing needs to be done } - } + }) + }) // we first remove the partition from loading partition then send out the markers for those pending to be // completed transactions, so that when the markers get sent the attempt of appending the complete transaction @@ -648,17 +664,18 @@ class TransactionStateManager(brokerId: Int, val timestamp = time.milliseconds() val records = MemoryRecords.withRecords(TransactionLog.EnforcedCompression, new SimpleRecord(timestamp, keyBytes, valueBytes)) - val topicPartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionFor(transactionalId)) - val recordsPerPartition = Map(topicPartition -> records) + val transactionStateTopicPartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionFor(transactionalId)) + val transactionStateTopicIdPartition = replicaManager.topicIdPartition(transactionStateTopicPartition) + val recordsPerPartition = Map(transactionStateTopicIdPartition -> records) // set the callback function to update transaction status in cache after log append completed - def updateCacheCallback(responseStatus: collection.Map[TopicPartition, PartitionResponse]): Unit = { + def updateCacheCallback(responseStatus: collection.Map[TopicIdPartition, PartitionResponse]): Unit = { // the append response should only contain the topics 
partition - if (responseStatus.size != 1 || !responseStatus.contains(topicPartition)) + if (responseStatus.size != 1 || !responseStatus.contains(transactionStateTopicIdPartition)) throw new IllegalStateException("Append status %s should only have one partition %s" - .format(responseStatus, topicPartition)) + .format(responseStatus, transactionStateTopicPartition)) - val status = responseStatus(topicPartition) + val status = responseStatus(transactionStateTopicIdPartition) var responseError = if (status.error == Errors.NONE) { Errors.NONE @@ -698,7 +715,7 @@ class TransactionStateManager(brokerId: Int, case Right(Some(epochAndMetadata)) => val metadata = epochAndMetadata.transactionMetadata - metadata.inLock { + metadata.inLock(() => { if (epochAndMetadata.coordinatorEpoch != coordinatorEpoch) { // the cache may have been changed due to txn topic partition emigration and immigration, // in this case directly return NOT_COORDINATOR to client and let it to re-discover the transaction coordinator @@ -710,7 +727,7 @@ class TransactionStateManager(brokerId: Int, metadata.completeTransitionTo(newMetadata) debug(s"Updating $transactionalId's transaction state to $newMetadata with coordinator epoch $coordinatorEpoch for $transactionalId succeeded") } - } + }) case Right(None) => // this transactional id no longer exists, maybe the corresponding partition has already been migrated out. @@ -725,7 +742,7 @@ class TransactionStateManager(brokerId: Int, getTransactionState(transactionalId) match { case Right(Some(epochAndTxnMetadata)) => val metadata = epochAndTxnMetadata.transactionMetadata - metadata.inLock { + metadata.inLock(() => { if (epochAndTxnMetadata.coordinatorEpoch == coordinatorEpoch) { if (retryOnError(responseError)) { info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + @@ -734,13 +751,13 @@ class TransactionStateManager(brokerId: Int, info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + s"resetting pending state from ${metadata.pendingState}, aborting state transition and returning $responseError in the callback") - metadata.pendingState = None + metadata.pendingState(Optional.empty()) } } else { info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + s"aborting state transition and returning the error in the callback since the coordinator epoch has changed from ${epochAndTxnMetadata.coordinatorEpoch} to $coordinatorEpoch") } - } + }) case Right(None) => // Do nothing here, since we want to return the original append error to the user. 
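Editor's note: the new filterTransactionalIdPattern path in listTransactionStates compiles the RE2 pattern once, rejects invalid input up front, and then matches each transactional id; a null or empty filter matches everything. A standalone sketch of that flow with illustrative helper names (the real code raises InvalidRegularExpression rather than IllegalArgumentException):

import com.google.re2j.{Pattern, PatternSyntaxException}

object TransactionalIdFilterSketch {
  // Compile the filter once; None means "no filtering".
  def compileTransactionalIdFilter(filter: String): Option[Pattern] = {
    if (filter == null || filter.isEmpty) None
    else {
      try {
        Some(Pattern.compile(filter))
      } catch {
        case e: PatternSyntaxException =>
          // The real code maps this to InvalidRegularExpression for the client.
          throw new IllegalArgumentException(
            s"Transaction ID pattern `$filter` is not a valid regular expression: ${e.getMessage}.")
      }
    }
  }

  // Apply the compiled pattern to one transactional id; absent pattern matches all.
  def matchesFilter(pattern: Option[Pattern], transactionalId: String): Boolean =
    pattern.forall(_.matcher(transactionalId).matches())
}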
@@ -775,7 +792,7 @@ class TransactionStateManager(brokerId: Int, case Right(Some(epochAndMetadata)) => val metadata = epochAndMetadata.transactionMetadata - val append: Boolean = metadata.inLock { + val append: Boolean = metadata.inLock(() => { if (epochAndMetadata.coordinatorEpoch != coordinatorEpoch) { // the coordinator epoch has changed, reply to client immediately with NOT_COORDINATOR responseCallback(Errors.NOT_COORDINATOR) @@ -785,7 +802,7 @@ class TransactionStateManager(brokerId: Int, // under the same coordinator epoch, so directly append to txn log now true } - } + }) if (append) { replicaManager.appendRecords( timeout = newMetadata.txnTimeoutMs.toLong, @@ -820,7 +837,7 @@ class TransactionStateManager(brokerId: Int, private[transaction] case class TxnMetadataCacheEntry(coordinatorEpoch: Int, - metadataPerTransactionalId: Pool[String, TransactionMetadata]) { + metadataPerTransactionalId: ConcurrentMap[String, TransactionMetadata]) { override def toString: String = { s"TxnMetadataCacheEntry(coordinatorEpoch=$coordinatorEpoch, numTransactionalEntries=${metadataPerTransactionalId.size})" } diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index 506e46e6ce5d7..bfee35061f82f 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -23,9 +23,8 @@ import java.nio.file.{Files, NoSuchFileException} import java.util.concurrent._ import java.util.concurrent.atomic.AtomicInteger import kafka.server.{KafkaConfig, KafkaRaftServer} -import kafka.server.metadata.BrokerMetadataPublisher.info import kafka.utils.threadsafe -import kafka.utils.{CoreUtils, Logging, Pool} +import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.common.{DirectoryId, KafkaException, TopicPartition, Uuid} import org.apache.kafka.common.utils.{Exit, KafkaThread, Time, Utils} import org.apache.kafka.common.errors.{InconsistentTopicIdException, KafkaStorageException, LogDirNotFoundException} @@ -42,7 +41,7 @@ import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsem import java.util.{Collections, Optional, OptionalLong, Properties} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.{FileLock, Scheduler} -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, LogOffsetsListener, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, LogManager => JLogManager, LogOffsetsListener, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, OffsetCheckpointFile} import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -78,11 +77,14 @@ class LogManager(logDirs: Seq[File], logDirFailureChannel: LogDirFailureChannel, time: Time, remoteStorageSystemEnable: Boolean, - val initialTaskDelayMs: Long) extends Logging { - - import LogManager._ - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + val initialTaskDelayMs: Long, + cleanerFactory: (CleanerConfig, util.List[File], ConcurrentMap[TopicPartition, UnifiedLog], LogDirFailureChannel, Time) => LogCleaner = + (cleanerConfig, files, map, logDirFailureChannel, time) => new LogCleaner(cleanerConfig, files, map, logDirFailureChannel, time) + ) extends Logging { + // Changing the package or class name may cause incompatibility with existing 
code and metrics configuration + private val metricsPackage = "kafka.log" + private val metricsClassName = "LogManager" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val logCreationOrDeletionLock = new Object private val currentLogs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() @@ -95,7 +97,7 @@ class LogManager(logDirs: Seq[File], // Map of stray partition to stray log. This holds all stray logs detected on the broker. // Visible for testing - private val strayLogs = new Pool[TopicPartition, UnifiedLog]() + private val strayLogs = new ConcurrentHashMap[TopicPartition, UnifiedLog]() private val _liveLogDirs: ConcurrentLinkedQueue[File] = createAndValidateLogDirs(logDirs, initialOfflineDirs) @volatile private var _currentDefaultConfig = initialDefaultConfig @@ -127,9 +129,9 @@ class LogManager(logDirs: Seq[File], def directoryIdsSet: Predef.Set[Uuid] = directoryIds.values.toSet @volatile private var recoveryPointCheckpoints = liveLogDirs.map(dir => - (dir, new OffsetCheckpointFile(new File(dir, RecoveryPointCheckpointFile), logDirFailureChannel))).toMap + (dir, new OffsetCheckpointFile(new File(dir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), logDirFailureChannel))).toMap @volatile private var logStartOffsetCheckpoints = liveLogDirs.map(dir => - (dir, new OffsetCheckpointFile(new File(dir, LogStartOffsetCheckpointFile), logDirFailureChannel))).toMap + (dir, new OffsetCheckpointFile(new File(dir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE), logDirFailureChannel))).toMap private val preferredLogDirs = new ConcurrentHashMap[TopicPartition, String]() @@ -261,7 +263,7 @@ class LogManager(logDirs: Seq[File], private def lockLogDirs(dirs: Seq[File]): Seq[FileLock] = { dirs.flatMap { dir => try { - val lock = new FileLock(new File(dir, LockFileName)) + val lock = new FileLock(new File(dir, JLogManager.LOCK_FILE_NAME)) if (!lock.tryLock()) throw new KafkaException("Failed to acquire lock on file .lock in " + lock.file.getParent + ". A Kafka instance in another process or thread is using this directory.") @@ -630,8 +632,11 @@ class LogManager(logDirs: Seq[File], initialTaskDelayMs) } if (cleanerConfig.enableCleaner) { - _cleaner = new LogCleaner(cleanerConfig, liveLogDirs.asJava, currentLogs, logDirFailureChannel, time) + _cleaner = cleanerFactory(cleanerConfig, liveLogDirs.asJava, currentLogs, logDirFailureChannel, time) _cleaner.startup() + } else { + warn("The config `log.cleaner.enable` is deprecated and will be removed in Kafka 5.0. Starting from Kafka 5.0, the log cleaner will always be enabled, and this config will be ignored.") + } } @@ -680,7 +685,7 @@ class LogManager(logDirs: Seq[File], try { jobs.foreachEntry { (dir, dirJobs) => - if (waitForAllToComplete(dirJobs, + if (JLogManager.waitForAllToComplete(dirJobs.toList.asJava, e => warn(s"There was an error in one of the threads during LogManager shutdown: ${e.getCause}"))) { val logs = logsInDir(localLogsByDir, dir) @@ -893,7 +898,7 @@ class LogManager(logDirs: Seq[File], /** * Resume cleaning of the provided partition and log a message about it. 
*/ - private def resumeCleaning(topicPartition: TopicPartition): Unit = { + def resumeCleaning(topicPartition: TopicPartition): Unit = { if (cleaner != null) { cleaner.resumeCleaning(util.Set.of(topicPartition)) info(s"Cleaning for partition $topicPartition is resumed") @@ -1520,25 +1525,6 @@ class LogManager(logDirs: Seq[File], } object LogManager { - val LockFileName = ".lock" - - /** - * Wait all jobs to complete - * @param jobs jobs - * @param callback this will be called to handle the exception caused by each Future#get - * @return true if all pass. Otherwise, false - */ - private[log] def waitForAllToComplete(jobs: Seq[Future[_]], callback: Throwable => Unit): Boolean = { - jobs.count(future => Try(future.get) match { - case Success(_) => false - case Failure(e) => - callback(e) - true - }) == 0 - } - - val RecoveryPointCheckpointFile = "recovery-point-offset-checkpoint" - val LogStartOffsetCheckpointFile = "log-start-offset-checkpoint" def apply(config: KafkaConfig, initialOfflineDirs: Seq[String], @@ -1555,7 +1541,7 @@ object LogManager { val cleanerConfig = new CleanerConfig(config) val transactionLogConfig = new TransactionLogConfig(config) - new LogManager(logDirs = config.logDirs.map(new File(_).getAbsoluteFile), + new LogManager(logDirs = config.logDirs.asScala.map(new File(_).getAbsoluteFile), initialOfflineDirs = initialOfflineDirs.map(new File(_).getAbsoluteFile), configRepository = configRepository, initialDefaultConfig = defaultLogConfig, @@ -1575,45 +1561,4 @@ object LogManager { remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled, initialTaskDelayMs = config.logInitialTaskDelayMs) } - - /** - * Returns true if the given log should not be on the current broker - * according to the metadata image. - * - * @param brokerId The ID of the current broker. - * @param newTopicsImage The new topics image after broker has been reloaded - * @param log The log object to check - * @return true if the log should not exist on the broker, false otherwise. - */ - def isStrayKraftReplica( - brokerId: Int, - newTopicsImage: TopicsImage, - log: UnifiedLog - ): Boolean = { - if (log.topicId.isEmpty) { - // Missing topic ID could result from storage failure or unclean shutdown after topic creation but before flushing - // data to the `partition.metadata` file. And before appending data to the log, the `partition.metadata` is always - // flushed to disk. So if the topic ID is missing, it mostly means no data was appended, and we can treat this as - // a stray log. 
- info(s"The topicId does not exist in $log, treat it as a stray log") - return true - } - - val topicId = log.topicId.get - val partitionId = log.topicPartition.partition() - Option(newTopicsImage.getPartition(topicId, partitionId)) match { - case Some(partition) => - if (!partition.replicas.contains(brokerId)) { - info(s"Found stray log dir $log: the current replica assignment ${partition.replicas.mkString("[", ", ", "]")} " + - s"does not contain the local brokerId $brokerId.") - true - } else { - false - } - - case None => - info(s"Found stray log dir $log: the topicId $topicId does not exist in the metadata image") - true - } - } } diff --git a/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala b/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala index 83973ede23dbf..fe1050222b12b 100755 --- a/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala +++ b/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala @@ -33,8 +33,8 @@ class KafkaMetricsConfig(props: VerifiableProperties) { * Comma-separated list of reporter types. These classes should be on the * classpath and will be instantiated at run-time. */ - val reporters: Seq[String] = Csv.parseCsvList(props.getString(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG, - MetricConfigs.KAFKA_METRIC_REPORTER_CLASSES_DEFAULT)).asScala + val reporters: Seq[String] = (if (props.containsKey(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG)) + Csv.parseCsvList(props.getString(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG)) else MetricConfigs.KAFKA_METRIC_REPORTER_CLASSES_DEFAULT).asScala /** * The metrics polling interval (in seconds). diff --git a/core/src/main/scala/kafka/network/RequestChannel.scala b/core/src/main/scala/kafka/network/RequestChannel.scala index dfb96ef8e11da..a0fbc0452060a 100644 --- a/core/src/main/scala/kafka/network/RequestChannel.scala +++ b/core/src/main/scala/kafka/network/RequestChannel.scala @@ -227,6 +227,8 @@ object RequestChannel extends Logging { Seq(specifiedMetricName, header.apiKey.name) } else if (header.apiKey == ApiKeys.ADD_PARTITIONS_TO_TXN && body[AddPartitionsToTxnRequest].allVerifyOnlyRequest) { Seq(RequestMetrics.VERIFY_PARTITIONS_IN_TXN_METRIC_NAME) + } else if (header.apiKey == ApiKeys.LIST_CONFIG_RESOURCES && header.apiVersion == 0) { + Seq(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME, header.apiKey.name) } else { Seq(header.apiKey.name) } @@ -344,7 +346,10 @@ class RequestChannel(val queueSize: Int, val metrics: RequestChannelMetrics) { import RequestChannel._ - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.network" + private val metricsClassName = "RequestChannel" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val requestQueue = new ArrayBlockingQueue[BaseRequest](queueSize) private val processors = new ConcurrentHashMap[Int, Processor]() diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 4163b563f019a..306b633f6fa37 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -29,7 +29,6 @@ import kafka.network.Processor._ import kafka.network.RequestChannel.{CloseConnectionResponse, EndThrottlingResponse, NoOpResponse, SendResponse, StartThrottlingResponse} import kafka.network.SocketServer._ import 
kafka.server.{BrokerReconfigurable, KafkaConfig} -import org.apache.kafka.network.EndPoint import org.apache.kafka.common.message.ApiMessageType.ListenerType import kafka.utils._ import org.apache.kafka.common.config.ConfigException @@ -79,8 +78,10 @@ class SocketServer( val socketFactory: ServerSocketFactory = ServerSocketFactory.INSTANCE, val connectionDisconnectListeners: Seq[ConnectionDisconnectListener] = Seq.empty ) extends Logging with BrokerReconfigurable { - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.network" + private val metricsClassName = "SocketServer" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val maxQueuedRequests = config.queuedMaxRequests @@ -96,7 +97,7 @@ class SocketServer( memoryPoolSensor.add(new Meter(TimeUnit.MILLISECONDS, memoryPoolDepletedPercentMetricName, memoryPoolDepletedTimeMetricName)) private val memoryPool = if (config.queuedMaxBytes > 0) new SimpleMemoryPool(config.queuedMaxBytes, config.socketRequestMaxBytes, false, memoryPoolSensor) else MemoryPool.NONE // data-plane - private[network] val dataPlaneAcceptors = new ConcurrentHashMap[EndPoint, DataPlaneAcceptor]() + private[network] val dataPlaneAcceptors = new ConcurrentHashMap[Endpoint, DataPlaneAcceptor]() val dataPlaneRequestChannel = new RequestChannel(maxQueuedRequests, time, apiVersionManager.newRequestMetrics) private[this] val nextProcessorId: AtomicInteger = new AtomicInteger(0) @@ -161,8 +162,8 @@ class SocketServer( * Therefore, we do not know that any particular request processor will be running by the end of * this function -- just that it might be running. * - * @param authorizerFutures Future per [[EndPoint]] used to wait before starting the - * processor corresponding to the [[EndPoint]]. Any endpoint + * @param authorizerFutures Future per [[Endpoint]] used to wait before starting the + * processor corresponding to the [[Endpoint]]. Any endpoint * that does not appear in this map will be started once all * authorizerFutures are complete. * @@ -181,7 +182,7 @@ class SocketServer( // Because of ephemeral ports, we need to match acceptors to futures by looking at // the listener name, rather than the endpoint object. 
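// A sketch of the EndPoint -> Endpoint switch applied throughout this file: the common
// Endpoint type exposes the raw listener string, so former endPoint.listenerName call
// sites now normalise explicitly. Host and port below are illustrative.
import org.apache.kafka.common.Endpoint
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.SecurityProtocol

val exampleEndpoint = new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9092)
val exampleListenerName: ListenerName = ListenerName.normalised(exampleEndpoint.listener) // was endPoint.listenerName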
val authorizerFuture = authorizerFutures.find { - case (endpoint, _) => acceptor.endPoint.listenerName.value().equals(endpoint.listenerName().get()) + case (endpoint, _) => acceptor.endPoint.listener.equals(endpoint.listener()) } match { case None => allAuthorizerFuturesComplete case Some((_, future)) => future @@ -210,23 +211,24 @@ class SocketServer( enableFuture } - private def createDataPlaneAcceptorAndProcessors(endpoint: EndPoint): Unit = synchronized { + private def createDataPlaneAcceptorAndProcessors(endpoint: Endpoint): Unit = synchronized { if (stopped) { throw new RuntimeException("Can't create new data plane acceptor and processors: SocketServer is stopped.") } - val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(endpoint.listenerName.configPrefix) - connectionQuotas.addListener(config, endpoint.listenerName) - val isPrivilegedListener = config.interBrokerListenerName == endpoint.listenerName + val listenerName = ListenerName.normalised(endpoint.listener) + val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(listenerName.configPrefix) + connectionQuotas.addListener(config, listenerName) + val isPrivilegedListener = config.interBrokerListenerName == listenerName val dataPlaneAcceptor = createDataPlaneAcceptor(endpoint, isPrivilegedListener, dataPlaneRequestChannel) config.addReconfigurable(dataPlaneAcceptor) dataPlaneAcceptor.configure(parsedConfigs) dataPlaneAcceptors.put(endpoint, dataPlaneAcceptor) - info(s"Created data-plane acceptor and processors for endpoint : ${endpoint.listenerName}") + info(s"Created data-plane acceptor and processors for endpoint : ${listenerName}") } - private def endpoints = config.listeners.map(l => l.listenerName -> l).toMap + private def endpoints = config.listeners.map(l => ListenerName.normalised(l.listener) -> l).toMap - protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { + protected def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { new DataPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, isPrivilegedListener, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager) } @@ -277,7 +279,7 @@ class SocketServer( /** * This method is called to dynamically add listeners. 
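// Related sketch for createDataPlaneAcceptorAndProcessors above: normalising the listener
// string also yields the per-listener override prefix used to build parsedConfigs.
import org.apache.kafka.common.network.ListenerName

val overridePrefix = ListenerName.normalised("INTERNAL").configPrefix // "listener.name.internal."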
*/ - def addListeners(listenersAdded: Seq[EndPoint]): Unit = synchronized { + def addListeners(listenersAdded: Seq[Endpoint]): Unit = synchronized { if (stopped) { throw new RuntimeException("can't add new listeners: SocketServer is stopped.") } @@ -297,10 +299,10 @@ class SocketServer( } } - def removeListeners(listenersRemoved: Seq[EndPoint]): Unit = synchronized { + def removeListeners(listenersRemoved: Seq[Endpoint]): Unit = synchronized { info(s"Removing data-plane listeners for endpoints $listenersRemoved") listenersRemoved.foreach { endpoint => - connectionQuotas.removeListener(config, endpoint.listenerName) + connectionQuotas.removeListener(config, ListenerName.normalised(endpoint.listener)) dataPlaneAcceptors.asScala.remove(endpoint).foreach { acceptor => acceptor.beginShutdown() acceptor.close() @@ -345,7 +347,7 @@ class SocketServer( // For test usage def dataPlaneAcceptor(listenerName: String): Option[DataPlaneAcceptor] = { dataPlaneAcceptors.asScala.foreach { case (endPoint, acceptor) => - if (endPoint.listenerName.value() == listenerName) + if (endPoint.listener == listenerName) return Some(acceptor) } None @@ -374,7 +376,7 @@ object DataPlaneAcceptor { } class DataPlaneAcceptor(socketServer: SocketServer, - endPoint: EndPoint, + endPoint: Endpoint, config: KafkaConfig, nodeId: Int, connectionQuotas: ConnectionQuotas, @@ -404,7 +406,7 @@ class DataPlaneAcceptor(socketServer: SocketServer, * Returns the listener name associated with this reconfigurable. Listener-specific * configs corresponding to this listener name are provided for reconfiguration. */ - override def listenerName(): ListenerName = endPoint.listenerName + override def listenerName(): ListenerName = ListenerName.normalised(endPoint.listener) /** * Returns the names of configs that may be reconfigured. @@ -451,7 +453,7 @@ class DataPlaneAcceptor(socketServer: SocketServer, val newNumNetworkThreads = configs.get(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG).asInstanceOf[Int] if (newNumNetworkThreads != processors.length) { - info(s"Resizing network thread pool size for ${endPoint.listenerName} listener from ${processors.length} to $newNumNetworkThreads") + info(s"Resizing network thread pool size for ${endPoint.listener} listener from ${processors.length} to $newNumNetworkThreads") if (newNumNetworkThreads > processors.length) { addProcessors(newNumNetworkThreads - processors.length) } else if (newNumNetworkThreads < processors.length) { @@ -472,7 +474,7 @@ class DataPlaneAcceptor(socketServer: SocketServer, * Thread that accepts and configures new connections. There is one of these per endpoint. 
*/ private[kafka] abstract class Acceptor(val socketServer: SocketServer, - val endPoint: EndPoint, + val endPoint: Endpoint, var config: KafkaConfig, nodeId: Int, val connectionQuotas: ConnectionQuotas, @@ -485,9 +487,6 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, memoryPool: MemoryPool, apiVersionManager: ApiVersionManager) extends Runnable with Logging { - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - val shouldRun = new AtomicBoolean(true) private val sendBufferSize = config.socketSendBufferBytes @@ -515,15 +514,15 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, private val backwardCompatibilityMetricGroup = new KafkaMetricsGroup("kafka.network", "Acceptor") private val blockedPercentMeterMetricName = backwardCompatibilityMetricGroup.metricName( "AcceptorBlockedPercent", - Map(ListenerMetricTag -> endPoint.listenerName.value).asJava) - private val blockedPercentMeter = metricsGroup.newMeter(blockedPercentMeterMetricName,"blocked time", TimeUnit.NANOSECONDS) + Map(ListenerMetricTag -> endPoint.listener).asJava) + private val blockedPercentMeter = backwardCompatibilityMetricGroup.newMeter(blockedPercentMeterMetricName,"blocked time", TimeUnit.NANOSECONDS) private var currentProcessorIndex = 0 private[network] val throttledSockets = new mutable.PriorityQueue[DelayedCloseSocket]() private val started = new AtomicBoolean() private[network] val startedFuture = new CompletableFuture[Void]() val thread: KafkaThread = KafkaThread.nonDaemon( - s"data-plane-kafka-socket-acceptor-${endPoint.listenerName}-${endPoint.securityProtocol}-${endPoint.port}", + s"data-plane-kafka-socket-acceptor-${endPoint.listener}-${endPoint.securityProtocol}-${endPoint.port}", this) def start(): Unit = synchronized { @@ -535,19 +534,19 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, serverChannel = openServerSocket(endPoint.host, endPoint.port, listenBacklogSize) debug(s"Opened endpoint ${endPoint.host}:${endPoint.port}") } - debug(s"Starting processors for listener ${endPoint.listenerName}") + debug(s"Starting processors for listener ${endPoint.listener}") processors.foreach(_.start()) - debug(s"Starting acceptor thread for listener ${endPoint.listenerName}") + debug(s"Starting acceptor thread for listener ${endPoint.listener}") thread.start() startedFuture.complete(null) started.set(true) } catch { case e: ClosedChannelException => - debug(s"Refusing to start acceptor for ${endPoint.listenerName} since the acceptor has already been shut down.") + debug(s"Refusing to start acceptor for ${endPoint.listener} since the acceptor has already been shut down.") startedFuture.completeExceptionally(e) case t: Throwable => - error(s"Unable to start acceptor for ${endPoint.listenerName}", t) - startedFuture.completeExceptionally(new RuntimeException(s"Unable to start acceptor for ${endPoint.listenerName}", t)) + error(s"Unable to start acceptor for ${endPoint.listener}", t) + startedFuture.completeExceptionally(new RuntimeException(s"Unable to start acceptor for ${endPoint.listener}", t)) } } @@ -628,7 +627,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, new InetSocketAddress(host, port) } val serverChannel = socketServer.socketFactory.openServerSocket( - endPoint.listenerName.value(), + endPoint.listener, socketAddress, listenBacklogSize, recvBufferSize) @@ -682,14 +681,15 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, private def accept(key: SelectionKey): 
Option[SocketChannel] = { val serverSocketChannel = key.channel().asInstanceOf[ServerSocketChannel] val socketChannel = serverSocketChannel.accept() + val listenerName = ListenerName.normalised(endPoint.listener) try { - connectionQuotas.inc(endPoint.listenerName, socketChannel.socket.getInetAddress, blockedPercentMeter) + connectionQuotas.inc(listenerName, socketChannel.socket.getInetAddress, blockedPercentMeter) configureAcceptedSocketChannel(socketChannel) Some(socketChannel) } catch { case e: TooManyConnectionsException => info(s"Rejected connection from ${e.ip}, address already has the configured maximum of ${e.count} connections.") - connectionQuotas.closeChannel(this, endPoint.listenerName, socketChannel) + connectionQuotas.closeChannel(this, listenerName, socketChannel) None case e: ConnectionThrottledException => val ip = socketChannel.socket.getInetAddress @@ -699,7 +699,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, None case e: IOException => error(s"Encountered an error while configuring the connection, closing it.", e) - connectionQuotas.closeChannel(this, endPoint.listenerName, socketChannel) + connectionQuotas.closeChannel(this, listenerName, socketChannel) None } } @@ -741,7 +741,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, def wakeup(): Unit = nioSelector.wakeup() def addProcessors(toCreate: Int): Unit = synchronized { - val listenerName = endPoint.listenerName + val listenerName = ListenerName.normalised(endPoint.listener) val securityProtocol = endPoint.securityProtocol val listenerProcessors = new ArrayBuffer[Processor]() @@ -761,7 +761,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, listenerName: ListenerName, securityProtocol: SecurityProtocol, connectionDisconnectListeners: Seq[ConnectionDisconnectListener]): Processor = { - val name = s"data-plane-kafka-network-thread-$nodeId-${endPoint.listenerName}-${endPoint.securityProtocol}-$id" + val name = s"data-plane-kafka-network-thread-$nodeId-${endPoint.listener}-${endPoint.securityProtocol}-$id" new Processor(id, time, config.socketRequestMaxBytes, @@ -833,7 +833,9 @@ private[kafka] class Processor( threadName: String, connectionDisconnectListeners: Seq[ConnectionDisconnectListener] ) extends Runnable with Logging { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + private val metricsPackage = "kafka.network" + private val metricsClassName = "Processor" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) val shouldRun: AtomicBoolean = new AtomicBoolean(true) private val started: AtomicBoolean = new AtomicBoolean() diff --git a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala index 01f69b374bc3d..0ee16a1cd78ed 100644 --- a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala +++ b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala @@ -26,9 +26,11 @@ import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.InvalidConfigurationException import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record.{MemoryRecords, Records} +import org.apache.kafka.common.utils.LogContext import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} -import org.apache.kafka.raft.{Isolation, KafkaRaftClient, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, MetadataLogConfig, OffsetAndEpoch, OffsetMetadata, 
ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} +import org.apache.kafka.raft.{Isolation, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, MetadataLogConfig, OffsetMetadata, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} +import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler @@ -71,7 +73,7 @@ final class KafkaMetadataLog private ( case _ => throw new IllegalArgumentException(s"Unhandled read isolation $readIsolation") } - val fetchInfo = log.read(startOffset, config.maxFetchSizeInBytes, isolation, true) + val fetchInfo = log.read(startOffset, config.internalMaxFetchSizeInBytes, isolation, true) new LogFetchInfo( fetchInfo.records, @@ -139,14 +141,14 @@ final class KafkaMetadataLog private ( (log.endOffsetForEpoch(epoch).toScala, earliestSnapshotId().toScala) match { case (Some(offsetAndEpoch), Some(snapshotId)) if ( offsetAndEpoch.offset == snapshotId.offset && - offsetAndEpoch.leaderEpoch == epoch) => + offsetAndEpoch.epoch() == epoch) => // The epoch is smaller than the smallest epoch on the log. Override the diverging // epoch to the oldest snapshot which should be the snapshot at the log start offset new OffsetAndEpoch(snapshotId.offset, snapshotId.epoch) case (Some(offsetAndEpoch), _) => - new OffsetAndEpoch(offsetAndEpoch.offset, offsetAndEpoch.leaderEpoch) + new OffsetAndEpoch(offsetAndEpoch.offset, offsetAndEpoch.epoch()) case (None, _) => new OffsetAndEpoch(endOffset.offset, lastFetchedEpoch) @@ -417,7 +419,7 @@ final class KafkaMetadataLog private ( */ private def readSnapshotTimestamp(snapshotId: OffsetAndEpoch): Option[Long] = { readSnapshot(snapshotId).toScala.map { reader => - Snapshots.lastContainedLogTimestamp(reader) + Snapshots.lastContainedLogTimestamp(reader, new LogContext(logIdent)) } } @@ -555,7 +557,7 @@ final class KafkaMetadataLog private ( scheduler.scheduleOnce( "delete-snapshot-files", () => KafkaMetadataLog.deleteSnapshotFiles(log.dir.toPath, expiredSnapshots), - config.deleteDelayMillis + config.internalDeleteDelayMillis ) } } @@ -586,8 +588,11 @@ object KafkaMetadataLog extends Logging { nodeId: Int ): KafkaMetadataLog = { val props = new Properties() - props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.maxBatchSizeInBytes.toString) - props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString) + props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.internalMaxBatchSizeInBytes.toString) + if (config.internalSegmentBytes() != null) + props.setProperty(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, config.internalSegmentBytes().toString) + else + props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString) props.setProperty(TopicConfig.SEGMENT_MS_CONFIG, config.logSegmentMillis.toString) props.setProperty(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT.toString) @@ -597,11 +602,7 @@ object KafkaMetadataLog extends Logging { LogConfig.validate(props) val defaultLogConfig = new LogConfig(props) - if (config.logSegmentBytes < config.logSegmentMinBytes) { - throw new InvalidConfigurationException( - s"Cannot set ${MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG} below ${config.logSegmentMinBytes}: ${config.logSegmentBytes}" - ) - } else if (defaultLogConfig.retentionMs >= 0) { + if (defaultLogConfig.retentionMs >= 0) { throw new InvalidConfigurationException( s"Cannot set 
${TopicConfig.RETENTION_MS_CONFIG} above -1: ${defaultLogConfig.retentionMs}." ) @@ -637,9 +638,8 @@ object KafkaMetadataLog extends Logging { nodeId ) - // Print a warning if users have overridden the internal config - if (config.logSegmentMinBytes != KafkaRaftClient.MAX_BATCH_SIZE_BYTES) { - metadataLog.error(s"Overriding ${MetadataLogConfig.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG} is only supported for testing. Setting " + + if (defaultLogConfig.segmentSize() < config.logSegmentBytes()) { + metadataLog.error(s"Overriding ${MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG} is only supported for testing. Setting " + s"this value too low may lead to an inability to write batches of metadata records.") } diff --git a/core/src/main/scala/kafka/raft/RaftManager.scala b/core/src/main/scala/kafka/raft/KafkaRaftManager.scala similarity index 85% rename from core/src/main/scala/kafka/raft/RaftManager.scala rename to core/src/main/scala/kafka/raft/KafkaRaftManager.scala index 84dfa5ebee001..86950e1ce2540 100644 --- a/core/src/main/scala/kafka/raft/RaftManager.scala +++ b/core/src/main/scala/kafka/raft/KafkaRaftManager.scala @@ -20,17 +20,13 @@ import java.io.File import java.net.InetSocketAddress import java.nio.file.Files import java.nio.file.Paths -import java.util.OptionalInt +import java.util.{OptionalInt, Collection => JCollection, Map => JMap} import java.util.concurrent.CompletableFuture -import java.util.{Map => JMap} -import java.util.{Collection => JCollection} -import kafka.log.LogManager import kafka.server.KafkaConfig import kafka.utils.CoreUtils import kafka.utils.Logging import org.apache.kafka.clients.{ApiVersions, ManualMetadataUpdater, MetadataRecoveryStrategy, NetworkClient} import org.apache.kafka.common.KafkaException -import org.apache.kafka.common.Node import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.Uuid import org.apache.kafka.common.metrics.Metrics @@ -41,17 +37,16 @@ import org.apache.kafka.common.requests.RequestHeader import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.{LogContext, Time, Utils} -import org.apache.kafka.raft.{Endpoints, ExternalKRaftMetrics, FileQuorumStateStore, KafkaNetworkChannel, KafkaRaftClient, KafkaRaftClientDriver, LeaderAndEpoch, MetadataLogConfig, QuorumConfig, RaftClient, ReplicatedLog, TimingWheelExpirationService} +import org.apache.kafka.raft.{Endpoints, ExternalKRaftMetrics, FileQuorumStateStore, KafkaNetworkChannel, KafkaRaftClient, KafkaRaftClientDriver, MetadataLogConfig, QuorumConfig, RaftManager, ReplicatedLog, TimingWheelExpirationService} import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.common.Feature import org.apache.kafka.server.common.serialization.RecordSerde import org.apache.kafka.server.util.{FileLock, KafkaScheduler} import org.apache.kafka.server.fault.FaultHandler import org.apache.kafka.server.util.timer.SystemTimer -import org.apache.kafka.storage.internals.log.UnifiedLog +import org.apache.kafka.storage.internals.log.{LogManager, UnifiedLog} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters._ object KafkaRaftManager { private def createLogDirectory(logDir: File, logDirName: String): File = { @@ -62,7 +57,7 @@ object KafkaRaftManager { } private def lockDataDir(dataDir: File): FileLock = { - val lock = new FileLock(new File(dataDir, LogManager.LockFileName)) + val lock = new FileLock(new File(dataDir, LogManager.LOCK_FILE_NAME)) if 
(!lock.tryLock()) { throw new KafkaException( @@ -80,34 +75,12 @@ object KafkaRaftManager { private def hasDifferentLogDir(config: KafkaConfig): Boolean = { !config .logDirs + .asScala .map(Paths.get(_).toAbsolutePath) .contains(Paths.get(config.metadataLogDir).toAbsolutePath) } } -trait RaftManager[T] { - def handleRequest( - context: RequestContext, - header: RequestHeader, - request: ApiMessage, - createdTimeMs: Long - ): CompletableFuture[ApiMessage] - - def register( - listener: RaftClient.Listener[T] - ): Unit - - def leaderAndEpoch: LeaderAndEpoch - - def client: RaftClient[T] - - def replicatedLog: ReplicatedLog - - def voterNode(id: Int, listener: ListenerName): Option[Node] - - def recordSerde: RecordSerde[T] -} - class KafkaRaftManager[T]( clusterId: String, config: KafkaConfig, @@ -178,12 +151,6 @@ class KafkaRaftManager[T]( CoreUtils.swallow(dataDirLock.foreach(_.destroy()), this) } - override def register( - listener: RaftClient.Listener[T] - ): Unit = { - client.register(listener) - } - override def handleRequest( context: RequestContext, header: RequestHeader, @@ -236,11 +203,9 @@ class KafkaRaftManager[T]( } private def buildNetworkClient(): (ListenerName, NetworkClient) = { - val controllerListenerName = new ListenerName(config.controllerListenerNames.head) - val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse( - controllerListenerName, - SecurityProtocol.forName(controllerListenerName.value()) - ) + val controllerListenerName = new ListenerName(config.controllerListenerNames.get(0)) + val controllerSecurityProtocol = Option(config.effectiveListenerSecurityProtocolMap.get(controllerListenerName)) + .getOrElse(SecurityProtocol.forName(controllerListenerName.value())) val channelBuilder = ChannelBuilders.clientChannelBuilder( controllerSecurityProtocol, JaasContext.Type.SERVER, @@ -294,13 +259,5 @@ class KafkaRaftManager[T]( (controllerListenerName, networkClient) } - override def leaderAndEpoch: LeaderAndEpoch = { - client.leaderAndEpoch - } - - override def voterNode(id: Int, listener: ListenerName): Option[Node] = { - client.voterNode(id, listener).toScala - } - override def recordSerde: RecordSerde[T] = serde } diff --git a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala index 764693ca3ace8..958c92ac1854b 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala @@ -22,13 +22,18 @@ import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.common.utils.Utils import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.PartitionFetchState import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: String, clientId: String, numFetchers: Int) extends Logging { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = this.getClass.getSimpleName + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) // map of (source broker_id, fetcher_id per source broker) => fetcher. 
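// A sketch of the lookup change in buildNetworkClient above: the new code reads
// effectiveListenerSecurityProtocolMap as a java.util.Map (an assumption inferred from
// the switch to a null-returning get), so a missing listener falls back to the protocol
// implied by the listener name.
import java.util.{Map => JMap}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.SecurityProtocol

def securityProtocolFor(protocolMap: JMap[ListenerName, SecurityProtocol],
                        listener: ListenerName): SecurityProtocol =
  Option(protocolMap.get(listener)).getOrElse(SecurityProtocol.forName(listener.value()))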
// package private for test @@ -43,8 +48,7 @@ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: Stri metricsGroup.newGauge("MaxLag", () => { // current max lag across all fetchers/topics/partitions fetcherThreadMap.values.foldLeft(0L) { (curMaxLagAll, fetcherThread) => - val maxLagThread = fetcherThread.fetcherLagStats.stats.values.foldLeft(0L)((curMaxLagThread, lagMetrics) => - math.max(curMaxLagThread, lagMetrics.lag)) + val maxLagThread = fetcherThread.fetcherLagStats.stats.values.stream().mapToLong(v => v.lag).max().orElse(0L) math.max(curMaxLagAll, maxLagThread) } }, tags) @@ -70,7 +74,7 @@ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: Stri if (id.fetcherId >= newSize) thread.shutdown() partitionStates.foreachEntry { (topicPartition, currentFetchState) => - val initialFetchState = InitialFetchState(currentFetchState.topicId, thread.leader.brokerEndPoint(), + val initialFetchState = InitialFetchState(currentFetchState.topicId.toScala, thread.leader.brokerEndPoint(), currentLeaderEpoch = currentFetchState.currentLeaderEpoch, initOffset = currentFetchState.fetchOffset) allRemovedPartitionsMap += topicPartition -> initialFetchState diff --git a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala index 50436dda1cd37..1e8841df0ca98 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala @@ -18,9 +18,8 @@ package kafka.server import com.yammer.metrics.core.Meter -import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.CoreUtils.inLock -import kafka.utils.{Logging, Pool} +import kafka.utils.Logging import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.PartitionStates import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset @@ -30,9 +29,14 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{FileRecords, MemoryRecords, Records} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.requests._ -import org.apache.kafka.common.utils.Time + import org.apache.kafka.common.{ClientIdAndBroker, InvalidRecordException, TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.LeaderEndPoint +import org.apache.kafka.server.ResultWithPartitions +import org.apache.kafka.server.ReplicaState +import org.apache.kafka.server.PartitionFetchState +import org.apache.kafka.server.log.remote.storage.RetriableRemoteStorageException import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.ShutdownableThread import org.apache.kafka.storage.internals.log.LogAppendInfo @@ -41,7 +45,7 @@ import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.nio.ByteBuffer import java.util import java.util.Optional -import java.util.concurrent.TimeUnit +import java.util.concurrent.{ConcurrentHashMap, TimeUnit} import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.locks.ReentrantLock import scala.collection.{Map, Set, mutable} @@ -116,9 +120,11 @@ abstract class AbstractFetcherThread(name: String, private def maybeFetch(): Unit = { val fetchRequestOpt = inLock(partitionMapLock) { - val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = 
leader.buildFetch(partitionStates.partitionStateMap.asScala) + val result = leader.buildFetch(partitionStates.partitionStateMap) + val fetchRequestOpt = result.result + val partitionsWithError = result.partitionsWithError - handlePartitionsWithErrors(partitionsWithError, "maybeFetch") + handlePartitionsWithErrors(partitionsWithError.asScala, "maybeFetch") if (fetchRequestOpt.isEmpty) { trace(s"There are no active partitions. Back off for $fetchBackOffMs ms before sending a fetch request") @@ -128,9 +134,9 @@ abstract class AbstractFetcherThread(name: String, fetchRequestOpt } - fetchRequestOpt.foreach { case ReplicaFetch(sessionPartitions, fetchRequest) => - processFetchRequest(sessionPartitions, fetchRequest) - } + fetchRequestOpt.ifPresent(replicaFetch => + processFetchRequest(replicaFetch.partitionData, replicaFetch.fetchRequest) + ) } // deal with partitions with errors, potentially due to leadership changes @@ -204,11 +210,13 @@ abstract class AbstractFetcherThread(name: String, * occur during truncation. */ private def truncateToEpochEndOffsets(latestEpochsForPartitions: Map[TopicPartition, EpochData]): Unit = { - val endOffsets = leader.fetchEpochEndOffsets(latestEpochsForPartitions) - //Ensure we hold a lock during truncation. + val endOffsets = leader.fetchEpochEndOffsets(latestEpochsForPartitions.asJava) + // Ensure we hold a lock during truncation + inLock(partitionMapLock) { //Check no leadership and no leader epoch changes happened whilst we were unlocked, fetching epochs - val epochEndOffsets = endOffsets.filter { case (tp, _) => + + val epochEndOffsets = endOffsets.asScala.filter { case (tp, _) => val curPartitionState = partitionStates.stateValue(tp) val partitionEpochRequest = latestEpochsForPartitions.getOrElse(tp, { throw new IllegalStateException( @@ -218,18 +226,18 @@ abstract class AbstractFetcherThread(name: String, curPartitionState != null && leaderEpochInRequest == curPartitionState.currentLeaderEpoch } - val ResultWithPartitions(fetchOffsets, partitionsWithError) = maybeTruncateToEpochEndOffsets(epochEndOffsets, latestEpochsForPartitions) - handlePartitionsWithErrors(partitionsWithError, "truncateToEpochEndOffsets") - updateFetchOffsetAndMaybeMarkTruncationComplete(fetchOffsets) + val result = maybeTruncateToEpochEndOffsets(epochEndOffsets, latestEpochsForPartitions) + handlePartitionsWithErrors(result.partitionsWithError.asScala, "truncateToEpochEndOffsets") + updateFetchOffsetAndMaybeMarkTruncationComplete(result.result) } } // Visibility for unit tests protected[server] def truncateOnFetchResponse(epochEndOffsets: Map[TopicPartition, EpochEndOffset]): Unit = { inLock(partitionMapLock) { - val ResultWithPartitions(fetchOffsets, partitionsWithError) = maybeTruncateToEpochEndOffsets(epochEndOffsets, Map.empty) - handlePartitionsWithErrors(partitionsWithError, "truncateOnFetchResponse") - updateFetchOffsetAndMaybeMarkTruncationComplete(fetchOffsets) + val result = maybeTruncateToEpochEndOffsets(epochEndOffsets, Map.empty) + handlePartitionsWithErrors(result.partitionsWithError.asScala, "truncateOnFetchResponse") + updateFetchOffsetAndMaybeMarkTruncationComplete(result.result) } } @@ -284,7 +292,7 @@ abstract class AbstractFetcherThread(name: String, } } - ResultWithPartitions(fetchOffsets, partitionsWithError) + new ResultWithPartitions(fetchOffsets, partitionsWithError.asJava) } /** @@ -316,7 +324,7 @@ abstract class AbstractFetcherThread(name: String, try { trace(s"Sending fetch request $fetchRequest") - responseData = leader.fetch(fetchRequest) + responseData = 
leader.fetch(fetchRequest).asScala } catch { case t: Throwable => if (isRunning) { @@ -381,8 +389,8 @@ abstract class AbstractFetcherThread(name: String, val lastFetchedEpoch = if (logAppendInfo.lastLeaderEpoch.isPresent) logAppendInfo.lastLeaderEpoch else currentFetchState.lastFetchedEpoch // Update partitionStates only if there is no exception during processPartitionData - val newFetchState = PartitionFetchState(currentFetchState.topicId, nextOffset, Some(lag), - currentFetchState.currentLeaderEpoch, state = Fetching, lastFetchedEpoch) + val newFetchState = new PartitionFetchState(currentFetchState.topicId, nextOffset, Optional.of(lag), + currentFetchState.currentLeaderEpoch, ReplicaState.FETCHING, lastFetchedEpoch) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) if (validBytes > 0) fetcherStats.byteRate.mark(validBytes) } @@ -475,9 +483,9 @@ abstract class AbstractFetcherThread(name: String, partitionMapLock.lockInterruptibly() try { Option(partitionStates.stateValue(topicPartition)).foreach { state => - val newState = PartitionFetchState(state.topicId, math.min(truncationOffset, state.fetchOffset), - state.lag, state.currentLeaderEpoch, state.delay, state = Truncating, - lastFetchedEpoch = Optional.empty) + val newState = new PartitionFetchState(state.topicId, math.min(truncationOffset, state.fetchOffset), + state.lag, state.currentLeaderEpoch, state.delay, ReplicaState.TRUNCATING, + Optional.empty()) partitionStates.updateAndMoveToEnd(topicPartition, newState) partitionMapCond.signalAll() } @@ -507,12 +515,12 @@ abstract class AbstractFetcherThread(name: String, // With old message format, `latestEpoch` will be empty and we use Truncating state // to truncate to high watermark. val lastFetchedEpoch = latestEpoch(tp) - val state = if (lastFetchedEpoch.isPresent) Fetching else Truncating - PartitionFetchState(initialFetchState.topicId, initialFetchState.initOffset, None, initialFetchState.currentLeaderEpoch, + val state = if (lastFetchedEpoch.isPresent) ReplicaState.FETCHING else ReplicaState.TRUNCATING + new PartitionFetchState(initialFetchState.topicId.toJava, initialFetchState.initOffset, Optional.empty(), initialFetchState.currentLeaderEpoch, state, lastFetchedEpoch) } else { - PartitionFetchState(initialFetchState.topicId, initialFetchState.initOffset, None, initialFetchState.currentLeaderEpoch, - state = Truncating, lastFetchedEpoch = Optional.empty) + new PartitionFetchState(initialFetchState.topicId.toJava, initialFetchState.initOffset, Optional.empty(), initialFetchState.currentLeaderEpoch, + ReplicaState.TRUNCATING, Optional.empty()) } } @@ -538,7 +546,7 @@ abstract class AbstractFetcherThread(name: String, partitions.foreach { tp => val currentState = partitionStates.stateValue(tp) if (currentState != null) { - val updatedState = currentState.updateTopicId(topicIds(tp.topic)) + val updatedState = currentState.updateTopicId(topicIds(tp.topic).toJava) partitionStates.update(tp, updatedState) } } @@ -559,10 +567,10 @@ abstract class AbstractFetcherThread(name: String, case Some(offsetTruncationState) => val lastFetchedEpoch = latestEpoch(topicPartition) val state = if (leader.isTruncationOnFetchSupported || offsetTruncationState.truncationCompleted) - Fetching + ReplicaState.FETCHING else - Truncating - PartitionFetchState(currentFetchState.topicId, offsetTruncationState.offset, currentFetchState.lag, + ReplicaState.TRUNCATING + new PartitionFetchState(currentFetchState.topicId, offsetTruncationState.offset, currentFetchState.lag, 
currentFetchState.currentLeaderEpoch, currentFetchState.delay, state, lastFetchedEpoch) case None => currentFetchState } @@ -619,7 +627,7 @@ abstract class AbstractFetcherThread(name: String, if (endOffsetForEpochOpt.isPresent) { val offsetAndEpoch = endOffsetForEpochOpt.get val followerEndOffset = offsetAndEpoch.offset - val followerEpoch = offsetAndEpoch.leaderEpoch + val followerEpoch = offsetAndEpoch.epoch() if (followerEpoch != leaderEpochOffset.leaderEpoch) { // the follower does not know about the epoch that leader replied with // we truncate to the end offset of the largest epoch that is smaller than the @@ -671,8 +679,8 @@ abstract class AbstractFetcherThread(name: String, truncate(topicPartition, OffsetTruncationState(leaderEndOffset, truncationCompleted = true)) fetcherLagStats.getAndMaybePut(topicPartition).lag = 0 - PartitionFetchState(topicId, leaderEndOffset, Some(0), currentLeaderEpoch, - state = Fetching, lastFetchedEpoch = latestEpoch(topicPartition)) + new PartitionFetchState(topicId.toJava, leaderEndOffset, Optional.of(0L), currentLeaderEpoch, + ReplicaState.FETCHING, latestEpoch(topicPartition)) } else { /** * If the leader's log end offset is greater than the follower's log end offset, there are two possibilities: @@ -711,8 +719,8 @@ abstract class AbstractFetcherThread(name: String, val initialLag = leaderEndOffset - offsetToFetch fetcherLagStats.getAndMaybePut(topicPartition).lag = initialLag - PartitionFetchState(topicId, offsetToFetch, Some(initialLag), currentLeaderEpoch, - state = Fetching, lastFetchedEpoch = latestEpoch(topicPartition)) + new PartitionFetchState(topicId.toJava, offsetToFetch, Optional.of(initialLag), currentLeaderEpoch, + ReplicaState.FETCHING, latestEpoch(topicPartition)) } } @@ -734,7 +742,7 @@ abstract class AbstractFetcherThread(name: String, fetchState: PartitionFetchState, leaderEpochInRequest: Optional[Integer]): Boolean = { try { - val newFetchState = fetchOffsetAndTruncate(topicPartition, fetchState.topicId, fetchState.currentLeaderEpoch) + val newFetchState = fetchOffsetAndTruncate(topicPartition, fetchState.topicId.toScala, fetchState.currentLeaderEpoch) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) info(s"Current offset ${fetchState.fetchOffset} for partition $topicPartition is " + s"out of range, which typically implies a leader change. Reset fetch offset to ${newFetchState.fetchOffset}") @@ -779,7 +787,7 @@ abstract class AbstractFetcherThread(name: String, // TODO: use fetchTierStateMachine.maybeAdvanceState when implementing async tiering logic in KAFKA-13560 - fetcherLagStats.getAndMaybePut(topicPartition).lag = newFetchState.lag.getOrElse(0) + fetcherLagStats.getAndMaybePut(topicPartition).lag = newFetchState.lag.orElse(0L) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) debug(s"Current offset ${fetchState.fetchOffset} for partition $topicPartition is " + s"out of range or moved to remote tier. 
Reset fetch offset to ${newFetchState.fetchOffset}") @@ -789,7 +797,8 @@ abstract class AbstractFetcherThread(name: String, onPartitionFenced(topicPartition, leaderEpochInRequest) case e@(_: UnknownTopicOrPartitionException | _: UnknownLeaderEpochException | - _: NotLeaderOrFollowerException) => + _: NotLeaderOrFollowerException | + _: RetriableRemoteStorageException) => info(s"Could not build remote log auxiliary state for $topicPartition due to error: ${e.getMessage}") false case e: Throwable => @@ -804,9 +813,15 @@ abstract class AbstractFetcherThread(name: String, for (partition <- partitions) { Option(partitionStates.stateValue(partition)).foreach { currentFetchState => if (!currentFetchState.isDelayed) { - partitionStates.updateAndMoveToEnd(partition, PartitionFetchState(currentFetchState.topicId, currentFetchState.fetchOffset, - currentFetchState.lag, currentFetchState.currentLeaderEpoch, Some(delay), - currentFetchState.state, currentFetchState.lastFetchedEpoch)) + partitionStates.updateAndMoveToEnd(partition, + new PartitionFetchState( + currentFetchState.topicId, + currentFetchState.fetchOffset, + currentFetchState.lag, + currentFetchState.currentLeaderEpoch, + Optional.of(delay), + currentFetchState.state, + currentFetchState.lastFetchedEpoch)) } } } @@ -866,14 +881,6 @@ abstract class AbstractFetcherThread(name: String, } } -object AbstractFetcherThread { - - case class ReplicaFetch(partitionData: util.Map[TopicPartition, FetchRequest.PartitionData], fetchRequest: FetchRequest.Builder) - - case class ResultWithPartitions[R](result: R, partitionsWithError: Set[TopicPartition]) - -} - object FetcherMetrics { val ConsumerLag = "ConsumerLag" val RequestsPerSec = "RequestsPerSec" @@ -881,7 +888,10 @@ object FetcherMetrics { } class FetcherLagMetrics(metricId: ClientIdTopicPartition) { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "FetcherLagMetrics" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private[this] val lagVal = new AtomicLong(-1L) private[this] val tags = Map( @@ -903,11 +913,10 @@ class FetcherLagMetrics(metricId: ClientIdTopicPartition) { } class FetcherLagStats(metricId: ClientIdAndBroker) { - private val valueFactory = (k: TopicPartition) => new FetcherLagMetrics(ClientIdTopicPartition(metricId.clientId, k)) - val stats = new Pool[TopicPartition, FetcherLagMetrics](Some(valueFactory)) + val stats = new ConcurrentHashMap[TopicPartition, FetcherLagMetrics] def getAndMaybePut(topicPartition: TopicPartition): FetcherLagMetrics = { - stats.getAndMaybePut(topicPartition) + stats.computeIfAbsent(topicPartition, k => new FetcherLagMetrics(ClientIdTopicPartition(metricId.clientId, k))) } def unregister(topicPartition: TopicPartition): Unit = { @@ -916,14 +925,15 @@ class FetcherLagStats(metricId: ClientIdAndBroker) { } def unregister(): Unit = { - stats.keys.toBuffer.foreach { key: TopicPartition => - unregister(key) - } + stats.forEach((key, _) => unregister(key)) } } class FetcherStats(metricId: ClientIdAndBroker) { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "FetcherStats" + private val metricsGroup = new 
KafkaMetricsGroup(metricsPackage, metricsClassName) val tags: util.Map[String, String] = Map("clientId" -> metricId.clientId, "brokerHost" -> metricId.brokerHost, @@ -944,61 +954,6 @@ case class ClientIdTopicPartition(clientId: String, topicPartition: TopicPartiti override def toString: String = s"$clientId-$topicPartition" } -sealed trait ReplicaState - -case object Truncating extends ReplicaState - -case object Fetching extends ReplicaState - -object PartitionFetchState { - def apply(topicId: Option[Uuid], offset: Long, lag: Option[Long], currentLeaderEpoch: Int, state: ReplicaState, - lastFetchedEpoch: Optional[Integer]): PartitionFetchState = { - PartitionFetchState(topicId, offset, lag, currentLeaderEpoch, None, state, lastFetchedEpoch) - } -} - - -/** - * case class to keep partition offset and its state(truncatingLog, delayed) - * This represents a partition as being either: - * (1) Truncating its log, for example, having recently become a follower - * (2) Delayed, for example, due to an error, where we subsequently back off a bit - * (3) ReadyForFetch, the active state where the thread is actively fetching data. - */ -case class PartitionFetchState(topicId: Option[Uuid], - fetchOffset: Long, - lag: Option[Long], - currentLeaderEpoch: Int, - delay: Option[Long], - state: ReplicaState, - lastFetchedEpoch: Optional[Integer]) { - - private val dueMs = delay.map(_ + Time.SYSTEM.milliseconds) - - def isReadyForFetch: Boolean = state == Fetching && !isDelayed - - def isReplicaInSync: Boolean = lag.isDefined && lag.get <= 0 - - def isTruncating: Boolean = state == Truncating && !isDelayed - - def isDelayed: Boolean = dueMs.exists(_ > Time.SYSTEM.milliseconds) - - override def toString: String = { - s"FetchState(topicId=$topicId" + - s", fetchOffset=$fetchOffset" + - s", currentLeaderEpoch=$currentLeaderEpoch" + - s", lastFetchedEpoch=$lastFetchedEpoch" + - s", state=$state" + - s", lag=$lag" + - s", delay=${delay.getOrElse(0)}ms" + - s")" - } - - def updateTopicId(topicId: Option[Uuid]): PartitionFetchState = { - this.copy(topicId = topicId) - } -} - case class OffsetTruncationState(offset: Long, truncationCompleted: Boolean) { def this(offset: Long) = this(offset, true) diff --git a/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala b/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala deleted file mode 100644 index b7e3bd36d8478..0000000000000 --- a/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala +++ /dev/null @@ -1,315 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
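// A sketch of the Java-side replacement for the Scala PartitionFetchState/ReplicaState
// removed in the AbstractFetcherThread changes above. Argument order follows the call
// sites in that file; the values are placeholders and the exact Java signature is an
// assumption based on those call sites.
import java.util.Optional
import org.apache.kafka.common.Uuid
import org.apache.kafka.server.{PartitionFetchState, ReplicaState}

val sketchFetchState = new PartitionFetchState(
  Optional.empty[Uuid](),    // topicId: java.util.Optional replaces scala.Option
  0L,                        // fetchOffset
  Optional.of(0L),           // lag
  5,                         // currentLeaderEpoch
  ReplicaState.FETCHING,     // replaces the Scala case object Fetching
  Optional.empty[Integer]()) // lastFetchedEpoch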
- */ - -package kafka.server - -import kafka.server.AddPartitionsToTxnManager.{VerificationFailureRateMetricName, VerificationTimeMsMetricName} -import kafka.utils.Logging -import org.apache.kafka.clients.{ClientResponse, NetworkClient, RequestCompletionHandler} -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.{Node, TopicPartition} -import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, MetadataResponse} -import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.MetadataCache -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} - -import java.util -import java.util.concurrent.TimeUnit -import scala.collection.{Seq, mutable} -import scala.jdk.CollectionConverters._ - -object AddPartitionsToTxnManager { - type AppendCallback = Map[TopicPartition, Errors] => Unit - - val VerificationFailureRateMetricName = "VerificationFailureRate" - val VerificationTimeMsMetricName = "VerificationTimeMs" - - def produceRequestVersionToTransactionSupportedOperation(version: Short): TransactionSupportedOperation = { - if (version > 11) { - addPartition - } else if (version > 10) { - genericErrorSupported - } else { - defaultError - } - } - - def txnOffsetCommitRequestVersionToTransactionSupportedOperation(version: Int): TransactionSupportedOperation = { - if (version > 4) { - addPartition - } else if (version > 3) { - genericErrorSupported - } else { - defaultError - } - } -} - -/** - * This is an enum which handles the Partition Response based on the Request Version and the exact operation - * defaultError: This is the default workflow which maps to cases when the Produce Request Version or the Txn_offset_commit request was lower than the first version supporting the new Error Class - * genericErrorSupported: This maps to the case when the clients are updated to handle the TransactionAbortableException - * addPartition: This allows the partition to be added to the transactions inflight with the Produce and TxnOffsetCommit requests. Plus the behaviors in genericErrorSupported. - */ -sealed trait TransactionSupportedOperation { - val supportsEpochBump = false; -} -case object defaultError extends TransactionSupportedOperation -case object genericErrorSupported extends TransactionSupportedOperation -case object addPartition extends TransactionSupportedOperation { - override val supportsEpochBump = true -} - -/* - * Data structure to hold the transactional data to send to a node. Note -- at most one request per transactional ID - * will exist at a time in the map. If a given transactional ID exists in the map, and a new request with the same ID - * comes in, one request will be in the map and one will return to the producer with a response depending on the epoch. 
- */ -class TransactionDataAndCallbacks(val transactionData: AddPartitionsToTxnTransactionCollection, - val callbacks: mutable.Map[String, AddPartitionsToTxnManager.AppendCallback], - val startTimeMs: mutable.Map[String, Long], - val transactionSupportedOperation: TransactionSupportedOperation) - -class AddPartitionsToTxnManager( - config: KafkaConfig, - client: NetworkClient, - metadataCache: MetadataCache, - partitionFor: String => Int, - time: Time -) extends InterBrokerSendThread( - "AddPartitionsToTxnSenderThread-" + config.brokerId, - client, - config.requestTimeoutMs, - time -) with Logging { - - this.logIdent = logPrefix - - private val interBrokerListenerName = config.interBrokerListenerName - private val inflightNodes = mutable.HashSet[Node]() - private val nodesToTransactions = mutable.Map[Node, TransactionDataAndCallbacks]() - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - private val verificationFailureRate = metricsGroup.newMeter(VerificationFailureRateMetricName, "failures", TimeUnit.SECONDS) - private val verificationTimeMs = metricsGroup.newHistogram(VerificationTimeMsMetricName) - - def addOrVerifyTransaction( - transactionalId: String, - producerId: Long, - producerEpoch: Short, - topicPartitions: Seq[TopicPartition], - callback: AddPartitionsToTxnManager.AppendCallback, - transactionSupportedOperation: TransactionSupportedOperation - ): Unit = { - val coordinatorNode = getTransactionCoordinator(partitionFor(transactionalId)) - if (coordinatorNode.isEmpty) { - callback(topicPartitions.map(tp => tp -> Errors.COORDINATOR_NOT_AVAILABLE).toMap) - } else { - val topicCollection = new AddPartitionsToTxnTopicCollection() - topicPartitions.groupBy(_.topic).foreachEntry { (topic, tps) => - topicCollection.add(new AddPartitionsToTxnTopic() - .setName(topic) - .setPartitions(tps.map(tp => Int.box(tp.partition)).toList.asJava)) - } - - val transactionData = new AddPartitionsToTxnTransaction() - .setTransactionalId(transactionalId) - .setProducerId(producerId) - .setProducerEpoch(producerEpoch) - .setVerifyOnly(!transactionSupportedOperation.supportsEpochBump) - .setTopics(topicCollection) - - addTxnData(coordinatorNode.get, transactionData, callback, transactionSupportedOperation) - - } - } - - private def addTxnData( - node: Node, - transactionData: AddPartitionsToTxnTransaction, - callback: AddPartitionsToTxnManager.AppendCallback, - transactionSupportedOperation: TransactionSupportedOperation - ): Unit = { - nodesToTransactions.synchronized { - val curTime = time.milliseconds() - // Check if we have already have either node or individual transaction. Add the Node if it isn't there. - val existingNodeAndTransactionData = nodesToTransactions.getOrElseUpdate(node, - new TransactionDataAndCallbacks( - new AddPartitionsToTxnTransactionCollection(1), - mutable.Map[String, AddPartitionsToTxnManager.AppendCallback](), - mutable.Map[String, Long](), - transactionSupportedOperation)) - - val existingTransactionData = existingNodeAndTransactionData.transactionData.find(transactionData.transactionalId) - - // There are 3 cases if we already have existing data - // 1. Incoming data has a higher epoch -- return INVALID_PRODUCER_EPOCH for existing data since it is fenced - // 2. Incoming data has the same epoch -- return NETWORK_EXCEPTION for existing data, since the client is likely retrying and we want another retriable exception - // 3. 
Incoming data has a lower epoch -- return INVALID_PRODUCER_EPOCH for the incoming data since it is fenced, do not add incoming data to verify - if (existingTransactionData != null) { - if (existingTransactionData.producerEpoch <= transactionData.producerEpoch) { - val error = if (existingTransactionData.producerEpoch < transactionData.producerEpoch) - Errors.INVALID_PRODUCER_EPOCH - else - Errors.NETWORK_EXCEPTION - val oldCallback = existingNodeAndTransactionData.callbacks(transactionData.transactionalId) - existingNodeAndTransactionData.transactionData.remove(transactionData) - sendCallback(oldCallback, topicPartitionsToError(existingTransactionData, error), existingNodeAndTransactionData.startTimeMs(transactionData.transactionalId)) - } else { - // If the incoming transactionData's epoch is lower, we can return with INVALID_PRODUCER_EPOCH immediately. - sendCallback(callback, topicPartitionsToError(transactionData, Errors.INVALID_PRODUCER_EPOCH), curTime) - return - } - } - - existingNodeAndTransactionData.transactionData.add(transactionData) - existingNodeAndTransactionData.callbacks.put(transactionData.transactionalId, callback) - existingNodeAndTransactionData.startTimeMs.put(transactionData.transactionalId, curTime) - wakeup() - } - } - - private def getTransactionCoordinator(partition: Int): util.Optional[Node] = { - metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, partition) - .filter(_.leader != MetadataResponse.NO_LEADER_ID) - .flatMap(metadata => metadataCache.getAliveBrokerNode(metadata.leader, interBrokerListenerName)) - } - - private def topicPartitionsToError(transactionData: AddPartitionsToTxnTransaction, error: Errors): Map[TopicPartition, Errors] = { - val topicPartitionsToError = mutable.Map[TopicPartition, Errors]() - transactionData.topics.forEach { topic => - topic.partitions.forEach { partition => - topicPartitionsToError.put(new TopicPartition(topic.name, partition), error) - } - } - verificationFailureRate.mark(topicPartitionsToError.size) - topicPartitionsToError.toMap - } - - private def sendCallback(callback: AddPartitionsToTxnManager.AppendCallback, errorMap: Map[TopicPartition, Errors], startTimeMs: Long): Unit = { - verificationTimeMs.update(time.milliseconds() - startTimeMs) - callback(errorMap) - } - - private class AddPartitionsToTxnHandler(node: Node, transactionDataAndCallbacks: TransactionDataAndCallbacks) extends RequestCompletionHandler { - override def onComplete(response: ClientResponse): Unit = { - // Note: Synchronization is not needed on inflightNodes since it is always accessed from this thread. - inflightNodes.remove(node) - if (response.authenticationException != null) { - error(s"AddPartitionsToTxnRequest failed for node ${response.destination} with an " + - "authentication exception.", response.authenticationException) - sendCallbacksToAll(Errors.forException(response.authenticationException).code) - } else if (response.versionMismatch != null) { - // We may see unsupported version exception if we try to send a verify only request to a broker that can't handle it. - // In this case, skip verification. - warn(s"AddPartitionsToTxnRequest failed for node ${response.destination} with invalid version exception. This suggests verification is not supported." 
+ - s"Continuing handling the produce request.") - transactionDataAndCallbacks.callbacks.foreach { case (txnId, callback) => - sendCallback(callback, Map.empty, transactionDataAndCallbacks.startTimeMs(txnId)) - } - } else if (response.wasDisconnected || response.wasTimedOut) { - warn(s"AddPartitionsToTxnRequest failed for node ${response.destination} with a network exception.") - sendCallbacksToAll(Errors.NETWORK_EXCEPTION.code) - } else { - val addPartitionsToTxnResponseData = response.responseBody.asInstanceOf[AddPartitionsToTxnResponse].data - if (addPartitionsToTxnResponseData.errorCode != 0) { - error(s"AddPartitionsToTxnRequest for node ${response.destination} returned with error ${Errors.forCode(addPartitionsToTxnResponseData.errorCode)}.") - // The client should not be exposed to CLUSTER_AUTHORIZATION_FAILED so modify the error to signify the verification did not complete. - // Return INVALID_TXN_STATE. - val finalError = if (addPartitionsToTxnResponseData.errorCode == Errors.CLUSTER_AUTHORIZATION_FAILED.code) - Errors.INVALID_TXN_STATE.code - else - addPartitionsToTxnResponseData.errorCode - - sendCallbacksToAll(finalError) - } else { - addPartitionsToTxnResponseData.resultsByTransaction.forEach { transactionResult => - val unverified = mutable.Map[TopicPartition, Errors]() - transactionResult.topicResults.forEach { topicResult => - topicResult.resultsByPartition.forEach { partitionResult => - val tp = new TopicPartition(topicResult.name, partitionResult.partitionIndex) - if (partitionResult.partitionErrorCode != Errors.NONE.code) { - // Producers expect to handle INVALID_PRODUCER_EPOCH in this scenario. - val code = - if (partitionResult.partitionErrorCode == Errors.PRODUCER_FENCED.code) - Errors.INVALID_PRODUCER_EPOCH.code - else if (partitionResult.partitionErrorCode() == Errors.TRANSACTION_ABORTABLE.code - && transactionDataAndCallbacks.transactionSupportedOperation == defaultError) // For backward compatibility with clients. 
- Errors.INVALID_TXN_STATE.code - else - partitionResult.partitionErrorCode - unverified.put(tp, Errors.forCode(code)) - } - } - } - verificationFailureRate.mark(unverified.size) - val callback = transactionDataAndCallbacks.callbacks(transactionResult.transactionalId) - sendCallback(callback, unverified.toMap, transactionDataAndCallbacks.startTimeMs(transactionResult.transactionalId)) - } - } - } - wakeup() - } - - private def buildErrorMap(transactionalId: String, errorCode: Short): Map[TopicPartition, Errors] = { - val transactionData = transactionDataAndCallbacks.transactionData.find(transactionalId) - topicPartitionsToError(transactionData, Errors.forCode(errorCode)) - } - - private def sendCallbacksToAll(errorCode: Short): Unit = { - transactionDataAndCallbacks.callbacks.foreach { case (txnId, callback) => - sendCallback(callback, buildErrorMap(txnId, errorCode), transactionDataAndCallbacks.startTimeMs(txnId)) - } - } - } - - override def generateRequests(): util.Collection[RequestAndCompletionHandler] = { - // build and add requests to queue - val list = new util.ArrayList[RequestAndCompletionHandler]() - val currentTimeMs = time.milliseconds() - val removedNodes = mutable.Set[Node]() - nodesToTransactions.synchronized { - nodesToTransactions.foreach { case (node, transactionDataAndCallbacks) => - if (!inflightNodes.contains(node)) { - list.add(new RequestAndCompletionHandler( - currentTimeMs, - node, - AddPartitionsToTxnRequest.Builder.forBroker(transactionDataAndCallbacks.transactionData), - new AddPartitionsToTxnHandler(node, transactionDataAndCallbacks) - )) - - removedNodes.add(node) - } - } - removedNodes.foreach { node => - inflightNodes.add(node) - nodesToTransactions.remove(node) - } - } - list - } - - override def shutdown(): Unit = { - super.shutdown() - metricsGroup.removeMetric(VerificationFailureRateMetricName) - metricsGroup.removeMetric(VerificationTimeMsMetricName) - } - -} diff --git a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala index a2c2bd4d80b35..1398a8ad7c17a 100644 --- a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala +++ b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala @@ -18,6 +18,7 @@ package kafka.server import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.locks.ReentrantLock import java.util.{Collections, Properties} import kafka.coordinator.transaction.TransactionCoordinator import kafka.utils.Logging @@ -29,11 +30,13 @@ import org.apache.kafka.common.message.CreateTopicsRequestData import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicConfig, CreatableTopicConfigCollection} import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{CreateTopicsRequest, RequestContext, RequestHeader} +import org.apache.kafka.common.requests.{AbstractResponse, CreateTopicsRequest, CreateTopicsResponse, EnvelopeResponse, RequestContext, RequestHeader} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} +import org.apache.kafka.server.quota.ControllerMutationQuota +import org.apache.kafka.common.utils.Time import scala.collection.{Map, 
Seq, Set, mutable} import scala.jdk.CollectionConverters._ @@ -49,21 +52,96 @@ trait AutoTopicCreationManager { def createStreamsInternalTopics( topics: Map[String, CreatableTopic], - requestContext: RequestContext + requestContext: RequestContext, + timeoutMs: Long ): Unit + def getStreamsInternalTopicCreationErrors( + topicNames: Set[String], + currentTimeMs: Long + ): Map[String, String] + + def close(): Unit = {} + } +/** + * Thread-safe cache that stores topic creation errors with per-entry expiration. + * - Expiration: maintained by a min-heap (priority queue) on expiration time + * - Capacity: enforced by evicting entries with earliest expiration time (not LRU) + * - Updates: old entries remain in queue but are ignored via reference equality check + */ +private[server] class ExpiringErrorCache(maxSize: Int, time: Time) { + + private case class Entry(topicName: String, errorMessage: String, expirationTimeMs: Long) + + private val byTopic = new ConcurrentHashMap[String, Entry]() + private val expiryQueue = new java.util.PriorityQueue[Entry](11, new java.util.Comparator[Entry] { + override def compare(a: Entry, b: Entry): Int = java.lang.Long.compare(a.expirationTimeMs, b.expirationTimeMs) + }) + private val lock = new ReentrantLock() + + def put(topicName: String, errorMessage: String, ttlMs: Long): Unit = { + lock.lock() + try { + val currentTimeMs = time.milliseconds() + val expirationTimeMs = currentTimeMs + ttlMs + val entry = Entry(topicName, errorMessage, expirationTimeMs) + byTopic.put(topicName, entry) + expiryQueue.add(entry) + + // Clean up expired entries and enforce capacity + while (!expiryQueue.isEmpty && + (expiryQueue.peek().expirationTimeMs <= currentTimeMs || byTopic.size() > maxSize)) { + val evicted = expiryQueue.poll() + val current = byTopic.get(evicted.topicName) + if (current != null && (current eq evicted)) { + byTopic.remove(evicted.topicName) + } + } + } finally { + lock.unlock() + } + } + + def getErrorsForTopics(topicNames: Set[String], currentTimeMs: Long): Map[String, String] = { + val result = mutable.Map.empty[String, String] + topicNames.foreach { topicName => + val entry = byTopic.get(topicName) + if (entry != null && entry.expirationTimeMs > currentTimeMs) { + result.put(topicName, entry.errorMessage) + } + } + result.toMap + } + + private[server] def clear(): Unit = { + lock.lock() + try { + byTopic.clear() + expiryQueue.clear() + } finally { + lock.unlock() + } + } +} + + class DefaultAutoTopicCreationManager( config: KafkaConfig, channelManager: NodeToControllerChannelManager, groupCoordinator: GroupCoordinator, txnCoordinator: TransactionCoordinator, - shareCoordinator: Option[ShareCoordinator] + shareCoordinator: ShareCoordinator, + time: Time, + topicErrorCacheCapacity: Int = 1000 ) extends AutoTopicCreationManager with Logging { private val inflightTopics = Collections.newSetFromMap(new ConcurrentHashMap[String, java.lang.Boolean]()) + // Hardcoded default capacity; can be overridden in tests via constructor param + private val topicCreationErrorCache = new ExpiringErrorCache(topicErrorCacheCapacity, time) + /** * Initiate auto topic creation for the given topics. 
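The ExpiringErrorCache added above is what lets createStreamsInternalTopics report asynchronous creation failures later through getStreamsInternalTopicCreationErrors. A minimal sketch of its semantics, assuming it is exercised from within package kafka.server (the class is private[server]); the topic names, error messages and the use of MockTime from the clients test utilities are illustrative, not part of the patch:

    import org.apache.kafka.common.utils.MockTime

    // Sketch only: exercises the put/lookup semantics described in the scaladoc above.
    val time = new MockTime()
    val cache = new ExpiringErrorCache(maxSize = 2, time = time)

    cache.put("streams-repartition", "Timed out waiting for controller.", ttlMs = 1000L) // expires at t0 + 1000
    time.sleep(10)
    cache.put("streams-changelog", "INVALID_REPLICATION_FACTOR", ttlMs = 1000L)          // expires at t0 + 1010

    // Over capacity: the entry with the earliest expiration ("streams-repartition") is evicted,
    // not the least recently used one.
    time.sleep(10)
    cache.put("streams-other", "Authorization failed.", ttlMs = 1000L)
    assert(cache.getErrorsForTopics(Set("streams-repartition"), time.milliseconds()).isEmpty)
    assert(cache.getErrorsForTopics(Set("streams-changelog", "streams-other"), time.milliseconds()).size == 2)

    // Once the TTL elapses, lookups stop returning an entry even before it is physically cleaned up.
    time.sleep(2000)
    assert(cache.getErrorsForTopics(Set("streams-other"), time.milliseconds()).isEmpty)

The design choice worth noting is that capacity is enforced by expiration order rather than recency, so under pressure the cache always sheds the entries that would have disappeared soonest anyway.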
* @@ -92,25 +170,21 @@ class DefaultAutoTopicCreationManager( override def createStreamsInternalTopics( topics: Map[String, CreatableTopic], - requestContext: RequestContext + requestContext: RequestContext, + timeoutMs: Long ): Unit = { - - for ((_, creatableTopic) <- topics) { - if (creatableTopic.numPartitions() == -1) { - creatableTopic - .setNumPartitions(config.numPartitions) - } - if (creatableTopic.replicationFactor() == -1) { - creatableTopic - .setReplicationFactor(config.defaultReplicationFactor.shortValue) - } - } - if (topics.nonEmpty) { - sendCreateTopicRequest(topics, Some(requestContext)) + sendCreateTopicRequestWithErrorCaching(topics, Some(requestContext), timeoutMs) } } + override def getStreamsInternalTopicCreationErrors( + topicNames: Set[String], + currentTimeMs: Long + ): Map[String, String] = { + topicCreationErrorCache.getErrorsForTopics(topicNames, currentTimeMs) + } + private def sendCreateTopicRequest( creatableTopics: Map[String, CreatableTopic], requestContext: Option[RequestContext] @@ -124,6 +198,22 @@ class DefaultAutoTopicCreationManager( .setTopics(topicsToCreate) ) + // Capture request header information for proper envelope response parsing + val requestHeaderForParsing = requestContext.map { context => + val requestVersion = + channelManager.controllerApiVersions.toScala match { + case None => + ApiKeys.CREATE_TOPICS.latestVersion() + case Some(nodeApiVersions) => + nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) + } + + new RequestHeader(ApiKeys.CREATE_TOPICS, + requestVersion, + context.clientId, + context.correlationId) + } + val requestCompletionHandler = new ControllerRequestCompletionHandler { override def onTimeout(): Unit = { clearInflightRequests(creatableTopics) @@ -137,31 +227,58 @@ class DefaultAutoTopicCreationManager( } else if (response.versionMismatch() != null) { warn(s"Auto topic creation failed for ${creatableTopics.keys} with invalid version exception") } else { + if (response.hasResponse) { + response.responseBody() match { + case envelopeResponse: EnvelopeResponse => + // Unwrap the envelope response to get the actual CreateTopicsResponse + val envelopeError = envelopeResponse.error() + if (envelopeError != Errors.NONE) { + warn(s"Auto topic creation failed for ${creatableTopics.keys} with envelope error: ${envelopeError}") + } else { + requestHeaderForParsing match { + case Some(requestHeader) => + try { + // Use the captured request header for proper envelope response parsing + val createTopicsResponse = AbstractResponse.parseResponse( + envelopeResponse.responseData(), requestHeader).asInstanceOf[CreateTopicsResponse] + + createTopicsResponse.data().topics().forEach(topicResult => { + val error = Errors.forCode(topicResult.errorCode) + if (error != Errors.NONE) { + warn(s"Auto topic creation failed for ${topicResult.name} with error '${error.name}': ${topicResult.errorMessage}") + } + }) + } catch { + case e: Exception => + warn(s"Failed to parse envelope response for auto topic creation of ${creatableTopics.keys}", e) + } + case None => + warn(s"Cannot parse envelope response without original request header information") + } + } + case createTopicsResponse: CreateTopicsResponse => + createTopicsResponse.data().topics().forEach(topicResult => { + val error = Errors.forCode(topicResult.errorCode) + if (error != Errors.NONE) { + warn(s"Auto topic creation failed for ${topicResult.name} with error '${error.name}': ${topicResult.errorMessage}") + } + }) + case other => + warn(s"Auto topic creation request received 
unexpected response type: ${other.getClass.getSimpleName}") + } + } debug(s"Auto topic creation completed for ${creatableTopics.keys} with response ${response.responseBody}.") } } } - val request = requestContext.map { context => - val requestVersion = - channelManager.controllerApiVersions.toScala match { - case None => - // We will rely on the Metadata request to be retried in the case - // that the latest version is not usable by the controller. - ApiKeys.CREATE_TOPICS.latestVersion() - case Some(nodeApiVersions) => - nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) - } - - // Borrow client information such as client id and correlation id from the original request, - // in order to correlate the create request with the original metadata request. - val requestHeader = new RequestHeader(ApiKeys.CREATE_TOPICS, - requestVersion, - context.clientId, - context.correlationId) - ForwardingManager.buildEnvelopeRequest(context, - createTopicsRequest.build(requestVersion).serializeWithHeader(requestHeader)) - }.getOrElse(createTopicsRequest) + val request = (requestContext, requestHeaderForParsing) match { + case (Some(context), Some(requestHeader)) => + ForwardingManager.buildEnvelopeRequest(context, + createTopicsRequest.build(requestHeader.apiVersion()).serializeWithHeader(requestHeader)) + case _ => + createTopicsRequest + } channelManager.sendRequest(request, requestCompletionHandler) @@ -198,15 +315,11 @@ class DefaultAutoTopicCreationManager( .setConfigs(convertToTopicConfigCollections( txnCoordinator.transactionTopicConfigs)) case SHARE_GROUP_STATE_TOPIC_NAME => - val props = shareCoordinator match { - case Some(coordinator) => coordinator.shareGroupStateTopicConfigs() - case None => new Properties() - } new CreatableTopic() .setName(topic) .setNumPartitions(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions()) .setReplicationFactor(config.shareCoordinatorConfig.shareCoordinatorStateTopicReplicationFactor()) - .setConfigs(convertToTopicConfigCollections(props)) + .setConfigs(convertToTopicConfigCollections(shareCoordinator.shareGroupStateTopicConfigs())) case topicName => new CreatableTopic() .setName(topicName) @@ -266,4 +379,133 @@ class DefaultAutoTopicCreationManager( (creatableTopics, uncreatableTopics) } + + private def sendCreateTopicRequestWithErrorCaching( + creatableTopics: Map[String, CreatableTopic], + requestContext: Option[RequestContext], + timeoutMs: Long + ): Seq[MetadataResponseTopic] = { + val topicsToCreate = new CreateTopicsRequestData.CreatableTopicCollection(creatableTopics.size) + topicsToCreate.addAll(creatableTopics.values.asJavaCollection) + + val createTopicsRequest = new CreateTopicsRequest.Builder( + new CreateTopicsRequestData() + .setTimeoutMs(config.requestTimeoutMs) + .setTopics(topicsToCreate) + ) + + // Capture request header information for proper envelope response parsing + val requestHeaderForParsing = requestContext.map { context => + val requestVersion = + channelManager.controllerApiVersions.toScala match { + case None => + ApiKeys.CREATE_TOPICS.latestVersion() + case Some(nodeApiVersions) => + nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) + } + + new RequestHeader(ApiKeys.CREATE_TOPICS, + requestVersion, + context.clientId, + context.correlationId) + } + + val requestCompletionHandler = new ControllerRequestCompletionHandler { + override def onTimeout(): Unit = { + clearInflightRequests(creatableTopics) + debug(s"Auto topic creation timed out for ${creatableTopics.keys}.") + 
cacheTopicCreationErrors(creatableTopics.keys.toSet, "Auto topic creation timed out.", timeoutMs) + } + + override def onComplete(response: ClientResponse): Unit = { + clearInflightRequests(creatableTopics) + if (response.authenticationException() != null) { + val authException = response.authenticationException() + warn(s"Auto topic creation failed for ${creatableTopics.keys} with authentication exception: ${authException.getMessage}") + cacheTopicCreationErrors(creatableTopics.keys.toSet, authException.getMessage, timeoutMs) + } else if (response.versionMismatch() != null) { + val versionException = response.versionMismatch() + warn(s"Auto topic creation failed for ${creatableTopics.keys} with version mismatch exception: ${versionException.getMessage}") + cacheTopicCreationErrors(creatableTopics.keys.toSet, versionException.getMessage, timeoutMs) + } else { + if (response.hasResponse) { + response.responseBody() match { + case envelopeResponse: EnvelopeResponse => + // Unwrap the envelope response to get the actual CreateTopicsResponse + val envelopeError = envelopeResponse.error() + if (envelopeError != Errors.NONE) { + warn(s"Auto topic creation failed for ${creatableTopics.keys} with envelope error: ${envelopeError}") + cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Envelope error: ${envelopeError}", timeoutMs) + } else { + requestHeaderForParsing match { + case Some(requestHeader) => + try { + // Use the captured request header for proper envelope response parsing + val createTopicsResponse = AbstractResponse.parseResponse( + envelopeResponse.responseData(), requestHeader).asInstanceOf[CreateTopicsResponse] + + cacheTopicCreationErrorsFromResponse(createTopicsResponse, timeoutMs) + } catch { + case e: Exception => + warn(s"Failed to parse envelope response for auto topic creation of ${creatableTopics.keys}", e) + cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Response parsing error: ${e.getMessage}", timeoutMs) + } + case None => + warn(s"Cannot parse envelope response without original request header information") + cacheTopicCreationErrors(creatableTopics.keys.toSet, "Missing request header for envelope parsing", timeoutMs) + } + } + case createTopicsResponse: CreateTopicsResponse => + cacheTopicCreationErrorsFromResponse(createTopicsResponse, timeoutMs) + case unexpectedResponse => + warn(s"Auto topic creation request received unexpected response type: ${unexpectedResponse.getClass.getSimpleName}") + cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Unexpected response type: ${unexpectedResponse.getClass.getSimpleName}", timeoutMs) + } + debug(s"Auto topic creation completed for ${creatableTopics.keys} with response ${response.responseBody}.") + } + } + } + } + + val request = (requestContext, requestHeaderForParsing) match { + case (Some(context), Some(requestHeader)) => + ForwardingManager.buildEnvelopeRequest(context, + createTopicsRequest.build(requestHeader.apiVersion()).serializeWithHeader(requestHeader)) + case _ => + createTopicsRequest + } + + channelManager.sendRequest(request, requestCompletionHandler) + + val creatableTopicResponses = creatableTopics.keySet.toSeq.map { topic => + new MetadataResponseTopic() + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) + .setName(topic) + .setIsInternal(Topic.isInternal(topic)) + } + + creatableTopicResponses + } + + private def cacheTopicCreationErrors(topicNames: Set[String], errorMessage: String, ttlMs: Long): Unit = { + topicNames.foreach { topicName => + topicCreationErrorCache.put(topicName, 
errorMessage, ttlMs) + } + } + + private def cacheTopicCreationErrorsFromResponse(response: CreateTopicsResponse, ttlMs: Long): Unit = { + response.data().topics().forEach { topicResult => + if (topicResult.errorCode() != Errors.NONE.code()) { + val errorMessage = Option(topicResult.errorMessage()) + .filter(_.nonEmpty) + .getOrElse(Errors.forCode(topicResult.errorCode()).message()) + topicCreationErrorCache.put(topicResult.name(), errorMessage, ttlMs) + debug(s"Cached topic creation error for ${topicResult.name()}: $errorMessage") + } + } + } + + override def close(): Unit = { + topicCreationErrorCache.clear() + } } diff --git a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala index ae3a5d8c3c748..2368ebc21ccd2 100644 --- a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala +++ b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala @@ -28,7 +28,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{BrokerHeartbeatRequest, BrokerHeartbeatResponse, BrokerRegistrationRequest, BrokerRegistrationResponse} import org.apache.kafka.metadata.{BrokerState, VersionRange} import org.apache.kafka.queue.EventQueue.DeadlineFunction -import org.apache.kafka.common.utils.{ExponentialBackoff, LogContext, Time} +import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.queue.{EventQueue, KafkaEventQueue} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} @@ -89,18 +89,6 @@ class BrokerLifecycleManager( private val initialTimeoutNs = MILLISECONDS.toNanos(config.initialRegistrationTimeoutMs.longValue()) - /** - * The exponential backoff to use for resending communication. - */ - private val resendExponentialBackoff = - new ExponentialBackoff(100, 2, config.brokerSessionTimeoutMs.toLong / 2, 0.02) - - /** - * The number of times we've tried and failed to communicate. This variable can only be - * read or written from the BrokerToControllerRequestThread. - */ - private var failedAttempts = 0L - /** * The broker incarnation ID. 
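The BrokerLifecycleManager change above removes the per-failure exponential backoff, and the hunk that follows reschedules failed controller communication at the regular heartbeat interval instead. A minimal sketch contrasting the two delay computations, assuming the constructor arguments shown in the removed lines; the concrete config values below are illustrative stand-ins, not defaults taken from KafkaConfig:

    import org.apache.kafka.common.utils.ExponentialBackoff

    val brokerSessionTimeoutMs = 9000L     // illustrative stand-in for config.brokerSessionTimeoutMs
    val brokerHeartbeatIntervalMs = 2000L  // illustrative stand-in for config.brokerHeartbeatIntervalMs

    // Removed behaviour: the delay grew with consecutive failures, capped at half the session timeout.
    val backoff = new ExponentialBackoff(100, 2, brokerSessionTimeoutMs / 2, 0.02)
    val oldDelaysMs = (0L to 5L).map(attempts => backoff.backoff(attempts))
    // roughly 100, 200, 400, 800, 1600, 3200 ms, each perturbed by the 2% jitter

    // New behaviour: every failed attempt is simply retried one heartbeat interval later.
    val newDelayMs = brokerHeartbeatIntervalMs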
This ID uniquely identifies each time we start the broker */ @@ -440,7 +428,6 @@ class BrokerLifecycleManager( val message = response.responseBody().asInstanceOf[BrokerRegistrationResponse] val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { - failedAttempts = 0 _brokerEpoch = message.data().brokerEpoch() registered = true initialRegistrationSucceeded = true @@ -514,7 +501,6 @@ class BrokerLifecycleManager( val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { val responseData = message.data() - failedAttempts = 0 currentOfflineDirs.foreach(cur => offlineDirs.put(cur, true)) _state match { case BrokerState.STARTING => @@ -577,10 +563,9 @@ class BrokerLifecycleManager( } private def scheduleNextCommunicationAfterFailure(): Unit = { - val delayMs = resendExponentialBackoff.backoff(failedAttempts) - failedAttempts = failedAttempts + 1 nextSchedulingShouldBeImmediate = false // never immediately reschedule after a failure - scheduleNextCommunication(NANOSECONDS.convert(delayMs, MILLISECONDS)) + scheduleNextCommunication(NANOSECONDS.convert( + config.brokerHeartbeatIntervalMs.longValue() , MILLISECONDS)) } private def scheduleNextCommunicationAfterSuccess(): Unit = { diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 8f9b983cb782f..a9217c4d0239b 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -17,7 +17,7 @@ package kafka.server -import kafka.coordinator.group.{CoordinatorLoaderImpl, CoordinatorPartitionWriter} +import kafka.coordinator.group.CoordinatorPartitionWriter import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager import kafka.network.SocketServer @@ -34,16 +34,16 @@ import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.common.{ClusterResource, TopicPartition, Uuid} -import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord +import org.apache.kafka.coordinator.common.runtime.{CoordinatorLoaderImpl, CoordinatorRecord} import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{GroupConfigManager, GroupCoordinator, GroupCoordinatorRecordSerde, GroupCoordinatorService} import org.apache.kafka.coordinator.share.metrics.{ShareCoordinatorMetrics, ShareCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorRecordSerde, ShareCoordinatorService} import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} -import org.apache.kafka.metadata.{BrokerState, ListenerInfo} -import org.apache.kafka.metadata.publisher.AclPublisher -import org.apache.kafka.security.CredentialProvider +import org.apache.kafka.metadata.{BrokerState, ListenerInfo, MetadataVersionConfigValidator} +import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} +import org.apache.kafka.security.{CredentialProvider, DelegationTokenManager} import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.{ApiMessageAndVersion, DirectoryEventHandler, NodeToControllerChannelManager, 
TopicIdPartition} import org.apache.kafka.server.config.{ConfigType, DelegationTokenManagerConfigs} @@ -54,14 +54,14 @@ import org.apache.kafka.server.share.persister.{DefaultStatePersister, NoOpState import org.apache.kafka.server.share.session.ShareSessionCache import org.apache.kafka.server.util.timer.{SystemTimer, SystemTimerReaper} import org.apache.kafka.server.util.{Deadline, FutureUtils, KafkaScheduler} -import org.apache.kafka.server.{AssignmentsManager, BrokerFeatures, ClientMetricsManager, DefaultApiVersionManager, DelayedActionQueue, DelegationTokenManager, ProcessRole} +import org.apache.kafka.server.{AssignmentsManager, BrokerFeatures, ClientMetricsManager, DefaultApiVersionManager, DelayedActionQueue, ProcessRole} +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.time.Duration import java.util import java.util.Optional -import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.{Condition, ReentrantLock} import java.util.concurrent.{CompletableFuture, ExecutionException, TimeUnit, TimeoutException} import scala.collection.Map @@ -95,8 +95,6 @@ class BrokerServer( private var assignmentsManager: AssignmentsManager = _ - private val isShuttingDown = new AtomicBoolean(false) - val lock: ReentrantLock = new ReentrantLock() val awaitShutdownCond: Condition = lock.newCondition() var status: ProcessStatus = SHUTDOWN @@ -126,7 +124,7 @@ class BrokerServer( var transactionCoordinator: TransactionCoordinator = _ - var shareCoordinator: Option[ShareCoordinator] = None + var shareCoordinator: ShareCoordinator = _ var clientToControllerChannelManager: NodeToControllerChannelManager = _ @@ -171,10 +169,7 @@ class BrokerServer( info(s"Transition from $status to $to") status = to - if (to == SHUTTING_DOWN) { - isShuttingDown.set(true) - } else if (to == SHUTDOWN) { - isShuttingDown.set(false) + if (to == SHUTDOWN) { awaitShutdownCond.signalAll() } } finally { @@ -197,7 +192,7 @@ class BrokerServer( config.dynamicConfig.initialize(Some(clientMetricsReceiverPlugin)) quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-", ProcessRole.BrokerRole.toString) - DynamicBrokerConfig.readDynamicBrokerConfigsFromSnapshot(raftManager, config, quotaManagers) + DynamicBrokerConfig.readDynamicBrokerConfigsFromSnapshot(raftManager, config, quotaManagers, logContext) /* start scheduler */ kafkaScheduler = new KafkaScheduler(config.backgroundThreads) @@ -259,7 +254,15 @@ class BrokerServer( Optional.of(clientMetricsManager) ) - val connectionDisconnectListeners = Seq(clientMetricsManager.connectionDisconnectListener()) + val shareFetchSessionCache : ShareSessionCache = new ShareSessionCache( + config.shareGroupConfig.shareGroupMaxShareSessions() + ) + + val connectionDisconnectListeners = Seq( + clientMetricsManager.connectionDisconnectListener(), + shareFetchSessionCache.connectionDisconnectListener() + ) + // Create and start the socket server acceptor threads so that the bound port is known. // Delay starting processors until the end of the initialization sequence to ensure // that credentials have been loaded before processing authentications. 
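The new ShareSessionCache wiring above registers a per-connection cleanup hook next to the client metrics one, so share sessions are dropped as soon as their client connection goes away. A purely hypothetical sketch of that pattern; the trait and class below are stand-ins invented for illustration, not Kafka's actual listener interface or cache implementation:

    // Stand-in for a single-method disconnect listener interface (hypothetical).
    trait DisconnectListenerSketch {
      def onDisconnect(connectionId: String): Unit
    }

    // A component that keeps per-connection state exposes a listener that evicts it on disconnect.
    final class SessionCacheSketch {
      private val sessionsByConnection = scala.collection.concurrent.TrieMap.empty[String, String]
      def register(connectionId: String, memberId: String): Unit =
        sessionsByConnection.put(connectionId, memberId)
      val disconnectListener: DisconnectListenerSketch = new DisconnectListenerSketch {
        override def onDisconnect(connectionId: String): Unit =
          sessionsByConnection.remove(connectionId)
      }
    }

    // The server aggregates the hooks from every interested component and hands them to the socket layer.
    val cache = new SessionCacheSketch
    val listeners: Seq[DisconnectListenerSketch] = Seq(cache.disconnectListener)
    listeners.foreach(_.onDisconnect("conn-1"))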
@@ -274,7 +277,7 @@ class BrokerServer( clientQuotaMetadataManager = new ClientQuotaMetadataManager(quotaManagers, socketServer.connectionQuotas) val listenerInfo = ListenerInfo.create(Optional.of(config.interBrokerListenerName.value()), - config.effectiveAdvertisedBrokerListeners.map(_.toPublic()).asJava). + config.effectiveAdvertisedBrokerListeners.asJava). withWildcardHostnamesResolved(). withEphemeralPortsCorrected(name => socketServer.boundPort(new ListenerName(name))) @@ -345,8 +348,6 @@ class BrokerServer( logDirFailureChannel = logDirFailureChannel, alterPartitionManager = alterPartitionManager, brokerTopicStats = brokerTopicStats, - isShuttingDown = isShuttingDown, - threadNamePrefix = None, // The ReplicaManager only runs on the broker, and already includes the ID in thread names. delayedRemoteFetchPurgatoryParam = None, brokerEpochSupplier = () => lifecycleManager.brokerEpoch, addPartitionsToTxnManager = Some(addPartitionsToTxnManager), @@ -386,7 +387,7 @@ class BrokerServer( autoTopicCreationManager = new DefaultAutoTopicCreationManager( config, clientToControllerChannelManager, groupCoordinator, - transactionCoordinator, shareCoordinator) + transactionCoordinator, shareCoordinator, time) dynamicConfigHandlers = Map[ConfigType, ConfigHandler]( ConfigType.TOPIC -> new TopicConfigHandler(replicaManager, config, quotaManagers), @@ -403,7 +404,7 @@ class BrokerServer( config, "heartbeat", s"broker-${config.nodeId}-", - config.brokerSessionTimeoutMs / 2 // KAFKA-14392 + config.brokerHeartbeatIntervalMs ) lifecycleManager.start( () => sharedServer.loader.lastAppliedOffset(), @@ -426,10 +427,6 @@ class BrokerServer( )) val fetchManager = new FetchManager(Time.SYSTEM, new FetchSessionCache(fetchSessionCacheShards)) - val shareFetchSessionCache : ShareSessionCache = new ShareSessionCache( - config.shareGroupConfig.shareGroupMaxGroups * config.groupCoordinatorConfig.shareGroupMaxSize, - KafkaBroker.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS) - sharePartitionManager = new SharePartitionManager( replicaManager, time, @@ -437,6 +434,7 @@ class BrokerServer( config.shareGroupConfig.shareGroupRecordLockDurationMs, config.shareGroupConfig.shareGroupDeliveryCountLimit, config.shareGroupConfig.shareGroupPartitionMaxRecordLocks, + config.remoteLogManagerConfig.remoteFetchMaxWaitMs().toLong, persister, groupConfigManager, brokerTopicStats @@ -471,7 +469,10 @@ class BrokerServer( socketServer.dataPlaneRequestChannel, dataPlaneRequestProcessor, time, config.numIoThreads, "RequestHandlerAvgIdlePercent") - metadataPublishers.add(new MetadataVersionConfigValidator(config, sharedServer.metadataPublishingFaultHandler)) + metadataPublishers.add(new MetadataVersionConfigValidator(config.brokerId, + () => config.processRoles.contains(ProcessRole.BrokerRole) && config.logDirs().size() > 1, + sharedServer.metadataPublishingFaultHandler + )) brokerMetadataPublisher = new BrokerMetadataPublisher(config, metadataCache, logManager, @@ -479,6 +480,7 @@ class BrokerServer( groupCoordinator, transactionCoordinator, shareCoordinator, + sharePartitionManager, new DynamicConfigPublisher( config, sharedServer.metadataPublishingFaultHandler, @@ -498,12 +500,12 @@ class BrokerServer( quotaManagers, ), new ScramPublisher( - config, + config.nodeId, sharedServer.metadataPublishingFaultHandler, "broker", credentialProvider), new DelegationTokenPublisher( - config, + config.nodeId, sharedServer.metadataPublishingFaultHandler, "broker", tokenManager), @@ -611,9 +613,11 @@ class BrokerServer( ) val loader = new 
CoordinatorLoaderImpl[CoordinatorRecord]( time, - replicaManager, + tp => replicaManager.getLog(tp).toJava, + tp => replicaManager.getLogEndOffset(tp).map(Long.box).toJava, serde, - config.groupCoordinatorConfig.offsetsLoadBufferSize + config.groupCoordinatorConfig.offsetsLoadBufferSize, + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS ) val writer = new CoordinatorPartitionWriter( replicaManager @@ -631,41 +635,38 @@ class BrokerServer( .build() } - private def createShareCoordinator(): Option[ShareCoordinator] = { - if (config.shareGroupConfig.isShareGroupEnabled && - config.shareGroupConfig.shareGroupPersisterClassName().nonEmpty) { - val time = Time.SYSTEM - val timer = new SystemTimerReaper( - "share-coordinator-reaper", - new SystemTimer("share-coordinator") - ) + private def createShareCoordinator(): ShareCoordinator = { + val time = Time.SYSTEM + val timer = new SystemTimerReaper( + "share-coordinator-reaper", + new SystemTimer("share-coordinator") + ) - val serde = new ShareCoordinatorRecordSerde - val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( - time, - replicaManager, - serde, - config.shareCoordinatorConfig.shareCoordinatorLoadBufferSize() - ) - val writer = new CoordinatorPartitionWriter( - replicaManager - ) - Some(new ShareCoordinatorService.Builder(config.brokerId, config.shareCoordinatorConfig) - .withTimer(timer) - .withTime(time) - .withLoader(loader) - .withWriter(writer) - .withCoordinatorRuntimeMetrics(new ShareCoordinatorRuntimeMetrics(metrics)) - .withCoordinatorMetrics(new ShareCoordinatorMetrics(metrics)) - .build()) - } else { - None - } + val serde = new ShareCoordinatorRecordSerde + val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( + time, + tp => replicaManager.getLog(tp).toJava, + tp => replicaManager.getLogEndOffset(tp).map(Long.box).toJava, + serde, + config.shareCoordinatorConfig.shareCoordinatorLoadBufferSize(), + CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS + ) + val writer = new CoordinatorPartitionWriter( + replicaManager + ) + new ShareCoordinatorService.Builder(config.brokerId, config.shareCoordinatorConfig) + .withTimer(timer) + .withTime(time) + .withLoader(loader) + .withWriter(writer) + .withCoordinatorRuntimeMetrics(new ShareCoordinatorRuntimeMetrics(metrics)) + .withCoordinatorMetrics(new ShareCoordinatorMetrics(metrics)) + .withShareGroupEnabledConfigSupplier(() => config.shareGroupConfig.isShareGroupEnabled) + .build() } private def createShareStatePersister(): Persister = { - if (config.shareGroupConfig.isShareGroupEnabled && - config.shareGroupConfig.shareGroupPersisterClassName.nonEmpty) { + if (config.shareGroupConfig.shareGroupPersisterClassName.nonEmpty) { val klass = Utils.loadClass(config.shareGroupConfig.shareGroupPersisterClassName, classOf[Object]).asInstanceOf[Class[Persister]] if (klass.getName.equals(classOf[DefaultStatePersister].getName)) { @@ -673,7 +674,7 @@ class BrokerServer( .newInstance( new PersisterStateManager( NetworkUtils.buildNetworkClient("Persister", config, metrics, Time.SYSTEM, new LogContext(s"[Persister broker=${config.brokerId}]")), - new ShareCoordinatorMetadataCacheHelperImpl(metadataCache, key => shareCoordinator.get.partitionFor(key), config.interBrokerListenerName), + new ShareCoordinatorMetadataCacheHelperImpl(metadataCache, key => shareCoordinator.partitionFor(key), config.interBrokerListenerName), Time.SYSTEM, new SystemTimerReaper( "persister-state-manager-reaper", @@ -700,10 +701,7 @@ class BrokerServer( val listenerName = 
config.remoteLogManagerConfig.remoteLogMetadataManagerListenerName() val endpoint = if (listenerName != null) { Some(listenerInfo.listeners().values().stream - .filter(e => - e.listenerName().isPresent && - ListenerName.normalised(e.listenerName().get()).equals(ListenerName.normalised(listenerName)) - ) + .filter(e => ListenerName.normalised(e.listener()).equals(ListenerName.normalised(listenerName))) .findFirst() .orElseThrow(() => new ConfigException(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, listenerName, "Should be set as a listener name within valid broker listener name list: " + listenerInfo.listeners().values()))) @@ -711,7 +709,7 @@ class BrokerServer( None } - val rlm = new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.head, clusterId, time, + val rlm = new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.get(0), clusterId, time, (tp: TopicPartition) => logManager.getLog(tp).toJava, (tp: TopicPartition, remoteLogStartOffset: java.lang.Long) => { logManager.getLog(tp).foreach { log => @@ -784,8 +782,11 @@ class BrokerServer( CoreUtils.swallow(groupConfigManager.close(), this) if (groupCoordinator != null) CoreUtils.swallow(groupCoordinator.shutdown(), this) - if (shareCoordinator.isDefined) - CoreUtils.swallow(shareCoordinator.get.shutdown(), this) + if (shareCoordinator != null) + CoreUtils.swallow(shareCoordinator.shutdown(), this) + + if (autoTopicCreationManager != null) + CoreUtils.swallow(autoTopicCreationManager.close(), this) if (assignmentsManager != null) CoreUtils.swallow(assignmentsManager.close(), this) @@ -823,8 +824,6 @@ class BrokerServer( if (persister != null) CoreUtils.swallow(persister.stop(), this) - isShuttingDown.set(false) - if (lifecycleManager != null) CoreUtils.swallow(lifecycleManager.close(), this) diff --git a/core/src/main/scala/kafka/server/ClientQuotaManager.scala b/core/src/main/scala/kafka/server/ClientQuotaManager.scala deleted file mode 100644 index 894e410fc637d..0000000000000 --- a/core/src/main/scala/kafka/server/ClientQuotaManager.scala +++ /dev/null @@ -1,652 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.server - -import java.{lang, util} -import java.util.concurrent.{ConcurrentHashMap, DelayQueue, TimeUnit} -import java.util.concurrent.locks.ReentrantReadWriteLock -import java.util.function.Consumer -import kafka.network.RequestChannel -import kafka.server.ClientQuotaManager._ -import kafka.utils.Logging -import org.apache.kafka.common.internals.Plugin -import org.apache.kafka.common.{Cluster, MetricName} -import org.apache.kafka.common.metrics._ -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.metrics.stats.{Avg, CumulativeSum, Rate} -import org.apache.kafka.common.security.auth.KafkaPrincipal -import org.apache.kafka.common.utils.{Sanitizer, Time} -import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaEntity, ClientQuotaType, QuotaType, QuotaUtils, SensorAccess, ThrottleCallback, ThrottledChannel} -import org.apache.kafka.server.util.ShutdownableThread -import org.apache.kafka.network.Session - -import scala.jdk.CollectionConverters._ - -/** - * Represents the sensors aggregated per client - * @param metricTags Quota metric tags for the client - * @param quotaSensor @Sensor that tracks the quota - * @param throttleTimeSensor @Sensor that tracks the throttle time - */ -case class ClientSensors(metricTags: Map[String, String], quotaSensor: Sensor, throttleTimeSensor: Sensor) - -object QuotaTypes { - val NoQuotas = 0 - val ClientIdQuotaEnabled = 1 - val UserQuotaEnabled = 2 - val UserClientIdQuotaEnabled = 4 - val CustomQuotas = 8 // No metric update optimizations are used with custom quotas -} - -object ClientQuotaManager { - // Purge sensors after 1 hour of inactivity - val InactiveSensorExpirationTimeSeconds = 3600 - private val DefaultName = "" - val DefaultClientIdQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(None, Some(DefaultClientIdEntity)) - val DefaultUserQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(Some(DefaultUserEntity), None) - val DefaultUserClientIdQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(Some(DefaultUserEntity), Some(DefaultClientIdEntity)) - - sealed trait BaseUserEntity extends ClientQuotaEntity.ConfigEntity - - case class UserEntity(sanitizedUser: String) extends BaseUserEntity { - override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.USER - override def name: String = Sanitizer.desanitize(sanitizedUser) - override def toString: String = s"user $sanitizedUser" - } - - case class ClientIdEntity(clientId: String) extends ClientQuotaEntity.ConfigEntity { - override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.CLIENT_ID - override def name: String = clientId - override def toString: String = s"client-id $clientId" - } - - case object DefaultUserEntity extends BaseUserEntity { - override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.DEFAULT_USER - override def name: String = DefaultName - override def toString: String = "default user" - } - - case object DefaultClientIdEntity extends ClientQuotaEntity.ConfigEntity { - override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.DEFAULT_CLIENT_ID - override def name: String = DefaultName - override def toString: String = "default client-id" - } - - case class KafkaQuotaEntity(userEntity: Option[BaseUserEntity], - clientIdEntity: Option[ClientQuotaEntity.ConfigEntity]) extends ClientQuotaEntity { - override def 
configEntities: util.List[ClientQuotaEntity.ConfigEntity] = - (userEntity.toList ++ clientIdEntity.toList).asJava - - def sanitizedUser: String = userEntity.map { - case entity: UserEntity => entity.sanitizedUser - case DefaultUserEntity => DefaultName - }.getOrElse("") - - def clientId: String = clientIdEntity.map(_.name).getOrElse("") - - override def toString: String = { - val user = userEntity.map(_.toString).getOrElse("") - val clientId = clientIdEntity.map(_.toString).getOrElse("") - s"$user $clientId".trim - } - } - - object DefaultTags { - val User = "user" - val ClientId = "client-id" - } -} - -/** - * Helper class that records per-client metrics. It is also responsible for maintaining Quota usage statistics - * for all clients. - *

- * Quotas can be set at <user, client-id>, user or client-id levels. For a given client connection,
- * the most specific quota matching the connection will be applied. For example, if both a <user, client-id>
- * and a user quota match a connection, the <user, client-id> quota will be used. Otherwise, user quota takes
- * precedence over client-id quota. The order of precedence is:
- *   • /config/users/<user>/clients/<client-id>
- *   • /config/users/<user>/clients/<default>
- *   • /config/users/<user>
- *   • /config/users/<default>/clients/<client-id>
- *   • /config/users/<default>/clients/<default>
- *   • /config/users/<default>
- *   • /config/clients/<client-id>
- *   • /config/clients/<default>
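The cascade in this (removed) scaladoc is easiest to see with a concrete lookup. A purely hypothetical sketch, not the removed resolution code: the entity keys, the 1 MB/s value and the "<default>"/"" encodings below are invented for illustration. With an override only at the /config/users/<default>/clients/<client-id> level, a connection for user "userX" with client id "clientA" falls through the first three levels and matches at the fourth:

    // Hypothetical illustration of the precedence order; "<default>" marks a default entity, "" an absent level.
    val overrides = Map[(String, String), Double](
      ("<default>", "clientA") -> 1048576.0 // override at /config/users/<default>/clients/clientA (1 MB/s)
    )

    def resolveQuota(user: String, clientId: String): Option[Double] =
      Seq(
        (user, clientId),           // /config/users/<user>/clients/<client-id>
        (user, "<default>"),        // /config/users/<user>/clients/<default>
        (user, ""),                 // /config/users/<user>
        ("<default>", clientId),    // /config/users/<default>/clients/<client-id>
        ("<default>", "<default>"), // /config/users/<default>/clients/<default>
        ("<default>", ""),          // /config/users/<default>
        ("", clientId),             // /config/clients/<client-id>
        ("", "<default>")           // /config/clients/<default>
      ).iterator.map(key => overrides.get(key)).collectFirst { case Some(limit) => limit }

    assert(resolveQuota("userX", "clientA").contains(1048576.0)) // matched at the fourth level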
    - * Quota limits including defaults may be updated dynamically. The implementation is optimized for the case - * where a single level of quotas is configured. - * - * @param config @ClientQuotaManagerConfig quota configs - * @param metrics @Metrics Metrics instance - * @param quotaType Quota type of this quota manager - * @param time @Time object to use - * @param threadNamePrefix The thread prefix to use - * @param clientQuotaCallbackPlugin An optional @ClientQuotaCallback and - * wrap it in a {@link org.apache.kafka.common.internals.Plugin} - */ -class ClientQuotaManager(private val config: ClientQuotaManagerConfig, - private val metrics: Metrics, - private val quotaType: QuotaType, - private val time: Time, - private val threadNamePrefix: String, - private val clientQuotaCallbackPlugin: Option[Plugin[ClientQuotaCallback]] = None) extends Logging { - - private val lock = new ReentrantReadWriteLock() - private val sensorAccessor = new SensorAccess(lock, metrics) - private val quotaCallback = clientQuotaCallbackPlugin match { - case Some(plugin) => plugin.get() - case None => new DefaultQuotaCallback - } - private val clientQuotaType = QuotaType.toClientQuotaType(quotaType) - - @volatile - private var quotaTypesEnabled = clientQuotaCallbackPlugin match { - case Some(_) => QuotaTypes.CustomQuotas - case None => QuotaTypes.NoQuotas - } - - private val delayQueueSensor = metrics.sensor(quotaType.toString + "-delayQueue") - delayQueueSensor.add(metrics.metricName("queue-size", quotaType.toString, - "Tracks the size of the delay queue"), new CumulativeSum()) - - private val delayQueue = new DelayQueue[ThrottledChannel]() - private[server] val throttledChannelReaper = new ThrottledChannelReaper(delayQueue, threadNamePrefix) - start() // Use start method to keep spotbugs happy - private def start(): Unit = { - throttledChannelReaper.start() - } - - /** - * Reaper thread that triggers channel unmute callbacks on all throttled channels - * @param delayQueue DelayQueue to dequeue from - */ - class ThrottledChannelReaper(delayQueue: DelayQueue[ThrottledChannel], prefix: String) extends ShutdownableThread( - s"${prefix}ThrottledChannelReaper-$quotaType", false) { - - override def doWork(): Unit = { - val throttledChannel: ThrottledChannel = delayQueue.poll(1, TimeUnit.SECONDS) - if (throttledChannel != null) { - // Decrement the size of the delay queue - delayQueueSensor.record(-1) - // Notify the socket server that throttling is done for this channel, so that it can try to unmute the channel. - throttledChannel.notifyThrottlingDone() - } - } - } - - /** - * Returns true if any quotas are enabled for this quota manager. This is used - * to determine if quota related metrics should be created. - * Note: If any quotas (static defaults, dynamic defaults or quota overrides) have - * been configured for this broker at any time for this quota type, quotasEnabled will - * return true until the next broker restart, even if all quotas are subsequently deleted. - */ - def quotasEnabled: Boolean = quotaTypesEnabled != QuotaTypes.NoQuotas - - /** - * See {recordAndGetThrottleTimeMs}. - */ - def maybeRecordAndGetThrottleTimeMs(request: RequestChannel.Request, value: Double, timeMs: Long): Int = { - maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId, value, timeMs) - } - - /** - * See {recordAndGetThrottleTimeMs}. - */ - def maybeRecordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { - // Record metrics only if quotas are enabled. 
- if (quotasEnabled) { - recordAndGetThrottleTimeMs(session, clientId, value, timeMs) - } else { - 0 - } - } - - /** - * Records that a user/clientId accumulated or would like to accumulate the provided amount at the - * specified time, returns throttle time in milliseconds. - * - * @param session The session from which the user is extracted - * @param clientId The client id - * @param value The value to accumulate - * @param timeMs The time at which to accumulate the value - * @return The throttle time in milliseconds defines as the time to wait until the average - * rate gets back to the defined quota - */ - def recordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - try { - clientSensors.quotaSensor.record(value, timeMs, true) - 0 - } catch { - case e: QuotaViolationException => - val throttleTimeMs = throttleTime(e, timeMs).toInt - debug(s"Quota violated for sensor (${clientSensors.quotaSensor.name}). Delay time: ($throttleTimeMs)") - throttleTimeMs - } - } - - /** - * Records that a user/clientId changed some metric being throttled without checking for - * quota violation. The aggregate value will subsequently be used for throttling when the - * next request is processed. - */ - def recordNoThrottle(session: Session, clientId: String, value: Double): Unit = { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - clientSensors.quotaSensor.record(value, time.milliseconds(), false) - } - - /** - * "Unrecord" the given value that has already been recorded for the given user/client by recording a negative value - * of the same quantity. - * - * For a throttled fetch, the broker should return an empty response and thus should not record the value. Ideally, - * we would like to compute the throttle time before actually recording the value, but the current Sensor code - * couples value recording and quota checking very tightly. As a workaround, we will unrecord the value for the fetch - * in case of throttling. Rate keeps the sum of values that fall in each time window, so this should bring the - * overall sum back to the previous value. - */ - def unrecordQuotaSensor(request: RequestChannel.Request, value: Double, timeMs: Long): Unit = { - val clientSensors = getOrCreateQuotaSensors(request.session, request.header.clientId) - clientSensors.quotaSensor.record(value * -1, timeMs, false) - } - - /** - * Returns maximum value that could be recorded without guaranteed throttling. - * Recording any larger value will always be throttled, even if no other values were recorded in the quota window. - * This is used for deciding the maximum bytes that can be fetched at once - */ - def getMaxValueInQuotaWindow(session: Session, clientId: String): Double = { - if (quotasEnabled) { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - Option(quotaCallback.quotaLimit(clientQuotaType, clientSensors.metricTags.asJava)) - .map(_.toDouble * (config.numQuotaSamples - 1) * config.quotaWindowSizeSeconds) - .getOrElse(Double.MaxValue) - } else { - Double.MaxValue - } - } - - /** - * Throttle a client by muting the associated channel for the given throttle time. - * - * @param request client request - * @param throttleTimeMs Duration in milliseconds for which the channel is to be muted. 
- * @param throttleCallback Callback for channel throttling - */ - def throttle( - request: RequestChannel.Request, - throttleCallback: ThrottleCallback, - throttleTimeMs: Int - ): Unit = { - if (throttleTimeMs > 0) { - val clientSensors = getOrCreateQuotaSensors(request.session, request.headerForLoggingOrThrottling().clientId) - clientSensors.throttleTimeSensor.record(throttleTimeMs) - val throttledChannel = new ThrottledChannel(time, throttleTimeMs, throttleCallback) - delayQueue.add(throttledChannel) - delayQueueSensor.record() - debug("Channel throttled for sensor (%s). Delay time: (%d)".format(clientSensors.quotaSensor.name(), throttleTimeMs)) - } - } - - /** - * Returns the quota for the client with the specified (non-encoded) user principal and client-id. - * - * Note: this method is expensive, it is meant to be used by tests only - */ - def quota(user: String, clientId: String): Quota = { - val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user) - quota(userPrincipal, clientId) - } - - /** - * Returns the quota for the client with the specified user principal and client-id. - * - * Note: this method is expensive, it is meant to be used by tests only - */ - def quota(userPrincipal: KafkaPrincipal, clientId: String): Quota = { - val metricTags = quotaCallback.quotaMetricTags(clientQuotaType, userPrincipal, clientId) - Quota.upperBound(quotaLimit(metricTags)) - } - - private def quotaLimit(metricTags: util.Map[String, String]): Double = { - Option(quotaCallback.quotaLimit(clientQuotaType, metricTags)).map(_.toDouble).getOrElse(Long.MaxValue) - } - - /** - * This calculates the amount of time needed to bring the metric within quota - * assuming that no new metrics are recorded. - * - * See {QuotaUtils.throttleTime} for the details. - */ - protected def throttleTime(e: QuotaViolationException, timeMs: Long): Long = { - QuotaUtils.throttleTime(e, timeMs) - } - - /** - * This function either returns the sensors for a given client id or creates them if they don't exist - * First sensor of the tuple is the quota enforcement sensor. 
Second one is the throttle time sensor - */ - def getOrCreateQuotaSensors(session: Session, clientId: String): ClientSensors = { - // Use cached sanitized principal if using default callback - val metricTags = quotaCallback match { - case callback: DefaultQuotaCallback => callback.quotaMetricTags(session.sanitizedUser, clientId) - case _ => quotaCallback.quotaMetricTags(clientQuotaType, session.principal, clientId).asScala.toMap - } - // Names of the sensors to access - val sensors = ClientSensors( - metricTags, - sensorAccessor.getOrCreate( - getQuotaSensorName(metricTags), - ClientQuotaManager.InactiveSensorExpirationTimeSeconds, - sensor => registerQuotaMetrics(metricTags)(sensor) - ), - sensorAccessor.getOrCreate( - getThrottleTimeSensorName(metricTags), - ClientQuotaManager.InactiveSensorExpirationTimeSeconds, - sensor => sensor.add(throttleMetricName(metricTags), new Avg) - ) - ) - if (quotaCallback.quotaResetRequired(clientQuotaType)) - updateQuotaMetricConfigs() - sensors - } - - protected def registerQuotaMetrics(metricTags: Map[String, String])(sensor: Sensor): Unit = { - sensor.add( - clientQuotaMetricName(metricTags), - new Rate, - getQuotaMetricConfig(metricTags) - ) - } - - private def metricTagsToSensorSuffix(metricTags: Map[String, String]): String = - metricTags.values.mkString(":") - - private def getThrottleTimeSensorName(metricTags: Map[String, String]): String = - s"${quotaType}ThrottleTime-${metricTagsToSensorSuffix(metricTags)}" - - private def getQuotaSensorName(metricTags: Map[String, String]): String = - s"$quotaType-${metricTagsToSensorSuffix(metricTags)}" - - protected def getQuotaMetricConfig(metricTags: Map[String, String]): MetricConfig = { - getQuotaMetricConfig(quotaLimit(metricTags.asJava)) - } - - private def getQuotaMetricConfig(quotaLimit: Double): MetricConfig = { - new MetricConfig() - .timeWindow(config.quotaWindowSizeSeconds, TimeUnit.SECONDS) - .samples(config.numQuotaSamples) - .quota(new Quota(quotaLimit, true)) - } - - protected def getOrCreateSensor(sensorName: String, expirationTimeSeconds: Long, registerMetrics: Consumer[Sensor]): Sensor = { - sensorAccessor.getOrCreate( - sensorName, - expirationTimeSeconds, - registerMetrics) - } - - /** - * Overrides quotas for , or or the dynamic defaults - * for any of these levels. - * - * @param userEntity user to override if quota applies to or - * @param clientEntity sanitized client entity to override if quota applies to or - * @param quota custom quota to apply or None if quota override is being removed - */ - def updateQuota( - userEntity: Option[BaseUserEntity], - clientEntity: Option[ClientQuotaEntity.ConfigEntity], - quota: Option[Quota] - ): Unit = { - /* - * Acquire the write lock to apply changes in the quota objects. - * This method changes the quota in the overriddenQuota map and applies the update on the actual KafkaMetric object (if it exists). - * If the KafkaMetric hasn't been created, the most recent value will be used from the overriddenQuota map. - * The write lock prevents quota update and creation at the same time. 
It also guards against concurrent quota change - * notifications - */ - lock.writeLock().lock() - try { - val quotaEntity = KafkaQuotaEntity(userEntity, clientEntity) - - if (userEntity.nonEmpty) { - if (quotaEntity.clientIdEntity.nonEmpty) - quotaTypesEnabled |= QuotaTypes.UserClientIdQuotaEnabled - else - quotaTypesEnabled |= QuotaTypes.UserQuotaEnabled - } else if (clientEntity.nonEmpty) - quotaTypesEnabled |= QuotaTypes.ClientIdQuotaEnabled - - quota match { - case Some(newQuota) => quotaCallback.updateQuota(clientQuotaType, quotaEntity, newQuota.bound) - case None => quotaCallback.removeQuota(clientQuotaType, quotaEntity) - } - val updatedEntity = if (userEntity.contains(DefaultUserEntity) || clientEntity.contains(DefaultClientIdEntity)) - None // more than one entity may need updating, so `updateQuotaMetricConfigs` will go through all metrics - else - Some(quotaEntity) - updateQuotaMetricConfigs(updatedEntity) - - } finally { - lock.writeLock().unlock() - } - } - - /** - * Updates metrics configs. This is invoked when quota configs are updated when partitions leaders change - * and custom callbacks that implement partition-based quotas have updated quotas. - * - * @param updatedQuotaEntity If set to one entity and quotas have only been enabled at one - * level, then an optimized update is performed with a single metric update. If None is provided, - * or if custom callbacks are used or if multi-level quotas have been enabled, all metric configs - * are checked and updated if required. - */ - def updateQuotaMetricConfigs(updatedQuotaEntity: Option[KafkaQuotaEntity] = None): Unit = { - val allMetrics = metrics.metrics() - - // If using custom quota callbacks or if multiple-levels of quotas are defined or - // if this is a default quota update, traverse metrics to find all affected values. - // Otherwise, update just the single matching one. - val singleUpdate = quotaTypesEnabled match { - case QuotaTypes.NoQuotas | QuotaTypes.ClientIdQuotaEnabled | QuotaTypes.UserQuotaEnabled | QuotaTypes.UserClientIdQuotaEnabled => - updatedQuotaEntity.nonEmpty - case _ => false - } - if (singleUpdate) { - val quotaEntity = updatedQuotaEntity.getOrElse(throw new IllegalStateException("Quota entity not specified")) - val user = quotaEntity.sanitizedUser - val clientId = quotaEntity.clientId - val metricTags = Map(DefaultTags.User -> user, DefaultTags.ClientId -> clientId) - - val quotaMetricName = clientQuotaMetricName(metricTags) - // Change the underlying metric config if the sensor has been created - val metric = allMetrics.get(quotaMetricName) - if (metric != null) { - Option(quotaLimit(metricTags.asJava)).foreach { newQuota => - info(s"Sensor for $quotaEntity already exists. Changing quota to $newQuota in MetricConfig") - metric.config(getQuotaMetricConfig(newQuota)) - } - } - } else { - val quotaMetricName = clientQuotaMetricName(Map.empty) - allMetrics.forEach { (metricName, metric) => - if (metricName.name == quotaMetricName.name && metricName.group == quotaMetricName.group) { - val metricTags = metricName.tags - Option(quotaLimit(metricTags)).foreach { newQuota => - if (newQuota != metric.config.quota.bound) { - info(s"Sensor for quota-id $metricTags already exists. Setting quota to $newQuota in MetricConfig") - metric.config(getQuotaMetricConfig(newQuota)) - } - } - } - } - } - } - - /** - * Returns the MetricName of the metric used for the quota. The name is used to create the - * metric but also to find the metric when the quota is changed. 
- */ - protected def clientQuotaMetricName(quotaMetricTags: Map[String, String]): MetricName = { - metrics.metricName("byte-rate", quotaType.toString, - "Tracking byte-rate per user/client-id", - quotaMetricTags.asJava) - } - - private def throttleMetricName(quotaMetricTags: Map[String, String]): MetricName = { - metrics.metricName("throttle-time", - quotaType.toString, - "Tracking average throttle-time per user/client-id", - quotaMetricTags.asJava) - } - - def initiateShutdown(): Unit = { - throttledChannelReaper.initiateShutdown() - // improve shutdown time by waking up any ShutdownableThread(s) blocked on poll by sending a no-op - delayQueue.add(new ThrottledChannel(time, 0, new ThrottleCallback { - override def startThrottling(): Unit = {} - override def endThrottling(): Unit = {} - })) - } - - def shutdown(): Unit = { - initiateShutdown() - throttledChannelReaper.awaitShutdown() - } - - private class DefaultQuotaCallback extends ClientQuotaCallback { - private val overriddenQuotas = new ConcurrentHashMap[ClientQuotaEntity, Quota]() - - override def configure(configs: util.Map[String, _]): Unit = {} - - override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = { - quotaMetricTags(Sanitizer.sanitize(principal.getName), clientId).asJava - } - - override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = { - val sanitizedUser = metricTags.get(DefaultTags.User) - val clientId = metricTags.get(DefaultTags.ClientId) - var quota: Quota = null - - if (sanitizedUser != null && clientId != null) { - val userEntity = Some(UserEntity(sanitizedUser)) - val clientIdEntity = Some(ClientIdEntity(clientId)) - if (sanitizedUser.nonEmpty && clientId.nonEmpty) { - // /config/users//clients/ - quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, clientIdEntity)) - if (quota == null) { - // /config/users//clients/ - quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, Some(DefaultClientIdEntity))) - } - if (quota == null) { - // /config/users//clients/ - quota = overriddenQuotas.get(KafkaQuotaEntity(Some(DefaultUserEntity), clientIdEntity)) - } - if (quota == null) { - // /config/users//clients/ - quota = overriddenQuotas.get(DefaultUserClientIdQuotaEntity) - } - } else if (sanitizedUser.nonEmpty) { - // /config/users/ - quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, None)) - if (quota == null) { - // /config/users/ - quota = overriddenQuotas.get(DefaultUserQuotaEntity) - } - } else if (clientId.nonEmpty) { - // /config/clients/ - quota = overriddenQuotas.get(KafkaQuotaEntity(None, clientIdEntity)) - if (quota == null) { - // /config/clients/ - quota = overriddenQuotas.get(DefaultClientIdQuotaEntity) - } - } - } - if (quota == null) null else quota.bound - } - - override def updateClusterMetadata(cluster: Cluster): Boolean = { - // Default quota callback does not use any cluster metadata - false - } - - override def updateQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity, newValue: Double): Unit = { - val quotaEntity = entity.asInstanceOf[KafkaQuotaEntity] - info(s"Changing $quotaType quota for $quotaEntity to $newValue") - overriddenQuotas.put(quotaEntity, new Quota(newValue, true)) - } - - override def removeQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity): Unit = { - val quotaEntity = entity.asInstanceOf[KafkaQuotaEntity] - info(s"Removing $quotaType quota for $quotaEntity") - overriddenQuotas.remove(quotaEntity) - } - - override def 
quotaResetRequired(quotaType: ClientQuotaType): Boolean = false - - def quotaMetricTags(sanitizedUser: String, clientId: String) : Map[String, String] = { - val (userTag, clientIdTag) = quotaTypesEnabled match { - case QuotaTypes.NoQuotas | QuotaTypes.ClientIdQuotaEnabled => - ("", clientId) - case QuotaTypes.UserQuotaEnabled => - (sanitizedUser, "") - case QuotaTypes.UserClientIdQuotaEnabled => - (sanitizedUser, clientId) - case _ => - val userEntity = Some(UserEntity(sanitizedUser)) - val clientIdEntity = Some(ClientIdEntity(clientId)) - - var metricTags = (sanitizedUser, clientId) - // 1) /config/users//clients/ - if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, clientIdEntity))) { - // 2) /config/users//clients/ - metricTags = (sanitizedUser, clientId) - if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, Some(DefaultClientIdEntity)))) { - // 3) /config/users/ - metricTags = (sanitizedUser, "") - if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, None))) { - // 4) /config/users//clients/ - metricTags = (sanitizedUser, clientId) - if (!overriddenQuotas.containsKey(KafkaQuotaEntity(Some(DefaultUserEntity), clientIdEntity))) { - // 5) /config/users//clients/ - metricTags = (sanitizedUser, clientId) - if (!overriddenQuotas.containsKey(DefaultUserClientIdQuotaEntity)) { - // 6) /config/users/ - metricTags = (sanitizedUser, "") - if (!overriddenQuotas.containsKey(DefaultUserQuotaEntity)) { - // 7) /config/clients/ - // 8) /config/clients/ - metricTags = ("", clientId) - } - } - } - } - } - } - metricTags - } - Map(DefaultTags.User -> userTag, DefaultTags.ClientId -> clientIdTag) - } - - override def close(): Unit = {} - } -} diff --git a/core/src/main/scala/kafka/server/ConfigAdminManager.scala b/core/src/main/scala/kafka/server/ConfigAdminManager.scala index 45a68d2d03634..7394d2cfc43c6 100644 --- a/core/src/main/scala/kafka/server/ConfigAdminManager.scala +++ b/core/src/main/scala/kafka/server/ConfigAdminManager.scala @@ -151,7 +151,7 @@ class ConfigAdminManager(nodeId: Int, } catch { case t: Throwable => val err = ApiError.fromThrowable(t) - info(s"Error preprocessing incrementalAlterConfigs request on $configResource", t) + error(s"Error preprocessing incrementalAlterConfigs request on $configResource", t) results.put(resource, err) } } @@ -252,7 +252,7 @@ class ConfigAdminManager(nodeId: Int, } catch { case t: Throwable => val err = ApiError.fromThrowable(t) - info(s"Error preprocessing alterConfigs request on $configResource: $err") + error(s"Error preprocessing alterConfigs request on ${configResource}: ${err}") results.put(resource, err) } } diff --git a/core/src/main/scala/kafka/server/ConfigHelper.scala b/core/src/main/scala/kafka/server/ConfigHelper.scala index 453ee0dc97233..743937b54fca5 100644 --- a/core/src/main/scala/kafka/server/ConfigHelper.scala +++ b/core/src/main/scala/kafka/server/ConfigHelper.scala @@ -20,9 +20,9 @@ package kafka.server import kafka.network.RequestChannel import java.util.{Collections, Properties} -import kafka.utils.{LoggingController, Logging} +import kafka.utils.Logging import org.apache.kafka.common.acl.AclOperation.DESCRIBE_CONFIGS -import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigResource} +import org.apache.kafka.common.config.{ConfigDef, ConfigResource} import org.apache.kafka.common.errors.{ApiException, InvalidRequestException} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DescribeConfigsRequestData.DescribeConfigsResource @@ -34,7 +34,9 @@ 
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME import org.apache.kafka.common.resource.ResourceType.{CLUSTER, GROUP, TOPIC} import org.apache.kafka.coordinator.group.GroupConfig import org.apache.kafka.metadata.{ConfigRepository, MetadataCache} +import org.apache.kafka.server.ConfigHelperUtils.createResponseConfig import org.apache.kafka.server.config.ServerTopicConfigSynonyms +import org.apache.kafka.server.logger.LoggingController import org.apache.kafka.server.metrics.ClientMetricsConfigs import org.apache.kafka.storage.internals.log.LogConfig @@ -44,10 +46,6 @@ import scala.jdk.OptionConverters.RichOptional class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepository: ConfigRepository) extends Logging { - def allConfigs(config: AbstractConfig): mutable.Map[String, Any] = { - config.originals.asScala.filter(_._2 != null) ++ config.nonInternalValues.asScala - } - def handleDescribeConfigsRequest( request: RequestChannel.Request, authHelper: AuthHelper @@ -85,21 +83,6 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo includeSynonyms: Boolean, includeDocumentation: Boolean): List[DescribeConfigsResponseData.DescribeConfigsResult] = { resourceToConfigNames.map { resource => - - def createResponseConfig(configs: Map[String, Any], - createConfigEntry: (String, Any) => DescribeConfigsResponseData.DescribeConfigsResourceResult): DescribeConfigsResponseData.DescribeConfigsResult = { - val filteredConfigPairs = if (resource.configurationKeys == null || resource.configurationKeys.isEmpty) - configs.toBuffer - else - configs.filter { case (configName, _) => - resource.configurationKeys.asScala.contains(configName) - }.toBuffer - - val configEntries = filteredConfigPairs.map { case (name, value) => createConfigEntry(name, value) } - new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(Errors.NONE.code) - .setConfigs(configEntries.asJava) - } - try { val configResult = ConfigResource.Type.forId(resource.resourceType) match { case ConfigResource.Type.TOPIC => @@ -108,7 +91,7 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo if (metadataCache.contains(topic)) { val topicProps = configRepository.topicConfig(topic) val logConfig = LogConfig.fromProps(config.extractLogConfigMap, topicProps) - createResponseConfig(allConfigs(logConfig), createTopicConfigEntry(logConfig, topicProps, includeSynonyms, includeDocumentation)) + createResponseConfig(resource, logConfig, createTopicConfigEntry(logConfig, topicProps, includeSynonyms, includeDocumentation)(_, _)) } else { new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) .setConfigs(Collections.emptyList[DescribeConfigsResponseData.DescribeConfigsResourceResult]) @@ -116,11 +99,11 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo case ConfigResource.Type.BROKER => if (resource.resourceName == null || resource.resourceName.isEmpty) - createResponseConfig(config.dynamicConfig.currentDynamicDefaultConfigs, - createBrokerConfigEntry(perBrokerConfig = false, includeSynonyms, includeDocumentation)) + createResponseConfig(resource, config.dynamicConfig.currentDynamicDefaultConfigs.asJava, + createBrokerConfigEntry(perBrokerConfig = false, includeSynonyms, includeDocumentation)(_, _)) else if (resourceNameToBrokerId(resource.resourceName) == config.brokerId) - createResponseConfig(allConfigs(config), - createBrokerConfigEntry(perBrokerConfig = true, 
includeSynonyms, includeDocumentation)) + createResponseConfig(resource, config, + createBrokerConfigEntry(perBrokerConfig = true, includeSynonyms, includeDocumentation)(_, _)) else throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} or empty string, but received ${resource.resourceName}") @@ -130,8 +113,8 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo else if (resourceNameToBrokerId(resource.resourceName) != config.brokerId) throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} but received ${resource.resourceName}") else - createResponseConfig(LoggingController.loggers, - (name, value) => new DescribeConfigsResponseData.DescribeConfigsResourceResult().setName(name) + createResponseConfig(resource, LoggingController.loggers, + (name: String, value: Object) => new DescribeConfigsResponseData.DescribeConfigsResourceResult().setName(name) .setValue(value.toString).setConfigSource(ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG.id) .setIsSensitive(false).setReadOnly(false).setSynonyms(List.empty.asJava)) @@ -141,7 +124,7 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo } else { val clientMetricsProps = configRepository.config(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, resource.resourceName)) val clientMetricsConfig = ClientMetricsConfigs.fromProps(ClientMetricsConfigs.defaultConfigsMap(), clientMetricsProps) - createResponseConfig(allConfigs(clientMetricsConfig), createClientMetricsConfigEntry(clientMetricsConfig, clientMetricsProps, includeSynonyms, includeDocumentation)) + createResponseConfig(resource, clientMetricsConfig, createClientMetricsConfigEntry(clientMetricsConfig, clientMetricsProps, includeSynonyms, includeDocumentation)(_, _)) } case ConfigResource.Type.GROUP => @@ -151,7 +134,7 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo } else { val groupProps = configRepository.groupConfig(group) val groupConfig = GroupConfig.fromProps(config.groupCoordinatorConfig.extractGroupConfigMap(config.shareGroupConfig), groupProps) - createResponseConfig(allConfigs(groupConfig), createGroupConfigEntry(groupConfig, groupProps, includeSynonyms, includeDocumentation)) + createResponseConfig(resource, groupConfig, createGroupConfigEntry(groupConfig, groupProps, includeSynonyms, includeDocumentation)(_, _)) } case resourceType => throw new InvalidRequestException(s"Unsupported resource type: $resourceType") @@ -321,4 +304,4 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo throw new InvalidRequestException(s"Broker id must be an integer, but it is: $resourceName") } } -} +} \ No newline at end of file diff --git a/core/src/main/scala/kafka/server/ControllerApis.scala b/core/src/main/scala/kafka/server/ControllerApis.scala index 1f29f41808bfd..f10b769d9c12f 100644 --- a/core/src/main/scala/kafka/server/ControllerApis.scala +++ b/core/src/main/scala/kafka/server/ControllerApis.scala @@ -24,7 +24,6 @@ import java.util.Map.Entry import java.util.concurrent.CompletableFuture import java.util.function.Consumer import kafka.network.RequestChannel -import kafka.raft.RaftManager import kafka.server.QuotaFactory.QuotaManagers import kafka.server.logger.RuntimeLoggerManager import kafka.server.metadata.KRaftMetadataCache @@ -41,7 +40,7 @@ import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartit import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult 
import org.apache.kafka.common.message.DeleteTopicsResponseData.{DeletableTopicResult, DeletableTopicResultCollection} import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse -import org.apache.kafka.common.message.{CreateTopicsRequestData, _} +import org.apache.kafka.common.message._ import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors} import org.apache.kafka.common.requests._ @@ -55,9 +54,12 @@ import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.metadata.{BrokerHeartbeatReply, BrokerRegistrationReply} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.server.{ApiVersionManager, DelegationTokenManager, ProcessRole} +import org.apache.kafka.raft.RaftManager +import org.apache.kafka.security.DelegationTokenManager +import org.apache.kafka.server.{ApiVersionManager, ProcessRole} import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.{ApiMessageAndVersion, RequestLocal} +import org.apache.kafka.server.quota.ControllerMutationQuota import scala.jdk.CollectionConverters._ @@ -196,7 +198,7 @@ class ControllerApis( private def handleDeleteTopics(request: RequestChannel.Request): CompletableFuture[Unit] = { val deleteTopicsRequest = request.body[DeleteTopicsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 5) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 5) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, deleteTopicsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -229,9 +231,9 @@ class ControllerApis( // Check if topic deletion is enabled at all. 
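The `newQuotaFor(request.session, request.header, 5)` call above (and the matching calls below for CreateTopics and CreatePartitions) encodes the API version at which each request type switched to strict quota enforcement. A simplified sketch of that dispatch, using stand-in types rather than the relocated Java `ControllerMutationQuotaManager`:

```scala
// Simplified stand-in types; the real quota objects now live in the Java server module.
sealed trait MutationQuota
case object StrictQuota extends MutationQuota      // rejects mutations outright once the budget is exhausted
case object PermissiveQuota extends MutationQuota  // accepts the mutation but reports a throttle time
case object UnboundedQuota extends MutationQuota   // used when controller mutation quotas are disabled

object MutationQuotaSelector {
  // Requests at or above the "strict since" version get strict enforcement; older clients keep
  // the permissive behaviour so they are not surprised by THROTTLING_QUOTA_EXCEEDED errors.
  def newQuotaFor(apiVersion: Short, strictSinceVersion: Short, quotasEnabled: Boolean): MutationQuota =
    if (!quotasEnabled) UnboundedQuota
    else if (apiVersion >= strictSinceVersion) StrictQuota
    else PermissiveQuota
}
```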
if (!config.deleteTopicEnable) { if (apiVersion < 3) { - throw new InvalidRequestException("Topic deletion is disabled.") + return CompletableFuture.failedFuture(new InvalidRequestException("This version does not support topic deletion.")) } else { - throw new TopicDeletionDisabledException() + return CompletableFuture.failedFuture(new TopicDeletionDisabledException()) } } // The first step is to load up the names and IDs that have been provided by the @@ -360,7 +362,7 @@ class ControllerApis( private def handleCreateTopics(request: RequestChannel.Request): CompletableFuture[Unit] = { val createTopicsRequest = request.body[CreateTopicsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 6) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 6) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, createTopicsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -795,7 +797,7 @@ class ControllerApis( authHelper.filterByAuthorized(request.context, ALTER, TOPIC, topics)(n => n) } val createPartitionsRequest = request.body[CreatePartitionsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 3) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 3) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, createPartitionsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -1069,7 +1071,7 @@ class ControllerApis( EndpointType.CONTROLLER, clusterId, () => registrationsPublisher.describeClusterControllers(request.context.listenerName()), - () => raftManager.leaderAndEpoch.leaderId().orElse(-1) + () => raftManager.client.leaderAndEpoch.leaderId().orElse(-1) ) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeClusterResponse(response.setThrottleTimeMs(requestThrottleMs))) diff --git a/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala b/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala deleted file mode 100644 index 138151a7ba55a..0000000000000 --- a/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala +++ /dev/null @@ -1,283 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
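The `handleDeleteTopics` change above replaces a synchronous `throw` with `CompletableFuture.failedFuture`, so validation failures travel down the same asynchronous response path as every other outcome. A small sketch of that shape (the object and method names are placeholders, not the handler's real signature):

```scala
import java.util.concurrent.CompletableFuture
import org.apache.kafka.common.errors.{InvalidRequestException, TopicDeletionDisabledException}

object TopicDeletionGuardSketch {
  // Hypothetical, trimmed-down guard: errors are surfaced through the returned future rather
  // than thrown to the caller, mirroring the handleDeleteTopics change above.
  def checkTopicDeletion(deleteTopicEnable: Boolean, apiVersion: Short): CompletableFuture[Unit] =
    if (!deleteTopicEnable) {
      if (apiVersion < 3)
        CompletableFuture.failedFuture[Unit](new InvalidRequestException("This version does not support topic deletion."))
      else
        CompletableFuture.failedFuture[Unit](new TopicDeletionDisabledException())
    } else {
      CompletableFuture.completedFuture[Unit](())
    }
}
```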
- */ -package kafka.server - -import kafka.network.RequestChannel -import org.apache.kafka.common.MetricName -import org.apache.kafka.common.errors.ThrottlingQuotaExceededException -import org.apache.kafka.common.internals.Plugin -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.metrics.QuotaViolationException -import org.apache.kafka.common.metrics.Sensor -import org.apache.kafka.common.metrics.stats.Rate -import org.apache.kafka.common.metrics.stats.TokenBucket -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.utils.Time -import org.apache.kafka.network.Session -import org.apache.kafka.server.quota.{ClientQuotaCallback, QuotaType} -import org.apache.kafka.server.config.ClientQuotaManagerConfig - -import scala.jdk.CollectionConverters._ - -/** - * The ControllerMutationQuota trait defines a quota for a given user/clientId pair. Such - * quota is not meant to be cached forever but rather during the lifetime of processing - * a request. - */ -trait ControllerMutationQuota { - def isExceeded: Boolean - def record(permits: Double): Unit - def throttleTime: Int -} - -/** - * Default quota used when quota is disabled. - */ -object UnboundedControllerMutationQuota extends ControllerMutationQuota { - override def isExceeded: Boolean = false - override def record(permits: Double): Unit = () - override def throttleTime: Int = 0 -} - -/** - * The AbstractControllerMutationQuota is the base class of StrictControllerMutationQuota and - * PermissiveControllerMutationQuota. - * - * @param time @Time object to use - */ -abstract class AbstractControllerMutationQuota(private val time: Time) extends ControllerMutationQuota { - protected var lastThrottleTimeMs = 0L - private var lastRecordedTimeMs = 0L - - protected def updateThrottleTime(e: QuotaViolationException, timeMs: Long): Unit = { - lastThrottleTimeMs = ControllerMutationQuotaManager.throttleTimeMs(e) - lastRecordedTimeMs = timeMs - } - - override def throttleTime: Int = { - // If a throttle time has been recorded, we adjust it by deducting the time elapsed - // between the recording and now. We do this because `throttleTime` may be called - // long after having recorded it, especially when a request waits in the purgatory. - val deltaTimeMs = time.milliseconds - lastRecordedTimeMs - Math.max(0, lastThrottleTimeMs - deltaTimeMs).toInt - } -} - -/** - * The StrictControllerMutationQuota defines a strict quota for a given user/clientId pair. The - * quota is strict meaning that 1) it does not accept any mutations once the quota is exhausted - * until it gets back to the defined rate; and 2) it does not throttle for any number of mutations - * if quota is not already exhausted. 
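The `throttleTime` implementation above deducts the time a request has already spent waiting (for example in the purgatory) from the recorded throttle. A tiny worked example of that arithmetic, with made-up timestamps:

```scala
object RemainingThrottleSketch extends App {
  // 1500 ms of throttle recorded at t=10000; by t=10400 the request has already waited 400 ms,
  // so only 1100 ms of throttling remain. The result never goes below zero.
  def remainingThrottleMs(lastThrottleTimeMs: Long, lastRecordedTimeMs: Long, nowMs: Long): Int =
    math.max(0L, lastThrottleTimeMs - (nowMs - lastRecordedTimeMs)).toInt

  assert(remainingThrottleMs(lastThrottleTimeMs = 1500, lastRecordedTimeMs = 10000, nowMs = 10400) == 1100)
  assert(remainingThrottleMs(lastThrottleTimeMs = 1500, lastRecordedTimeMs = 10000, nowMs = 12000) == 0)
}
```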
- * - * @param time @Time object to use - * @param quotaSensor @Sensor object with a defined quota for a given user/clientId pair - */ -class StrictControllerMutationQuota(private val time: Time, - private val quotaSensor: Sensor) - extends AbstractControllerMutationQuota(time) { - - override def isExceeded: Boolean = lastThrottleTimeMs > 0 - - override def record(permits: Double): Unit = { - val timeMs = time.milliseconds - try { - quotaSensor synchronized { - quotaSensor.checkQuotas(timeMs) - quotaSensor.record(permits, timeMs, false) - } - } catch { - case e: QuotaViolationException => - updateThrottleTime(e, timeMs) - throw new ThrottlingQuotaExceededException(lastThrottleTimeMs.toInt, - Errors.THROTTLING_QUOTA_EXCEEDED.message) - } - } -} - -/** - * The PermissiveControllerMutationQuota defines a permissive quota for a given user/clientId pair. - * The quota is permissive meaning that 1) it does accept any mutations even if the quota is - * exhausted; and 2) it does throttle as soon as the quota is exhausted. - * - * @param time @Time object to use - * @param quotaSensor @Sensor object with a defined quota for a given user/clientId pair - */ -class PermissiveControllerMutationQuota(private val time: Time, - private val quotaSensor: Sensor) - extends AbstractControllerMutationQuota(time) { - - override def isExceeded: Boolean = false - - override def record(permits: Double): Unit = { - val timeMs = time.milliseconds - try { - quotaSensor.record(permits, timeMs, true) - } catch { - case e: QuotaViolationException => - updateThrottleTime(e, timeMs) - } - } -} - -object ControllerMutationQuotaManager { - - /** - * This calculates the amount of time needed to bring the TokenBucket within quota - * assuming that no new metrics are recorded. - * - * Basically, if a value < 0 is observed, the time required to bring it to zero is - * -value / refill rate (quota bound) * 1000. - */ - def throttleTimeMs(e: QuotaViolationException): Long = { - e.metric().measurable() match { - case _: TokenBucket => - Math.round(-e.value() / e.bound() * 1000) - case _ => throw new IllegalArgumentException( - s"Metric ${e.metric().metricName()} is not a TokenBucket metric, value ${e.metric().measurable()}") - } - } -} - -/** - * The ControllerMutationQuotaManager is a specialized ClientQuotaManager used in the context - * of throttling controller's operations/mutations. 
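`ControllerMutationQuotaManager.throttleTimeMs` above converts a negative token-bucket credit into a wait time of `-value / bound * 1000` milliseconds, where `bound` is the refill rate. A worked example with illustrative numbers:

```scala
object TokenBucketThrottleSketch extends App {
  // With the bucket at -30 tokens and a quota (refill rate) of 10 mutations/sec,
  // it takes -(-30) / 10 = 3 seconds for the bucket to climb back to zero.
  def throttleTimeMs(observedValue: Double, quotaBound: Double): Long =
    Math.round(-observedValue / quotaBound * 1000)

  assert(throttleTimeMs(observedValue = -30.0, quotaBound = 10.0) == 3000L)
}
```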
- * - * @param config @ClientQuotaManagerConfig quota configs - * @param metrics @Metrics Metrics instance - * @param time @Time object to use - * @param threadNamePrefix The thread prefix to use - * @param quotaCallback @ClientQuotaCallback ClientQuotaCallback to use - */ -class ControllerMutationQuotaManager(private val config: ClientQuotaManagerConfig, - private val metrics: Metrics, - private val time: Time, - private val threadNamePrefix: String, - private val quotaCallback: Option[Plugin[ClientQuotaCallback]]) - extends ClientQuotaManager(config, metrics, QuotaType.CONTROLLER_MUTATION, time, threadNamePrefix, quotaCallback) { - - override protected def clientQuotaMetricName(quotaMetricTags: Map[String, String]): MetricName = { - metrics.metricName("tokens", QuotaType.CONTROLLER_MUTATION.toString, - "Tracking remaining tokens in the token bucket per user/client-id", - quotaMetricTags.asJava) - } - - private def clientRateMetricName(quotaMetricTags: Map[String, String]): MetricName = { - metrics.metricName("mutation-rate", QuotaType.CONTROLLER_MUTATION.toString, - "Tracking mutation-rate per user/client-id", - quotaMetricTags.asJava) - } - - override protected def registerQuotaMetrics(metricTags: Map[String, String])(sensor: Sensor): Unit = { - sensor.add( - clientRateMetricName(metricTags), - new Rate - ) - sensor.add( - clientQuotaMetricName(metricTags), - new TokenBucket, - getQuotaMetricConfig(metricTags) - ) - } - - /** - * Records that a user/clientId accumulated or would like to accumulate the provided amount at the - * specified time, returns throttle time in milliseconds. The quota is strict meaning that it - * does not accept any mutations once the quota is exhausted until it gets back to the defined rate. - * - * @param session The session from which the user is extracted - * @param clientId The client id - * @param value The value to accumulate - * @param timeMs The time at which to accumulate the value - * @return The throttle time in milliseconds defines as the time to wait until the average - * rate gets back to the defined quota - */ - override def recordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - val quotaSensor = clientSensors.quotaSensor - try { - quotaSensor synchronized { - quotaSensor.checkQuotas(timeMs) - quotaSensor.record(value, timeMs, false) - } - 0 - } catch { - case e: QuotaViolationException => - val throttleTimeMs = ControllerMutationQuotaManager.throttleTimeMs(e).toInt - debug(s"Quota violated for sensor (${quotaSensor.name}). Delay time: ($throttleTimeMs)") - throttleTimeMs - } - } - - /** - * Returns a StrictControllerMutationQuota for the given user/clientId pair or - * a UnboundedControllerMutationQuota$ if the quota is disabled. 
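The overridden `registerQuotaMetrics` above attaches two stats to the same sensor: a `Rate` ("mutation-rate") purely for reporting and a `TokenBucket` ("tokens") that carries the quota and is what actually triggers violations. A sketch of that registration with the public metrics API (tag values and the 10 mutations/sec bound are illustrative):

```scala
import java.util.concurrent.TimeUnit
import org.apache.kafka.common.metrics.{MetricConfig, Metrics, Quota}
import org.apache.kafka.common.metrics.stats.{Rate, TokenBucket}

import scala.jdk.CollectionConverters._

object ControllerMutationSensorSketch extends App {
  val metrics = new Metrics()
  val tags = Map("user" -> "alice", "client-id" -> "admin-1").asJava

  val quotaConfig = new MetricConfig()
    .timeWindow(1, TimeUnit.SECONDS)
    .samples(11)
    .quota(Quota.upperBound(10))          // e.g. 10 partition mutations/sec

  val sensor = metrics.sensor("ControllerMutation-alice:admin-1")
  // Rate is reporting-only; the TokenBucket carries the quota config and enforces it.
  sensor.add(metrics.metricName("mutation-rate", "ControllerMutation",
    "Tracking mutation-rate per user/client-id", tags), new Rate())
  sensor.add(metrics.metricName("tokens", "ControllerMutation",
    "Tracking remaining tokens in the token bucket per user/client-id", tags), new TokenBucket(), quotaConfig)

  sensor.record(5)                        // consumes 5 tokens from the bucket
  metrics.close()
}
```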
- * - * @param session The session from which the user is extracted - * @param clientId The client id - * @return ControllerMutationQuota - */ - def newStrictQuotaFor(session: Session, clientId: String): ControllerMutationQuota = { - if (quotasEnabled) { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - new StrictControllerMutationQuota(time, clientSensors.quotaSensor) - } else { - UnboundedControllerMutationQuota - } - } - - def newStrictQuotaFor(request: RequestChannel.Request): ControllerMutationQuota = - newStrictQuotaFor(request.session, request.header.clientId) - - /** - * Returns a PermissiveControllerMutationQuota for the given user/clientId pair or - * a UnboundedControllerMutationQuota$ if the quota is disabled. - * - * @param session The session from which the user is extracted - * @param clientId The client id - * @return ControllerMutationQuota - */ - def newPermissiveQuotaFor(session: Session, clientId: String): ControllerMutationQuota = { - if (quotasEnabled) { - val clientSensors = getOrCreateQuotaSensors(session, clientId) - new PermissiveControllerMutationQuota(time, clientSensors.quotaSensor) - } else { - UnboundedControllerMutationQuota - } - } - - def newPermissiveQuotaFor(request: RequestChannel.Request): ControllerMutationQuota = - newPermissiveQuotaFor(request.session, request.header.clientId) - - /** - * Returns a ControllerMutationQuota based on `strictSinceVersion`. It returns a strict - * quota if the version is equal to or above of the `strictSinceVersion`, a permissive - * quota if the version is below, and an unbounded quota if the quota is disabled. - * - * When the quota is strictly enforced. Any operation above the quota is not allowed - * and rejected with a THROTTLING_QUOTA_EXCEEDED error. - * - * @param request The request to extract the user and the clientId from - * @param strictSinceVersion The version since quota is strict - * @return - */ - def newQuotaFor(request: RequestChannel.Request, strictSinceVersion: Short): ControllerMutationQuota = { - if (request.header.apiVersion() >= strictSinceVersion) - newStrictQuotaFor(request) - else - newPermissiveQuotaFor(request) - } -} diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala b/core/src/main/scala/kafka/server/ControllerServer.scala index 3fbc333f85fb5..460e2969706a1 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -22,7 +22,7 @@ import kafka.raft.KafkaRaftManager import kafka.server.QuotaFactory.QuotaManagers import scala.collection.immutable -import kafka.server.metadata.{ClientQuotaMetadataManager, DelegationTokenPublisher, DynamicClientQuotaPublisher, DynamicConfigPublisher, DynamicTopicClusterQuotaPublisher, KRaftMetadataCache, KRaftMetadataCachePublisher, ScramPublisher} +import kafka.server.metadata.{ClientQuotaMetadataManager, DynamicClientQuotaPublisher, DynamicConfigPublisher, DynamicTopicClusterQuotaPublisher, KRaftMetadataCache, KRaftMetadataCachePublisher} import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.ApiMessageType.ListenerType @@ -38,14 +38,15 @@ import org.apache.kafka.image.publisher.{ControllerRegistrationsPublisher, Metad import org.apache.kafka.metadata.{KafkaConfigSchema, ListenerInfo} import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer import org.apache.kafka.metadata.bootstrap.BootstrapMetadata -import org.apache.kafka.metadata.publisher.{AclPublisher, 
FeaturesPublisher} +import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, FeaturesPublisher, ScramPublisher} import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.{DelegationTokenManager, ProcessRole, SimpleApiVersionManager} +import org.apache.kafka.security.{CredentialProvider, DelegationTokenManager} +import org.apache.kafka.server.{ProcessRole, SimpleApiVersionManager} import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.config.ServerLogConfigs.{ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG} import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, NodeToControllerChannelManager} -import org.apache.kafka.server.config.{ConfigType, DelegationTokenManagerConfigs} +import org.apache.kafka.server.config.ConfigType +import org.apache.kafka.server.config.DelegationTokenManagerConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.server.network.{EndpointReadyFutures, KafkaAuthorizerServerInfo} import org.apache.kafka.server.policy.{AlterConfigPolicy, CreateTopicPolicy} @@ -70,7 +71,10 @@ class ControllerServer( import kafka.server.Server._ - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "ControllerServer" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) val config = sharedServer.controllerConfig val logContext = new LogContext(s"[ControllerServer id=${config.nodeId}] ") @@ -145,7 +149,7 @@ class ControllerServer( metadataCachePublisher = new KRaftMetadataCachePublisher(metadataCache) - featuresPublisher = new FeaturesPublisher(logContext) + featuresPublisher = new FeaturesPublisher(logContext, sharedServer.metadataPublishingFaultHandler) registrationsPublisher = new ControllerRegistrationsPublisher() @@ -174,7 +178,7 @@ class ControllerServer( sharedServer.socketFactory) val listenerInfo = ListenerInfo - .create(config.effectiveAdvertisedControllerListeners.map(_.toPublic).asJava) + .create(config.effectiveAdvertisedControllerListeners.asJava) .withWildcardHostnamesResolved() .withEphemeralPortsCorrected(name => socketServer.boundPort(new ListenerName(name))) socketServerFirstBoundPortFuture.complete(listenerInfo.firstListener().port()) @@ -224,7 +228,7 @@ class ControllerServer( val maxIdleIntervalNs = config.metadataMaxIdleIntervalNs.fold(OptionalLong.empty)(OptionalLong.of) - quorumControllerMetrics = new QuorumControllerMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry), time) + quorumControllerMetrics = new QuorumControllerMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry), time, config.brokerSessionTimeoutMs) new QuorumController.Builder(config.nodeId, sharedServer.clusterId). setTime(time). @@ -252,7 +256,6 @@ class ControllerServer( setDelegationTokenExpiryTimeMs(delegationTokenManagerConfigs.delegationTokenExpiryTimeMs). setDelegationTokenExpiryCheckIntervalMs(delegationTokenManagerConfigs.delegationTokenExpiryCheckIntervalMs). setUncleanLeaderElectionCheckIntervalMs(config.uncleanLeaderElectionCheckIntervalMs). - setInterBrokerListenerName(config.interBrokerListenerName.value()). setControllerPerformanceSamplePeriodMs(config.controllerPerformanceSamplePeriodMs). 
setControllerPerformanceAlwaysLogThresholdMs(config.controllerPerformanceAlwaysLogThresholdMs) } @@ -351,7 +354,7 @@ class ControllerServer( // Set up the SCRAM publisher. metadataPublishers.add(new ScramPublisher( - config, + config.nodeId, sharedServer.metadataPublishingFaultHandler, "controller", credentialProvider @@ -361,7 +364,7 @@ class ControllerServer( // We need a tokenManager for the Publisher // The tokenCache in the tokenManager is the same used in DelegationTokenControlManager metadataPublishers.add(new DelegationTokenPublisher( - config, + config.nodeId, sharedServer.metadataPublishingFaultHandler, "controller", new DelegationTokenManager(delegationTokenManagerConfigs, tokenCache) diff --git a/core/src/main/scala/kafka/server/DelayedFetch.scala b/core/src/main/scala/kafka/server/DelayedFetch.scala index 74a3e2b1a2997..91480bb420edd 100644 --- a/core/src/main/scala/kafka/server/DelayedFetch.scala +++ b/core/src/main/scala/kafka/server/DelayedFetch.scala @@ -187,7 +187,10 @@ class DelayedFetch( } object DelayedFetchMetrics { - private val metricsGroup = new KafkaMetricsGroup(DelayedFetchMetrics.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "DelayedFetchMetrics" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val FetcherTypeKey = "fetcherType" val followerExpiredRequestMeter: Meter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS, Map(FetcherTypeKey -> "follower").asJava) val consumerExpiredRequestMeter: Meter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS, Map(FetcherTypeKey -> "consumer").asJava) diff --git a/core/src/main/scala/kafka/server/DelayedProduce.scala b/core/src/main/scala/kafka/server/DelayedProduce.scala index 9c212416ce6b7..523158fe5594b 100644 --- a/core/src/main/scala/kafka/server/DelayedProduce.scala +++ b/core/src/main/scala/kafka/server/DelayedProduce.scala @@ -17,12 +17,11 @@ package kafka.server -import java.util.concurrent.TimeUnit -import java.util.concurrent.locks.Lock +import java.util.concurrent.{ConcurrentHashMap, TimeUnit} import com.typesafe.scalalogging.Logger import com.yammer.metrics.core.Meter -import kafka.utils.{Logging, Pool} -import org.apache.kafka.common.TopicPartition +import kafka.utils.Logging +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.server.metrics.KafkaMetricsGroup @@ -30,7 +29,6 @@ import org.apache.kafka.server.purgatory.DelayedOperation import scala.collection._ import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption case class ProducePartitionStatus(requiredOffset: Long, responseStatus: PartitionResponse) { @volatile var acksPending = false @@ -43,7 +41,7 @@ case class ProducePartitionStatus(requiredOffset: Long, responseStatus: Partitio * The produce metadata maintained by the delayed produce operation */ case class ProduceMetadata(produceRequiredAcks: Short, - produceStatus: Map[TopicPartition, ProducePartitionStatus]) { + produceStatus: Map[TopicIdPartition, ProducePartitionStatus]) { override def toString = s"[requiredAcks: $produceRequiredAcks, partitionStatus: $produceStatus]" } @@ -59,9 +57,8 @@ object DelayedProduce { class DelayedProduce(delayMs: Long, produceMetadata: ProduceMetadata, 
replicaManager: ReplicaManager, - responseCallback: Map[TopicPartition, PartitionResponse] => Unit, - lockOpt: Option[Lock]) - extends DelayedOperation(delayMs, lockOpt.toJava) with Logging { + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit) + extends DelayedOperation(delayMs) with Logging { override lazy val logger: Logger = DelayedProduce.logger @@ -91,11 +88,11 @@ class DelayedProduce(delayMs: Long, */ override def tryComplete(): Boolean = { // check for each partition if it still has pending acks - produceMetadata.produceStatus.foreachEntry { (topicPartition, status) => - trace(s"Checking produce satisfaction for $topicPartition, current status $status") + produceMetadata.produceStatus.foreachEntry { (topicIdPartition, status) => + trace(s"Checking produce satisfaction for $topicIdPartition, current status $status") // skip those partitions that have already been satisfied if (status.acksPending) { - val (hasEnough, error) = replicaManager.getPartitionOrError(topicPartition) match { + val (hasEnough, error) = replicaManager.getPartitionOrError(topicIdPartition.topicPartition()) match { case Left(err) => // Case A (false, err) @@ -120,10 +117,10 @@ class DelayedProduce(delayMs: Long, } override def onExpiration(): Unit = { - produceMetadata.produceStatus.foreachEntry { (topicPartition, status) => + produceMetadata.produceStatus.foreachEntry { (topicIdPartition, status) => if (status.acksPending) { - debug(s"Expiring produce request for partition $topicPartition with status $status") - DelayedProduceMetrics.recordExpiration(topicPartition) + debug(s"Expiring produce request for partition $topicIdPartition with status $status") + DelayedProduceMetrics.recordExpiration(topicIdPartition.topicPartition()) } } } @@ -138,19 +135,20 @@ class DelayedProduce(delayMs: Long, } object DelayedProduceMetrics { - private val metricsGroup = new KafkaMetricsGroup(DelayedProduceMetrics.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "DelayedProduceMetrics" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val aggregateExpirationMeter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS) - private val partitionExpirationMeterFactory = (key: TopicPartition) => - metricsGroup.newMeter("ExpiresPerSec", - "requests", - TimeUnit.SECONDS, - Map("topic" -> key.topic, "partition" -> key.partition.toString).asJava) - private val partitionExpirationMeters = new Pool[TopicPartition, Meter](valueFactory = Some(partitionExpirationMeterFactory)) + private val partitionExpirationMeters = new ConcurrentHashMap[TopicPartition, Meter] def recordExpiration(partition: TopicPartition): Unit = { aggregateExpirationMeter.mark() - partitionExpirationMeters.getAndMaybePut(partition).mark() + partitionExpirationMeters.computeIfAbsent(partition, key => metricsGroup.newMeter("ExpiresPerSec", + "requests", + TimeUnit.SECONDS, + Map("topic" -> key.topic, "partition" -> key.partition.toString).asJava)).mark() } } diff --git a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala index e6bdce63e6880..03e6f8d230fd5 100644 --- a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala +++ b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala @@ -22,11 +22,13 @@ import kafka.utils.Logging import org.apache.kafka.common.TopicIdPartition import 
org.apache.kafka.common.errors._ import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.purgatory.DelayedOperation import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log.{LogOffsetMetadata, RemoteLogReadResult, RemoteStorageFetchInfo} +import java.util import java.util.concurrent.{CompletableFuture, Future, TimeUnit} import java.util.{Optional, OptionalInt, OptionalLong} import scala.collection._ @@ -35,9 +37,9 @@ import scala.collection._ * A remote fetch operation that can be created by the replica manager and watched * in the remote fetch operation purgatory */ -class DelayedRemoteFetch(remoteFetchTask: Future[Void], - remoteFetchResult: CompletableFuture[RemoteLogReadResult], - remoteFetchInfo: RemoteStorageFetchInfo, +class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Void]], + remoteFetchResults: util.Map[TopicIdPartition, CompletableFuture[RemoteLogReadResult]], + remoteFetchInfos: util.Map[TopicIdPartition, RemoteStorageFetchInfo], remoteFetchMaxWaitMs: Long, fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)], fetchParams: FetchParams, @@ -55,7 +57,7 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], * * Case a: This broker is no longer the leader of the partition it tries to fetch * Case b: This broker does not know the partition it tries to fetch - * Case c: The remote storage read request completed (succeeded or failed) + * Case c: All the remote storage read request completed (succeeded or failed) * Case d: The partition is in an offline log directory on this broker * * Upon completion, should return whatever data is available for each valid partition @@ -80,7 +82,8 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], return forceComplete() } } - if (remoteFetchResult.isDone) // Case c + // Case c + if (remoteFetchResults.values().stream().allMatch(taskResult => taskResult.isDone)) forceComplete() else false @@ -89,8 +92,13 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], override def onExpiration(): Unit = { // cancel the remote storage read task, if it has not been executed yet and // avoid interrupting the task if it is already running as it may force closing opened/cached resources as transaction index. 
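With this change `DelayedRemoteFetch` tracks one task and one result future per `TopicIdPartition` instead of a single pair: completion now requires every remote read to be done (the `allMatch` check above), and expiration cancels only tasks that have not started (the loop that follows). A stripped-down sketch of those two checks, with a generic key type `K` standing in for `TopicIdPartition`:

```scala
import java.util
import java.util.concurrent.{CompletableFuture, Future}

object RemoteFetchSketch {
  // Case c completion: every per-partition remote read has finished, successfully or not.
  def allDone[K, V](results: util.Map[K, CompletableFuture[V]]): Boolean =
    results.values().stream().allMatch(f => f.isDone)

  // Expiration: cancel only tasks that never ran, without interrupting in-flight reads.
  def cancelPending[K](tasks: util.Map[K, Future[Void]]): Unit =
    tasks.forEach { (_, task) =>
      if (task != null && !task.isDone)
        task.cancel(false)   // mayInterruptIfRunning = false, as in onExpiration above
    }
}
```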
- val cancelled = remoteFetchTask.cancel(false) - if (!cancelled) debug(s"Remote fetch task for RemoteStorageFetchInfo: $remoteFetchInfo could not be cancelled and its isDone value is ${remoteFetchTask.isDone}") + remoteFetchTasks.forEach { (topicIdPartition, task) => + if (task != null && !task.isDone) { + if (!task.cancel(false)) { + debug(s"Remote fetch task for remoteFetchInfo: ${remoteFetchInfos.get(topicIdPartition)} could not be cancelled.") + } + } + } DelayedRemoteFetchMetrics.expiredRequestMeter.mark() } @@ -100,7 +108,8 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], */ override def onComplete(): Unit = { val fetchPartitionData = localReadResults.map { case (tp, result) => - if (tp.topicPartition().equals(remoteFetchInfo.topicPartition) + val remoteFetchResult = remoteFetchResults.get(tp) + if (remoteFetchInfos.containsKey(tp) && remoteFetchResult.isDone && result.error == Errors.NONE && result.info.delayedRemoteStorageFetch.isPresent) { @@ -114,9 +123,9 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], result.leaderLogStartOffset, info.records, Optional.empty(), - if (result.lastStableOffset.isDefined) OptionalLong.of(result.lastStableOffset.get) else OptionalLong.empty(), + if (result.lastStableOffset.isPresent) OptionalLong.of(result.lastStableOffset.getAsLong) else OptionalLong.empty(), info.abortedTransactions, - if (result.preferredReadReplica.isDefined) OptionalInt.of(result.preferredReadReplica.get) else OptionalInt.empty(), + if (result.preferredReadReplica.isPresent) OptionalInt.of(result.preferredReadReplica.getAsInt) else OptionalInt.empty(), false) } } else { @@ -129,6 +138,9 @@ class DelayedRemoteFetch(remoteFetchTask: Future[Void], } object DelayedRemoteFetchMetrics { - private val metricsGroup = new KafkaMetricsGroup(DelayedRemoteFetchMetrics.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "DelayedRemoteFetchMetrics" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) val expiredRequestMeter: Meter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS) } diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala index 27b5c8e16d417..124a4c7b78f4c 100755 --- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala +++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala @@ -27,13 +27,14 @@ import kafka.raft.KafkaRaftManager import kafka.server.DynamicBrokerConfig._ import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.common.Reconfigurable -import org.apache.kafka.network.EndPoint +import org.apache.kafka.common.Endpoint import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, ConfigResource, SaslConfigs, SslConfigs} import org.apache.kafka.common.metadata.{ConfigRecord, MetadataRecordType} import org.apache.kafka.common.metrics.{Metrics, MetricsReporter} import org.apache.kafka.common.network.{ListenerName, ListenerReconfigurable} import org.apache.kafka.common.security.authenticator.LoginManager +import org.apache.kafka.common.utils.LogContext import org.apache.kafka.common.utils.{BufferSupplier, ConfigUtils, Utils} import org.apache.kafka.config import org.apache.kafka.coordinator.transaction.TransactionLogConfig @@ -48,15 +49,16 @@ import 
org.apache.kafka.server.telemetry.ClientTelemetry import org.apache.kafka.snapshot.RecordsSnapshotReader import org.apache.kafka.storage.internals.log.{LogCleaner, LogConfig} +import scala.util.Using import scala.collection._ import scala.jdk.CollectionConverters._ /** * Dynamic broker configurations may be defined at two levels: *
 * <ul>
 *   <li>Per-broker configurations are persisted at the controller and can be described
 *       or altered using AdminClient with the resource name brokerId.</li>
 *   <li>Cluster-wide default configurations are persisted at the cluster level and can be
 *       described or altered using AdminClient with an empty resource name.</li>
 * </ul>
    * The order of precedence for broker configs is: @@ -96,7 +98,8 @@ object DynamicBrokerConfig { DynamicListenerConfig.ReconfigurableConfigs ++ SocketServer.ReconfigurableConfigs ++ DynamicProducerStateManagerConfig ++ - DynamicRemoteLogConfig.ReconfigurableConfigs + DynamicRemoteLogConfig.ReconfigurableConfigs ++ + Set(AbstractConfig.CONFIG_PROVIDERS_CONFIG) private val ClusterLevelListenerConfigs = Set(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG) private val PerBrokerConfigs = (DynamicSecurityConfigs ++ DynamicListenerConfig.ReconfigurableConfigs).diff( @@ -195,7 +198,8 @@ object DynamicBrokerConfig { private[server] def readDynamicBrokerConfigsFromSnapshot( raftManager: KafkaRaftManager[ApiMessageAndVersion], config: KafkaConfig, - quotaManagers: QuotaFactory.QuotaManagers + quotaManagers: QuotaFactory.QuotaManagers, + logContext: LogContext ): Unit = { def putOrRemoveIfNull(props: Properties, key: String, value: String): Unit = { if (value == null) { @@ -204,38 +208,42 @@ object DynamicBrokerConfig { props.put(key, value) } } - raftManager.replicatedLog.latestSnapshotId().ifPresent(latestSnapshotId => { - raftManager.replicatedLog.readSnapshot(latestSnapshotId).ifPresent(rawSnapshotReader => { - val reader = RecordsSnapshotReader.of( - rawSnapshotReader, - raftManager.recordSerde, - BufferSupplier.create(), - KafkaRaftClient.MAX_BATCH_SIZE_BYTES, - true - ) - val dynamicPerBrokerConfigs = new Properties() - val dynamicDefaultConfigs = new Properties() - while (reader.hasNext) { - val batch = reader.next() - batch.forEach(record => { - if (record.message().apiKey() == MetadataRecordType.CONFIG_RECORD.id) { - val configRecord = record.message().asInstanceOf[ConfigRecord] - if (DynamicBrokerConfig.AllDynamicConfigs.contains(configRecord.name()) && - configRecord.resourceType() == ConfigResource.Type.BROKER.id()) { - if (configRecord.resourceName().isEmpty) { - putOrRemoveIfNull(dynamicDefaultConfigs, configRecord.name(), configRecord.value()) - } else if (configRecord.resourceName() == config.brokerId.toString) { - putOrRemoveIfNull(dynamicPerBrokerConfigs, configRecord.name(), configRecord.value()) - } + raftManager.replicatedLog.latestSnapshotId().ifPresent { latestSnapshotId => + raftManager.replicatedLog.readSnapshot(latestSnapshotId).ifPresent { rawSnapshotReader => + Using.resource( + RecordsSnapshotReader.of( + rawSnapshotReader, + raftManager.recordSerde, + BufferSupplier.create(), + KafkaRaftClient.MAX_BATCH_SIZE_BYTES, + true, + logContext + ) + ) { reader => + val dynamicPerBrokerConfigs = new Properties() + val dynamicDefaultConfigs = new Properties() + while (reader.hasNext) { + val batch = reader.next() + batch.forEach { record => + if (record.message().apiKey() == MetadataRecordType.CONFIG_RECORD.id) { + val configRecord = record.message().asInstanceOf[ConfigRecord] + if (DynamicBrokerConfig.AllDynamicConfigs.contains(configRecord.name()) && + configRecord.resourceType() == ConfigResource.Type.BROKER.id()) { + if (configRecord.resourceName().isEmpty) { + putOrRemoveIfNull(dynamicDefaultConfigs, configRecord.name(), configRecord.value()) + } else if (configRecord.resourceName() == config.brokerId.toString) { + putOrRemoveIfNull(dynamicPerBrokerConfigs, configRecord.name(), configRecord.value()) + } + } } } - }) + } + val configHandler = new BrokerConfigHandler(config, quotaManagers) + configHandler.processConfigChanges("", dynamicDefaultConfigs) + 
configHandler.processConfigChanges(config.brokerId.toString, dynamicPerBrokerConfigs) } - val configHandler = new BrokerConfigHandler(config, quotaManagers) - configHandler.processConfigChanges("", dynamicPerBrokerConfigs) - configHandler.processConfigChanges(config.brokerId.toString, dynamicPerBrokerConfigs) - }) - }) + } + } } } @@ -961,12 +969,12 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi def validateReconfiguration(newConfig: KafkaConfig): Unit = { val oldConfig = server.config - val newListeners = newConfig.listeners.map(_.listenerName).toSet - val oldAdvertisedListeners = oldConfig.effectiveAdvertisedBrokerListeners.map(_.listenerName).toSet - val oldListeners = oldConfig.listeners.map(_.listenerName).toSet + val newListeners = newConfig.listeners.map(l => ListenerName.normalised(l.listener)).toSet + val oldAdvertisedListeners = oldConfig.effectiveAdvertisedBrokerListeners.map(l => ListenerName.normalised(l.listener)).toSet + val oldListeners = oldConfig.listeners.map(l => ListenerName.normalised(l.listener)).toSet if (!oldAdvertisedListeners.subsetOf(newListeners)) throw new ConfigException(s"Advertised listeners '$oldAdvertisedListeners' must be a subset of listeners '$newListeners'") - if (!newListeners.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet)) + if (!newListeners.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet.asScala)) throw new ConfigException(s"Listeners '$newListeners' must be subset of listener map '${newConfig.effectiveListenerSecurityProtocolMap}'") newListeners.intersect(oldListeners).foreach { listenerName => def immutableListenerConfigs(kafkaConfig: KafkaConfig, prefix: String): Map[String, AnyRef] = { @@ -978,7 +986,7 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi if (immutableListenerConfigs(newConfig, listenerName.configPrefix) != immutableListenerConfigs(oldConfig, listenerName.configPrefix)) throw new ConfigException(s"Configs cannot be updated dynamically for existing listener $listenerName, " + "restart broker or create a new listener for update") - if (oldConfig.effectiveListenerSecurityProtocolMap(listenerName) != newConfig.effectiveListenerSecurityProtocolMap(listenerName)) + if (oldConfig.effectiveListenerSecurityProtocolMap.get(listenerName) != newConfig.effectiveListenerSecurityProtocolMap.get(listenerName)) throw new ConfigException(s"Security protocol cannot be updated for existing listener $listenerName") } } @@ -988,8 +996,8 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi val newListenerMap = listenersToMap(newListeners) val oldListeners = oldConfig.listeners val oldListenerMap = listenersToMap(oldListeners) - val listenersRemoved = oldListeners.filterNot(e => newListenerMap.contains(e.listenerName)) - val listenersAdded = newListeners.filterNot(e => oldListenerMap.contains(e.listenerName)) + val listenersRemoved = oldListeners.filterNot(e => newListenerMap.contains(ListenerName.normalised(e.listener))) + val listenersAdded = newListeners.filterNot(e => oldListenerMap.contains(ListenerName.normalised(e.listener))) if (listenersRemoved.nonEmpty || listenersAdded.nonEmpty) { LoginManager.closeAll() // Clear SASL login cache to force re-login if (listenersRemoved.nonEmpty) server.socketServer.removeListeners(listenersRemoved) @@ -997,8 +1005,8 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi } } - private def listenersToMap(listeners: Seq[EndPoint]): Map[ListenerName, 
EndPoint] = - listeners.map(e => (e.listenerName, e)).toMap + private def listenersToMap(listeners: Seq[Endpoint]): Map[ListenerName, Endpoint] = + listeners.map(e => (ListenerName.normalised(e.listener), e)).toMap } @@ -1022,9 +1030,19 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w if (RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP.equals(k) || RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP.equals(k) || - RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP.equals(k)) { + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP.equals(k) || + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP.equals(k)) { val newValue = v.asInstanceOf[Int] - val oldValue = server.config.getInt(k) + val oldValue: Int = { + // This logic preserves backward compatibility in scenarios where + // `remote.log.manager.thread.pool.size` is configured in config file, + // but `remote.log.manager.follower.thread.pool.size` is set dynamically. + // This can be removed once `remote.log.manager.thread.pool.size` is removed. + if (RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP.equals(k)) + server.config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize() + else + server.config.getInt(k) + } if (newValue != oldValue) { val errorMsg = s"Dynamic thread count update validation failed for $k=$v" if (newValue <= 0) @@ -1076,6 +1094,9 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w if (newRLMConfig.remoteLogManagerExpirationThreadPoolSize() != oldRLMConfig.remoteLogManagerExpirationThreadPoolSize()) remoteLogManager.resizeExpirationThreadPool(newRLMConfig.remoteLogManagerExpirationThreadPoolSize()) + if (newRLMConfig.remoteLogManagerFollowerThreadPoolSize() != oldRLMConfig.remoteLogManagerFollowerThreadPoolSize()) + remoteLogManager.resizeFollowerThreadPool(newRLMConfig.remoteLogManagerFollowerThreadPoolSize()) + if (newRLMConfig.remoteLogReaderThreads() != oldRLMConfig.remoteLogReaderThreads()) remoteLogManager.resizeReaderThreadPool(newRLMConfig.remoteLogReaderThreads()) } @@ -1101,6 +1122,7 @@ object DynamicRemoteLogConfig { RemoteLogManagerConfig.REMOTE_LIST_OFFSETS_REQUEST_TIMEOUT_MS_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP ) } diff --git a/core/src/main/scala/kafka/server/FetchSession.scala b/core/src/main/scala/kafka/server/FetchSession.scala index 51db1fcb092fe..4bbb4c47e3fa0 100644 --- a/core/src/main/scala/kafka/server/FetchSession.scala +++ b/core/src/main/scala/kafka/server/FetchSession.scala @@ -803,7 +803,10 @@ class FetchSessionCacheShard(private val maxEntries: Int, } } object FetchSessionCache { - private[server] val metricsGroup = new KafkaMetricsGroup(classOf[FetchSessionCache]) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "FetchSessionCache" + private[server] val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private[server] val counter = new AtomicInteger(0) } diff --git a/core/src/main/scala/kafka/server/ForwardingManager.scala b/core/src/main/scala/kafka/server/ForwardingManager.scala index 
c067000bf0c2c..7737d2d2171f2 100644 --- a/core/src/main/scala/kafka/server/ForwardingManager.scala +++ b/core/src/main/scala/kafka/server/ForwardingManager.scala @@ -26,6 +26,7 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, EnvelopeRequest, EnvelopeResponse, RequestContext, RequestHeader} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} +import org.apache.kafka.server.metrics.ForwardingManagerMetrics import java.util.Optional import java.util.concurrent.TimeUnit @@ -117,7 +118,7 @@ class ForwardingManagerImpl( metrics: Metrics ) extends ForwardingManager with AutoCloseable with Logging { - val forwardingManagerMetrics: ForwardingManagerMetrics = ForwardingManagerMetrics(metrics, channelManager.getTimeoutMs) + val forwardingManagerMetrics: ForwardingManagerMetrics = new ForwardingManagerMetrics(metrics, channelManager.getTimeoutMs) override def forwardRequest( requestContext: RequestContext, @@ -133,7 +134,7 @@ class ForwardingManagerImpl( class ForwardingResponseHandler extends ControllerRequestCompletionHandler { override def onComplete(clientResponse: ClientResponse): Unit = { - forwardingManagerMetrics.queueLength.getAndDecrement() + forwardingManagerMetrics.decrementQueueLength() forwardingManagerMetrics.remoteTimeMsHist.record(clientResponse.requestLatencyMs()) forwardingManagerMetrics.queueTimeMsHist.record(clientResponse.receivedTimeMs() - clientResponse.requestLatencyMs() - requestCreationTimeMs) @@ -174,14 +175,14 @@ class ForwardingManagerImpl( override def onTimeout(): Unit = { debug(s"Forwarding of the request ${requestToString()} failed due to timeout exception") - forwardingManagerMetrics.queueLength.getAndDecrement() + forwardingManagerMetrics.decrementQueueLength() forwardingManagerMetrics.queueTimeMsHist.record(channelManager.getTimeoutMs) val response = requestBody.getErrorResponse(new TimeoutException()) responseCallback(Option(response)) } } - forwardingManagerMetrics.queueLength.getAndIncrement() + forwardingManagerMetrics.incrementQueueLength() channelManager.sendRequest(envelopeRequest, new ForwardingResponseHandler) } diff --git a/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala b/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala deleted file mode 100644 index a846f8c49551d..0000000000000 --- a/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import org.apache.kafka.common.MetricName -import org.apache.kafka.common.metrics.{Gauge, MetricConfig, Metrics} -import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing -import org.apache.kafka.common.metrics.stats.{Percentile, Percentiles} - -import java.util.concurrent.atomic.AtomicInteger - -final class ForwardingManagerMetrics private ( - metrics: Metrics, - timeoutMs: Long, -) extends AutoCloseable { - import ForwardingManagerMetrics._ - - /** - * A histogram describing the amount of time in milliseconds each admin request spends in the broker's forwarding manager queue, waiting to be sent to the controller. - * This does not include the time that the request spends waiting for a response from the controller. - */ - val queueTimeMsHist: LatencyHistogram = new LatencyHistogram(metrics, queueTimeMsName, metricGroupName, timeoutMs) - - /** - * A histogram describing the amount of time in milliseconds each request sent by the ForwardingManager spends waiting for a response. - * This does not include the time spent in the queue. - */ - val remoteTimeMsHist: LatencyHistogram = new LatencyHistogram(metrics, remoteTimeMsName, metricGroupName, timeoutMs) - - val queueLengthName: MetricName = metrics.metricName( - "QueueLength", - metricGroupName, - "The current number of RPCs that are waiting in the broker's forwarding manager queue, waiting to be sent to the controller." - ) - val queueLength: AtomicInteger = new AtomicInteger(0) - metrics.addMetric(queueLengthName, new FuncGauge(_ => queueLength.get())) - - override def close(): Unit = { - queueTimeMsHist.close() - remoteTimeMsHist.close() - metrics.removeMetric(queueLengthName) - } -} - -object ForwardingManagerMetrics { - - val metricGroupName = "ForwardingManager" - val queueTimeMsName = "QueueTimeMs" - val remoteTimeMsName = "RemoteTimeMs" - - final class LatencyHistogram ( - metrics: Metrics, - name: String, - group: String, - maxLatency: Long - ) extends AutoCloseable { - private val sensor = metrics.sensor(name) - val latencyP99Name: MetricName = metrics.metricName(s"$name.p99", group) - val latencyP999Name: MetricName = metrics.metricName(s"$name.p999", group) - - sensor.add(new Percentiles( - 4000, - maxLatency, - BucketSizing.CONSTANT, - new Percentile(latencyP99Name, 99), - new Percentile(latencyP999Name, 99.9) - )) - - override def close(): Unit = { - metrics.removeSensor(name) - metrics.removeMetric(latencyP99Name) - metrics.removeMetric(latencyP999Name) - } - - def record(latencyMs: Long): Unit = sensor.record(latencyMs) - } - - private final class FuncGauge[T](func: Long => T) extends Gauge[T] { - override def value(config: MetricConfig, now: Long): T = { - func(now) - } - } - - def apply(metrics: Metrics, timeoutMs: Long): ForwardingManagerMetrics = new ForwardingManagerMetrics(metrics, timeoutMs) -} diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 6a22963ac7d6a..6ca9014f65410 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -27,6 +27,7 @@ import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.admin.EndpointType import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.acl.AclOperation._ +import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, SHARE_GROUP_STATE_TOPIC_NAME, 
TRANSACTION_STATE_TOPIC_NAME, isInternal} import org.apache.kafka.common.internals.{FatalExitError, Plugin, Topic} @@ -34,7 +35,6 @@ import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.{AddPartit import org.apache.kafka.common.message.DeleteRecordsResponseData.{DeleteRecordsPartitionResult, DeleteRecordsTopicResult} import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.message.MetadataResponseData.{MetadataResponsePartition, MetadataResponseTopic} @@ -60,14 +60,15 @@ import org.apache.kafka.common.{Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.coordinator.group.{Group, GroupConfig, GroupConfigManager, GroupCoordinator} import org.apache.kafka.coordinator.share.ShareCoordinator import org.apache.kafka.metadata.{ConfigRepository, MetadataCache} -import org.apache.kafka.server.{ApiVersionManager, ClientMetricsManager, DelegationTokenManager, ProcessRole} +import org.apache.kafka.security.DelegationTokenManager +import org.apache.kafka.server.{ApiVersionManager, ClientMetricsManager, ProcessRole} import org.apache.kafka.server.authorizer._ -import org.apache.kafka.server.common.{GroupVersion, RequestLocal, TransactionVersion} -import org.apache.kafka.server.config.DelegationTokenManagerConfigs +import org.apache.kafka.server.common.{GroupVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} import org.apache.kafka.server.share.context.ShareFetchContext import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.AppendOrigin import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -78,7 +79,6 @@ import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} import java.util.stream.Collectors import java.util.{Collections, Optional} import scala.annotation.nowarn -import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable` import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ @@ -92,7 +92,7 @@ class KafkaApis(val requestChannel: RequestChannel, val replicaManager: ReplicaManager, val groupCoordinator: GroupCoordinator, val txnCoordinator: TransactionCoordinator, - val shareCoordinator: Option[ShareCoordinator], + val shareCoordinator: ShareCoordinator, val autoTopicCreationManager: AutoTopicCreationManager, val brokerId: Int, val config: KafkaConfig, @@ -112,7 +112,7 @@ class KafkaApis(val requestChannel: RequestChannel, val groupConfigManager: GroupConfigManager ) extends ApiRequestHandler with Logging { - type FetchResponseStats = Map[TopicPartition, RecordValidationStats] + type ProduceResponseStats = Map[TopicIdPartition, RecordValidationStats] this.logIdent = "[KafkaApi-%d] ".format(brokerId) val configHelper = new 
ConfigHelper(metadataCache, config, configRepository) val authHelper = new AuthHelper(authorizerPlugin) @@ -228,21 +228,21 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => handleDescribeTopicPartitionsRequest(request) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) - case ApiKeys.LIST_CLIENT_METRICS_RESOURCES => handleListClientMetricsResources(request) + case ApiKeys.LIST_CONFIG_RESOURCES => handleListConfigResources(request) case ApiKeys.ADD_RAFT_VOTER => forwardToController(request) case ApiKeys.REMOVE_RAFT_VOTER => forwardToController(request) case ApiKeys.SHARE_GROUP_HEARTBEAT => handleShareGroupHeartbeat(request).exceptionally(handleError) case ApiKeys.SHARE_GROUP_DESCRIBE => handleShareGroupDescribe(request).exceptionally(handleError) - case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request) - case ApiKeys.SHARE_ACKNOWLEDGE => handleShareAcknowledgeRequest(request) - case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => handleInitializeShareGroupStateRequest(request) - case ApiKeys.READ_SHARE_GROUP_STATE => handleReadShareGroupStateRequest(request) - case ApiKeys.WRITE_SHARE_GROUP_STATE => handleWriteShareGroupStateRequest(request) - case ApiKeys.DELETE_SHARE_GROUP_STATE => handleDeleteShareGroupStateRequest(request) - case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => handleReadShareGroupStateSummaryRequest(request) - case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => handleDescribeShareGroupOffsetsRequest(request) - case ApiKeys.ALTER_SHARE_GROUP_OFFSETS => handleAlterShareGroupOffsetsRequest(request) - case ApiKeys.DELETE_SHARE_GROUP_OFFSETS => handleDeleteShareGroupOffsetsRequest(request) + case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request).exceptionally(handleError) + case ApiKeys.SHARE_ACKNOWLEDGE => handleShareAcknowledgeRequest(request).exceptionally(handleError) + case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => handleInitializeShareGroupStateRequest(request).exceptionally(handleError) + case ApiKeys.READ_SHARE_GROUP_STATE => handleReadShareGroupStateRequest(request).exceptionally(handleError) + case ApiKeys.WRITE_SHARE_GROUP_STATE => handleWriteShareGroupStateRequest(request).exceptionally(handleError) + case ApiKeys.DELETE_SHARE_GROUP_STATE => handleDeleteShareGroupStateRequest(request).exceptionally(handleError) + case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => handleReadShareGroupStateSummaryRequest(request).exceptionally(handleError) + case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => handleDescribeShareGroupOffsetsRequest(request).exceptionally(handleError) + case ApiKeys.ALTER_SHARE_GROUP_OFFSETS => handleAlterShareGroupOffsetsRequest(request).exceptionally(handleError) + case ApiKeys.DELETE_SHARE_GROUP_OFFSETS => handleDeleteShareGroupOffsetsRequest(request).exceptionally(handleError) case ApiKeys.STREAMS_GROUP_DESCRIBE => handleStreamsGroupDescribe(request).exceptionally(handleError) case ApiKeys.STREAMS_GROUP_HEARTBEAT => handleStreamsGroupHeartbeat(request).exceptionally(handleError) case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}") @@ -275,11 +275,21 @@ class KafkaApis(val requestChannel: RequestChannel, ): CompletableFuture[Unit] = { val offsetCommitRequest = request.body[OffsetCommitRequest] - // Reject the request if not authorized to the group + // Reject the request if not authorized to the group. 
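// Illustrative sketch, not part of the patch: the OffsetCommit path for versions that carry
// topic ids resolves each id to a name before authorization, and topics whose id cannot be
// resolved are answered with UNKNOWN_TOPIC_ID. The types below are simplified stand-ins for
// the real request classes and the metadata cache, not Kafka APIs.
object OffsetCommitTopicIdSketch {
  final case class CommitTopic(topicId: java.util.UUID, var name: String, partitions: Seq[Int])

  // idToName stands in for metadataCache.getTopicName(topicId).
  def resolveAndSplit(topics: Seq[CommitTopic],
                      idToName: Map[java.util.UUID, String]): (Seq[CommitTopic], Seq[CommitTopic]) = {
    topics.foreach { t =>
      if (t.name.isEmpty) idToName.get(t.topicId).foreach(n => t.name = n)
    }
    // Left: resolved topics that continue through authorization and partition validation.
    // Right: topics still without a name, which are reported back with UNKNOWN_TOPIC_ID.
    topics.partition(_.name.nonEmpty)
  }
}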
if (!authHelper.authorize(request.context, READ, GROUP, offsetCommitRequest.data.groupId)) { requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) } else { + val useTopicIds = OffsetCommitResponse.useTopicIds(request.header.apiVersion) + + if (useTopicIds) { + offsetCommitRequest.data.topics.forEach { topic => + if (topic.topicId != Uuid.ZERO_UUID) { + metadataCache.getTopicName(topic.topicId).ifPresent(name => topic.setName(name)) + } + } + } + val authorizedTopics = authHelper.filterByAuthorized( request.context, READ, @@ -287,28 +297,40 @@ class KafkaApis(val requestChannel: RequestChannel, offsetCommitRequest.data.topics.asScala )(_.name) - val responseBuilder = new OffsetCommitResponse.Builder() + val responseBuilder = OffsetCommitResponse.newBuilder(useTopicIds) val authorizedTopicsRequest = new mutable.ArrayBuffer[OffsetCommitRequestData.OffsetCommitRequestTopic]() offsetCommitRequest.data.topics.forEach { topic => - if (!authorizedTopics.contains(topic.name)) { + if (useTopicIds && topic.name.isEmpty) { + // If the topic name is undefined, it means that the topic id is unknown so we add + // the topic and all its partitions to the response with UNKNOWN_TOPIC_ID. + responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( + topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_ID) + } else if (!authorizedTopics.contains(topic.name)) { // If the topic is not authorized, we add the topic and all its partitions // to the response with TOPIC_AUTHORIZATION_FAILED. responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( - topic.name, topic.partitions, _.partitionIndex, Errors.TOPIC_AUTHORIZATION_FAILED) + topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.TOPIC_AUTHORIZATION_FAILED) } else if (!metadataCache.contains(topic.name)) { // If the topic is unknown, we add the topic and all its partitions // to the response with UNKNOWN_TOPIC_OR_PARTITION. responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( - topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) + topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) } else { // Otherwise, we check all partitions to ensure that they all exist. - val topicWithValidPartitions = new OffsetCommitRequestData.OffsetCommitRequestTopic().setName(topic.name) + val topicWithValidPartitions = new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(topic.topicId) + .setName(topic.name) topic.partitions.forEach { partition => - if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).isPresent()) { + if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).isPresent) { topicWithValidPartitions.partitions.add(partition) } else { - responseBuilder.addPartition(topic.name, partition.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) + responseBuilder.addPartition( + topic.topicId, + topic.name, + partition.partitionIndex, + Errors.UNKNOWN_TOPIC_OR_PARTITION + ) } } @@ -322,42 +344,23 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendMaybeThrottle(request, responseBuilder.build()) CompletableFuture.completedFuture(()) } else { - // For version > 0, store offsets in Coordinator. 
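// Illustrative sketch, not part of the patch: with the separate commitOffsetsToCoordinator
// helper removed, the handler builds the coordinator request from the authorized topics and
// finishes in a single handle stage. callCoordinator, mergeAndSend and sendError are
// hypothetical stand-ins for groupCoordinator.commitOffsets and the response helpers.
object InlineOffsetCommitSketch {
  import java.util.concurrent.CompletableFuture

  def commit[T, R](authorizedTopics: Seq[T],
                   callCoordinator: Seq[T] => CompletableFuture[R],
                   mergeAndSend: R => Unit,
                   sendError: Throwable => Unit): CompletableFuture[Unit] =
    callCoordinator(authorizedTopics).handle[Unit] { (results, exception) =>
      if (exception != null) sendError(exception)
      else mergeAndSend(results)
    }
}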
- commitOffsetsToCoordinator( - request, - offsetCommitRequest, - authorizedTopicsRequest, - responseBuilder, - requestLocal - ) - } - } - } - - private def commitOffsetsToCoordinator( - request: RequestChannel.Request, - offsetCommitRequest: OffsetCommitRequest, - authorizedTopicsRequest: mutable.ArrayBuffer[OffsetCommitRequestData.OffsetCommitRequestTopic], - responseBuilder: OffsetCommitResponse.Builder, - requestLocal: RequestLocal - ): CompletableFuture[Unit] = { - val offsetCommitRequestData = new OffsetCommitRequestData() - .setGroupId(offsetCommitRequest.data.groupId) - .setMemberId(offsetCommitRequest.data.memberId) - .setGenerationIdOrMemberEpoch(offsetCommitRequest.data.generationIdOrMemberEpoch) - .setRetentionTimeMs(offsetCommitRequest.data.retentionTimeMs) - .setGroupInstanceId(offsetCommitRequest.data.groupInstanceId) - .setTopics(authorizedTopicsRequest.asJava) - - groupCoordinator.commitOffsets( - request.context, - offsetCommitRequestData, - requestLocal.bufferSupplier - ).handle[Unit] { (results, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, responseBuilder.merge(results).build()) + groupCoordinator.commitOffsets( + request.context, + new OffsetCommitRequestData() + .setGroupId(offsetCommitRequest.data.groupId) + .setMemberId(offsetCommitRequest.data.memberId) + .setGenerationIdOrMemberEpoch(offsetCommitRequest.data.generationIdOrMemberEpoch) + .setRetentionTimeMs(offsetCommitRequest.data.retentionTimeMs) + .setGroupInstanceId(offsetCommitRequest.data.groupInstanceId) + .setTopics(authorizedTopicsRequest.asJava), + requestLocal.bufferSupplier + ).handle[Unit] { (results, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, responseBuilder.merge(results).build()) + } + } } } } @@ -394,57 +397,73 @@ class KafkaApis(val requestChannel: RequestChannel, } } - val unauthorizedTopicResponses = mutable.Map[TopicPartition, PartitionResponse]() - val nonExistingTopicResponses = mutable.Map[TopicPartition, PartitionResponse]() - val invalidRequestResponses = mutable.Map[TopicPartition, PartitionResponse]() - val authorizedRequestInfo = mutable.Map[TopicPartition, MemoryRecords]() + val unauthorizedTopicResponses = mutable.Map[TopicIdPartition, PartitionResponse]() + val nonExistingTopicResponses = mutable.Map[TopicIdPartition, PartitionResponse]() + val invalidRequestResponses = mutable.Map[TopicIdPartition, PartitionResponse]() + val authorizedRequestInfo = mutable.Map[TopicIdPartition, MemoryRecords]() + val topicIdToPartitionData = new mutable.ArrayBuffer[(TopicIdPartition, ProduceRequestData.PartitionProduceData)] + + produceRequest.data.topicData.forEach { topic => + topic.partitionData.forEach { partition => + val (topicName, topicId) = if (topic.topicId().equals(Uuid.ZERO_UUID)) { + (topic.name(), metadataCache.getTopicId(topic.name())) + } else { + (metadataCache.getTopicName(topic.topicId).orElse(topic.name), topic.topicId()) + } + + val topicPartition = new TopicPartition(topicName, partition.index()) + // To be compatible with the old version, only return UNKNOWN_TOPIC_ID if request version uses topicId, but the corresponding topic name can't be found. 
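// Illustrative sketch, not part of the patch: how the produce path reconciles topic name and
// topic id per entry. Requests carrying only a name look up the id; requests carrying an id
// look up the name; on request versions above 12 an unresolvable id is failed with
// UNKNOWN_TOPIC_ID. ZERO_ID and the two maps are simplified stand-ins for Uuid.ZERO_UUID and
// the metadata cache lookups.
object ProduceTopicResolutionSketch {
  val ZERO_ID = new java.util.UUID(0L, 0L)

  def resolve(name: String,
              id: java.util.UUID,
              nameToId: Map[String, java.util.UUID],
              idToName: Map[java.util.UUID, String]): (String, java.util.UUID) =
    if (id == ZERO_ID) (name, nameToId.getOrElse(name, ZERO_ID))
    else (idToName.getOrElse(id, name), id)

  def failsWithUnknownTopicId(resolvedName: String, apiVersion: Int): Boolean =
    resolvedName.isEmpty && apiVersion > 12
}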
+ if (topicName.isEmpty && request.header.apiVersion > 12) + nonExistingTopicResponses += new TopicIdPartition(topicId, topicPartition) -> new PartitionResponse(Errors.UNKNOWN_TOPIC_ID) + else + topicIdToPartitionData += new TopicIdPartition(topicId, topicPartition) -> partition + } + } // cache the result to avoid redundant authorization calls - val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC, - produceRequest.data().topicData().asScala)(_.name()) + val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC, topicIdToPartitionData)(_._1.topic) - produceRequest.data.topicData.forEach(topic => topic.partitionData.forEach { partition => - val topicPartition = new TopicPartition(topic.name, partition.index) + topicIdToPartitionData.foreach { case (topicIdPartition, partition) => // This caller assumes the type is MemoryRecords and that is true on current serialization // We cast the type to avoid causing big change to code base. // https://issues.apache.org/jira/browse/KAFKA-10698 val memoryRecords = partition.records.asInstanceOf[MemoryRecords] - if (!authorizedTopics.contains(topicPartition.topic)) - unauthorizedTopicResponses += topicPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED) - else if (!metadataCache.contains(topicPartition)) - nonExistingTopicResponses += topicPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION) + if (!authorizedTopics.contains(topicIdPartition.topic)) + unauthorizedTopicResponses += topicIdPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED) + else if (!metadataCache.contains(topicIdPartition.topicPartition)) + nonExistingTopicResponses += topicIdPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION) else try { ProduceRequest.validateRecords(request.header.apiVersion, memoryRecords) - authorizedRequestInfo += (topicPartition -> memoryRecords) + authorizedRequestInfo += (topicIdPartition -> memoryRecords) } catch { case e: ApiException => - invalidRequestResponses += topicPartition -> new PartitionResponse(Errors.forException(e)) + invalidRequestResponses += topicIdPartition -> new PartitionResponse(Errors.forException(e)) } - }) + } // the callback for sending a produce response // The construction of ProduceResponse is able to accept auto-generated protocol data so // KafkaApis#handleProduceRequest should apply auto-generated protocol to avoid extra conversion. 
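// Illustrative sketch, not part of the patch: produce statuses are now keyed by a
// (topicId, partition) pair, and for NOT_LEADER_OR_FOLLOWER errors on request versions 10 and
// above the current leader's endpoint is collected so clients can be redirected. The case
// classes and the currentLeader function are simplified stand-ins, not the real Kafka types.
object ProduceLeaderHintSketch {
  final case class TopicIdPartition(topicId: java.util.UUID, topic: String, partition: Int)
  final case class Node(id: Int, host: String, port: Int)

  def collectLeaderHints(errors: Map[TopicIdPartition, String], // partition -> error name
                         currentLeader: TopicIdPartition => Option[Node],
                         apiVersion: Int): Map[Int, Node] = {
    val endpoints = scala.collection.mutable.HashMap.empty[Int, Node]
    if (apiVersion >= 10) {
      errors.foreach { case (tp, error) =>
        if (error == "NOT_LEADER_OR_FOLLOWER")
          currentLeader(tp).foreach(node => endpoints.put(node.id, node))
      }
    }
    endpoints.toMap
  }
}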
// https://issues.apache.org/jira/browse/KAFKA-10730 @nowarn("cat=deprecation") - def sendResponseCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { + def sendResponseCallback(responseStatus: Map[TopicIdPartition, PartitionResponse]): Unit = { val mergedResponseStatus = responseStatus ++ unauthorizedTopicResponses ++ nonExistingTopicResponses ++ invalidRequestResponses var errorInResponse = false val nodeEndpoints = new mutable.HashMap[Int, Node] - mergedResponseStatus.foreachEntry { (topicPartition, status) => + mergedResponseStatus.foreachEntry { (topicIdPartition, status) => if (status.error != Errors.NONE) { errorInResponse = true debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format( request.header.correlationId, request.header.clientId, - topicPartition, + topicIdPartition, status.error.exceptionName)) if (request.header.apiVersion >= 10) { status.error match { case Errors.NOT_LEADER_OR_FOLLOWER => - val leaderNode = getCurrentLeader(topicPartition, request.context.listenerName) + val leaderNode = getCurrentLeader(topicIdPartition.topicPartition(), request.context.listenerName) leaderNode.node.foreach { node => nodeEndpoints.put(node.id(), node) } @@ -462,7 +481,7 @@ class KafkaApis(val requestChannel: RequestChannel, // that the request quota is not enforced if acks == 0. val timeMs = time.milliseconds() val requestSize = request.sizeInBytes - val bandwidthThrottleTimeMs = quotas.produce.maybeRecordAndGetThrottleTimeMs(request, requestSize, timeMs) + val bandwidthThrottleTimeMs = quotas.produce.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), requestSize, timeMs) val requestThrottleTimeMs = if (produceRequest.acks == 0) 0 else quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) @@ -501,9 +520,9 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def processingStatsCallback(processingStats: FetchResponseStats): Unit = { - processingStats.foreachEntry { (tp, info) => - updateRecordConversionStats(request, tp, info) + def processingStatsCallback(processingStats: ProduceResponseStats): Unit = { + processingStats.foreachEntry { (topicIdPartition, info) => + updateRecordConversionStats(request, topicIdPartition.topicPartition(), info) } } @@ -609,12 +628,12 @@ class KafkaApis(val requestChannel: RequestChannel, val partitions = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] val reassigningPartitions = mutable.Set[TopicIdPartition]() val nodeEndpoints = new mutable.HashMap[Int, Node] - responsePartitionData.foreach { case (tp, data) => + responsePartitionData.foreach { case (topicIdPartition, data) => val abortedTransactions = data.abortedTransactions.orElse(null) val lastStableOffset: Long = data.lastStableOffset.orElse(FetchResponse.INVALID_LAST_STABLE_OFFSET) - if (data.isReassignmentFetch) reassigningPartitions.add(tp) + if (data.isReassignmentFetch) reassigningPartitions.add(topicIdPartition) val partitionData = new FetchResponseData.PartitionData() - .setPartitionIndex(tp.partition) + .setPartitionIndex(topicIdPartition.partition) .setErrorCode(maybeDownConvertStorageError(data.error).code) .setHighWatermark(data.highWatermark) .setLastStableOffset(lastStableOffset) @@ -626,7 +645,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (versionId >= 16) { data.error match { case Errors.NOT_LEADER_OR_FOLLOWER | Errors.FENCED_LEADER_EPOCH => - val leaderNode = getCurrentLeader(tp.topicPartition(), request.context.listenerName) + val 
leaderNode = getCurrentLeader(topicIdPartition.topicPartition(), request.context.listenerName) leaderNode.node.foreach { node => nodeEndpoints.put(node.id(), node) } @@ -638,7 +657,7 @@ class KafkaApis(val requestChannel: RequestChannel, } data.divergingEpoch.ifPresent(epoch => partitionData.setDivergingEpoch(epoch)) - partitions.put(tp, partitionData) + partitions.put(topicIdPartition, partitionData) } erroneous.foreach { case (tp, data) => partitions.put(tp, data) } @@ -672,14 +691,14 @@ class KafkaApis(val requestChannel: RequestChannel, val responseSize = fetchContext.getResponseSize(partitions, versionId) val timeMs = time.milliseconds() val requestThrottleTimeMs = quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) - val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs) + val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), responseSize, timeMs) val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs) val fetchResponse = if (maxThrottleTimeMs > 0) { request.apiThrottleTimeMs = maxThrottleTimeMs // Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value // from the fetch quota because we are going to return an empty response. - quotas.fetch.unrecordQuotaSensor(request, responseSize, timeMs) + quotas.fetch.unrecordQuotaSensor(request.session, request.header.clientId(), responseSize, timeMs) if (bandwidthThrottleTimeMs > requestThrottleTimeMs) { requestHelper.throttle(quotas.fetch, request, bandwidthThrottleTimeMs) } else { @@ -711,7 +730,7 @@ class KafkaApis(val requestChannel: RequestChannel, val maxQuotaWindowBytes = if (fetchRequest.isFromFollower) Int.MaxValue else - quotas.fetch.getMaxValueInQuotaWindow(request.session, clientId).toInt + quotas.fetch.maxValueInQuotaWindow(request.session, clientId).toInt val fetchMaxBytes = Math.min(Math.min(fetchRequest.maxBytes, config.fetchMaxBytes), maxQuotaWindowBytes) val fetchMinBytes = Math.min(fetchRequest.minBytes, fetchMaxBytes) @@ -824,7 +843,7 @@ class KafkaApis(val requestChannel: RequestChannel, } else { val nonExistingTopics = topics.diff(topicResponses.asScala.map(_.name).toSet) val nonExistingTopicResponses = if (allowAutoTopicCreation) { - val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request) + val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request.session, request.header.clientId()) autoTopicCreationManager.createTopics(nonExistingTopics, controllerMutationQuota, Some(request.context)) } else { nonExistingTopics.map { topic => @@ -997,9 +1016,11 @@ class KafkaApis(val requestChannel: RequestChannel, groups.forEach { groupOffsetFetch => val isAllPartitions = groupOffsetFetch.topics == null if (!authHelper.authorize(request.context, DESCRIBE, GROUP, groupOffsetFetch.groupId)) { - futures += CompletableFuture.completedFuture(new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(groupOffsetFetch.groupId) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)) + futures += CompletableFuture.completedFuture(OffsetFetchResponse.groupError( + groupOffsetFetch, + Errors.GROUP_AUTHORIZATION_FAILED, + request.header.apiVersion() + )) } else if (isAllPartitions) { futures += fetchAllOffsetsForGroup( request.context, @@ -1018,83 +1039,139 @@ class KafkaApis(val requestChannel: RequestChannel, CompletableFuture.allOf(futures.toArray: _*).handle[Unit] { (_, _) => 
val groupResponses = new ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseGroup](futures.size) futures.foreach(future => groupResponses += future.get()) - requestHelper.sendMaybeThrottle(request, new OffsetFetchResponse(groupResponses.asJava, request.context.apiVersion)) + requestHelper.sendMaybeThrottle(request, new OffsetFetchResponse.Builder(groupResponses.asJava).build(request.context.apiVersion)) } } private def fetchAllOffsetsForGroup( requestContext: RequestContext, - offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, + groupFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, requireStable: Boolean ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion) + groupCoordinator.fetchAllOffsets( requestContext, - offsetFetchRequest, + groupFetchRequest, requireStable - ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (offsetFetchResponse, exception) => + ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (groupFetchResponse, exception) => if (exception != null) { - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(offsetFetchRequest.groupId) - .setErrorCode(Errors.forException(exception).code) - } else if (offsetFetchResponse.errorCode() != Errors.NONE.code) { - offsetFetchResponse + OffsetFetchResponse.groupError( + groupFetchRequest, + Errors.forException(exception), + requestContext.apiVersion() + ) + } else if (groupFetchResponse.errorCode() != Errors.NONE.code) { + groupFetchResponse } else { // Clients are not allowed to see offsets for topics that are not authorized for Describe. - val (authorizedOffsets, _) = authHelper.partitionSeqByAuthorized( + val authorizedNames = authHelper.filterByAuthorized( requestContext, DESCRIBE, TOPIC, - offsetFetchResponse.topics.asScala + groupFetchResponse.topics.asScala )(_.name) - offsetFetchResponse.setTopics(authorizedOffsets.asJava) + + val topics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics] + groupFetchResponse.topics.forEach { topic => + if (authorizedNames.contains(topic.name)) { + if (useTopicIds) { + // If the topic is not provided by the group coordinator, we set it + // using the metadata cache. + if (topic.topicId == Uuid.ZERO_UUID) { + topic.setTopicId(metadataCache.getTopicId(topic.name)) + } + // If we don't have the topic id at all, we skip the topic because + // we can not serialize it without it. + if (topic.topicId != Uuid.ZERO_UUID) { + topics += topic + } + } else { + topics += topic + } + } + } + groupFetchResponse.setTopics(topics.asJava) } } } private def fetchOffsetsForGroup( requestContext: RequestContext, - offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, + groupFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, requireStable: Boolean ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion) + + if (useTopicIds) { + groupFetchRequest.topics.forEach { topic => + if (topic.topicId != Uuid.ZERO_UUID) { + metadataCache.getTopicName(topic.topicId).ifPresent(name => topic.setName(name)) + } + } + } + // Clients are not allowed to see offsets for topics that are not authorized for Describe. 
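// Illustrative sketch, not part of the patch: the explicit-topics OffsetFetch path now routes
// each requested topic either to the coordinator call or directly into the response as an
// error topic (UNKNOWN_TOPIC_ID when the id could not be resolved to a name,
// TOPIC_AUTHORIZATION_FAILED when Describe is denied). The types are simplified stand-ins.
object OffsetFetchSplitSketch {
  final case class RequestedTopic(name: String, partitions: Seq[Int])
  final case class ErrorTopic(name: String, partitions: Seq[Int], error: String)

  def split(topics: Seq[RequestedTopic],
            authorizedNames: Set[String],
            useTopicIds: Boolean): (Seq[RequestedTopic], Seq[ErrorTopic]) = {
    val toCoordinator = Seq.newBuilder[RequestedTopic]
    val errorTopics = Seq.newBuilder[ErrorTopic]
    topics.foreach { t =>
      if (useTopicIds && t.name.isEmpty)
        errorTopics += ErrorTopic(t.name, t.partitions, "UNKNOWN_TOPIC_ID")
      else if (!authorizedNames.contains(t.name))
        errorTopics += ErrorTopic(t.name, t.partitions, "TOPIC_AUTHORIZATION_FAILED")
      else
        toCoordinator += t
    }
    (toCoordinator.result(), errorTopics.result())
  }
}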
- val (authorizedTopics, unauthorizedTopics) = authHelper.partitionSeqByAuthorized( + val authorizedTopicNames = authHelper.filterByAuthorized( requestContext, DESCRIBE, TOPIC, - offsetFetchRequest.topics.asScala + groupFetchRequest.topics.asScala )(_.name) + val authorizedTopics = new mutable.ArrayBuffer[OffsetFetchRequestData.OffsetFetchRequestTopics] + val errorTopics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics] + + def buildErrorResponse( + topic: OffsetFetchRequestData.OffsetFetchRequestTopics, + error: Errors + ): OffsetFetchResponseData.OffsetFetchResponseTopics = { + val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(topic.topicId) + .setName(topic.name) + topic.partitionIndexes.forEach { partitionIndex => + topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partitionIndex) + .setCommittedOffset(-1) + .setErrorCode(error.code)) + } + topicResponse + } + + groupFetchRequest.topics.forEach { topic => + if (useTopicIds && topic.name.isEmpty) { + errorTopics += buildErrorResponse(topic, Errors.UNKNOWN_TOPIC_ID) + } else if (!authorizedTopicNames.contains(topic.name)) { + errorTopics += buildErrorResponse(topic, Errors.TOPIC_AUTHORIZATION_FAILED) + } else { + authorizedTopics += topic + } + } + groupCoordinator.fetchOffsets( requestContext, new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(offsetFetchRequest.groupId) - .setMemberId(offsetFetchRequest.memberId) - .setMemberEpoch(offsetFetchRequest.memberEpoch) + .setGroupId(groupFetchRequest.groupId) + .setMemberId(groupFetchRequest.memberId) + .setMemberEpoch(groupFetchRequest.memberEpoch) .setTopics(authorizedTopics.asJava), requireStable - ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (offsetFetchResponse, exception) => + ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (groupFetchResponse, exception) => if (exception != null) { - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(offsetFetchRequest.groupId) - .setErrorCode(Errors.forException(exception).code) - } else if (offsetFetchResponse.errorCode() != Errors.NONE.code) { - offsetFetchResponse + OffsetFetchResponse.groupError( + groupFetchRequest, + Errors.forException(exception), + requestContext.apiVersion() + ) + } else if (groupFetchResponse.errorCode() != Errors.NONE.code) { + groupFetchResponse } else { val topics = new util.ArrayList[OffsetFetchResponseData.OffsetFetchResponseTopics]( - offsetFetchResponse.topics.size + unauthorizedTopics.size + groupFetchRequest.topics.size + errorTopics.size ) - topics.addAll(offsetFetchResponse.topics) - unauthorizedTopics.foreach { topic => - val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic.name) - topic.partitionIndexes.forEach { partitionIndex => - topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partitionIndex) - .setCommittedOffset(-1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)) - } - topics.add(topicResponse) - } - offsetFetchResponse.setTopics(topics) + topics.addAll(groupFetchResponse.topics) + topics.addAll(errorTopics.asJava) + groupFetchResponse.setTopics(topics) } } } @@ -1168,9 +1245,6 @@ class KafkaApis(val requestChannel: RequestChannel, else { if (keyType == CoordinatorType.SHARE.id) { authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - if (shareCoordinator.isEmpty) { - return (Errors.INVALID_REQUEST, 
Node.noNode) - } try { SharePartitionKey.validate(key) } catch { @@ -1188,13 +1262,13 @@ class KafkaApis(val requestChannel: RequestChannel, case CoordinatorType.SHARE => // We know that shareCoordinator is defined at this stage. - (shareCoordinator.get.partitionFor(SharePartitionKey.getInstance(key)), SHARE_GROUP_STATE_TOPIC_NAME) + (shareCoordinator.partitionFor(SharePartitionKey.getInstance(key)), SHARE_GROUP_STATE_TOPIC_NAME) } val topicMetadata = metadataCache.getTopicMetadata(Set(internalTopicName).asJava, request.context.listenerName, false, false).asScala if (topicMetadata.headOption.isEmpty) { - val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request) + val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request.session, request.header.clientId) autoTopicCreationManager.createTopics(Seq(internalTopicName).toSet, controllerMutationQuota, None) (Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode) } else { @@ -1705,7 +1779,7 @@ class KafkaApis(val requestChannel: RequestChannel, } } - val controlRecords = mutable.Map.empty[TopicPartition, MemoryRecords] + val controlRecords = mutable.Map.empty[TopicIdPartition, MemoryRecords] partitionsWithCompatibleMessageFormat.foreach { partition => if (partition.topic == GROUP_METADATA_TOPIC_NAME) { groupCoordinator.completeTransaction( @@ -1733,7 +1807,8 @@ class KafkaApis(val requestChannel: RequestChannel, } else { // Otherwise, the regular appendRecords path is used for all the non __consumer_offsets // partitions or for all partitions when the new group coordinator is disabled. - controlRecords += partition -> MemoryRecords.withEndTransactionMarker( + // If topicIdPartition contains Uuid.ZERO_UUid all functionality will fall back on topic name. + controlRecords += replicaManager.topicIdPartition(partition) -> MemoryRecords.withEndTransactionMarker( producerId, marker.producerEpoch, new EndTransactionMarker(controlRecordType, marker.coordinatorEpoch) @@ -1750,8 +1825,8 @@ class KafkaApis(val requestChannel: RequestChannel, entriesPerPartition = controlRecords, requestLocal = requestLocal, responseCallback = errors => { - errors.foreachEntry { (tp, partitionResponse) => - addResultAndMaybeComplete(tp, partitionResponse.error) + errors.foreachEntry { (topicIdPartition, partitionResponse) => + addResultAndMaybeComplete(topicIdPartition.topicPartition(), partitionResponse.error) } } ) @@ -1819,7 +1894,7 @@ class KafkaApis(val requestChannel: RequestChannel, } else { val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]() val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]() - val authorizedPartitions = mutable.Set[TopicPartition]() + val authorizedPartitions = new util.HashSet[TopicPartition]() // Only request versions less than 4 need write authorization since they come from clients. val authorizedTopics = @@ -1841,7 +1916,7 @@ class KafkaApis(val requestChannel: RequestChannel, // partitions which failed, and an 'OPERATION_NOT_ATTEMPTED' error code for the partitions which succeeded // the authorization check to indicate that they were not added to the transaction. 
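// Illustrative sketch, not part of the patch: when any partition in a verify-only
// AddPartitionsToTxn request fails the authorization or existence checks, the failed
// partitions keep their error while the partitions that passed are reported with
// OPERATION_NOT_ATTEMPTED, signalling they were deliberately not added to the transaction.
// Plain strings stand in for TopicPartition and the Errors constants.
object AddPartitionsToTxnErrorSketch {
  def partitionErrors(unauthorized: Map[String, String],
                      nonExisting: Map[String, String],
                      authorized: Set[String]): Map[String, String] =
    unauthorized ++ nonExisting ++ authorized.map(_ -> "OPERATION_NOT_ATTEMPTED")
}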
val partitionErrors = unauthorizedTopicErrors ++ nonExistingTopicErrors ++ - authorizedPartitions.map(_ -> Errors.OPERATION_NOT_ATTEMPTED) + authorizedPartitions.asScala.map(_ -> Errors.OPERATION_NOT_ATTEMPTED) addResultAndMaybeSendResponse(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitionErrors.asJava)) } else { def sendResponseCallback(error: Errors): Unit = { @@ -1920,7 +1995,7 @@ class KafkaApis(val requestChannel: RequestChannel, txnCoordinator.handleAddPartitionsToTransaction(transactionalId, addOffsetsToTxnRequest.data.producerId, addOffsetsToTxnRequest.data.producerEpoch, - Set(offsetTopicPartition), + util.Set.of(offsetTopicPartition), sendResponseCallback, TransactionVersion.TV_0, // This request will always come from the client not using TV 2. requestLocal) @@ -2160,12 +2235,12 @@ class KafkaApis(val requestChannel: RequestChannel, (replicaManager.describeLogDirs(partitions), Errors.NONE) } else { - (List.empty[DescribeLogDirsResponseData.DescribeLogDirsResult], Errors.CLUSTER_AUTHORIZATION_FAILED) + (util.Collections.emptyList[DescribeLogDirsResponseData.DescribeLogDirsResult], Errors.CLUSTER_AUTHORIZATION_FAILED) } } requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => new DescribeLogDirsResponse(new DescribeLogDirsResponseData() .setThrottleTimeMs(throttleTimeMs) - .setResults(logDirInfos.asJava) + .setResults(logDirInfos) .setErrorCode(error.code))) } @@ -2238,7 +2313,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (!allowTokenRequests(request)) sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, Collections.emptyList) - else if (!new DelegationTokenManagerConfigs(config).tokenAuthEnabled) + else if (!tokenManager.isEnabled) sendResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, Collections.emptyList) else { val requestPrincipal = request.context.principal @@ -2482,7 +2557,13 @@ class KafkaApis(val requestChannel: RequestChannel, val filteredProducerIds = listTransactionsRequest.data.producerIdFilters.asScala.map(Long.unbox).toSet val filteredStates = listTransactionsRequest.data.stateFilters.asScala.toSet val durationFilter = listTransactionsRequest.data.durationFilter() - val response = txnCoordinator.handleListTransactions(filteredProducerIds, filteredStates, durationFilter) + val transactionalIdPatternFilter = listTransactionsRequest.data.transactionalIdPattern + val response = txnCoordinator.handleListTransactions( + filteredProducerIds, + filteredStates, + durationFilter, + transactionalIdPatternFilter + ) // The response should contain only transactionalIds that the principal // has `Describe` permission to access. 
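// Illustrative sketch, not part of the patch: the updated ListTransactions path now applies a
// transactionalIdPattern (regex) filter in addition to the existing Describe-authorization
// filter on the returned transactional ids. canDescribe is a hypothetical stand-in for the
// authorizer call; in the broker the pattern filtering happens inside the transaction
// coordinator rather than in KafkaApis.
object ListTransactionsFilterSketch {
  def visibleTransactionalIds(ids: Seq[String],
                              transactionalIdPattern: Option[String],
                              canDescribe: String => Boolean): Seq[String] = {
    val pattern = transactionalIdPattern.map(_.r.pattern)
    ids.filter(id => pattern.forall(_.matcher(id).matches()) && canDescribe(id))
  }
}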
@@ -2631,11 +2712,15 @@ class KafkaApis(val requestChannel: RequestChannel, } } } + } + private def streamsVersion(): StreamsVersion = { + StreamsVersion.fromFeatureLevel(metadataCache.features.finalizedFeatures.getOrDefault(StreamsVersion.FEATURE_NAME, 0.toShort)) } private def isStreamsGroupProtocolEnabled: Boolean = { - config.groupCoordinatorRebalanceProtocols.contains(Group.GroupType.STREAMS) + config.groupCoordinatorRebalanceProtocols.contains(Group.GroupType.STREAMS) && + streamsVersion().streamsGroupSupported } def handleStreamsGroupHeartbeat(request: RequestChannel.Request): CompletableFuture[Unit] = { @@ -2713,16 +2798,49 @@ class KafkaApis(val requestChannel: RequestChannel, if (responseData.status() == null) { responseData.setStatus(new util.ArrayList()); } - responseData.status().add( - new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Unauthorized to CREATE on topics " + createTopicUnauthorized.mkString(",") + ".") - ) + val missingInternalTopicStatus = + responseData.status().stream().filter(x => x.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()).findFirst() + if (missingInternalTopicStatus.isPresent) { + missingInternalTopicStatus.get().setStatusDetail( + missingInternalTopicStatus.get().statusDetail() + "; Unauthorized to CREATE on topics " + createTopicUnauthorized.mkString(", ") + "." + ) + } else { + responseData.status().add( + new StreamsGroupHeartbeatResponseData.Status() + .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) + .setStatusDetail("Unauthorized to CREATE on topics " + createTopicUnauthorized.mkString(", ") + ".") + ) + } } else { - autoTopicCreationManager.createStreamsInternalTopics(topicsToCreate, requestContext); + // Compute group-specific timeout for caching errors (2 * heartbeat interval) + val heartbeatIntervalMs = Option(groupConfigManager.groupConfig(streamsGroupHeartbeatRequest.data.groupId).orElse(null)) + .map(_.streamsHeartbeatIntervalMs().toLong) + .getOrElse(config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs().toLong) + val timeoutMs = heartbeatIntervalMs * 2 + + autoTopicCreationManager.createStreamsInternalTopics(topicsToCreate, requestContext, timeoutMs) + + // Check for cached topic creation errors only if there's already a MISSING_INTERNAL_TOPICS status + val hasMissingInternalTopicsStatus = responseData.status() != null && + responseData.status().stream().anyMatch(s => s.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) + + if (hasMissingInternalTopicsStatus) { + val currentTimeMs = time.milliseconds() + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(topicsToCreate.keys.toSet, currentTimeMs) + if (cachedErrors.nonEmpty) { + val missingInternalTopicStatus = + responseData.status().stream().filter(x => x.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()).findFirst() + val creationErrorDetails = cachedErrors.map { case (topic, error) => s"$topic ($error)" }.mkString(", ") + if (missingInternalTopicStatus.isPresent) { + val existingDetail = Option(missingInternalTopicStatus.get().statusDetail()).getOrElse("") + missingInternalTopicStatus.get().setStatusDetail( + existingDetail + s"; Creation failed: $creationErrorDetails." 
+ ) + } + } + } } } - requestHelper.sendMaybeThrottle(request, new StreamsGroupHeartbeatResponse(responseData)) } } @@ -2849,16 +2967,60 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleListClientMetricsResources(request: RequestChannel.Request): Unit = { - val listClientMetricsResourcesRequest = request.body[ListClientMetricsResourcesRequest] + /** + * Handle ListConfigResourcesRequest. If resourceTypes are not specified, it uses ListConfigResourcesRequest#supportedResourceTypes + * to retrieve config resources. If resourceTypes are specified, it returns matched config resources. + * If a config resource type is not supported, the handler returns UNSUPPORTED_VERSION. + */ + private def handleListConfigResources(request: RequestChannel.Request): Unit = { + val listConfigResourcesRequest = request.body[ListConfigResourcesRequest] if (!authHelper.authorize(request.context, DESCRIBE_CONFIGS, CLUSTER, CLUSTER_NAME)) { - requestHelper.sendMaybeThrottle(request, listClientMetricsResourcesRequest.getErrorResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) + requestHelper.sendMaybeThrottle(request, listConfigResourcesRequest.getErrorResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - val data = new ListClientMetricsResourcesResponseData().setClientMetricsResources( - clientMetricsManager.listClientMetricsResources.stream.map( - name => new ClientMetricsResource().setName(name)).collect(Collectors.toList())) - requestHelper.sendMaybeThrottle(request, new ListClientMetricsResourcesResponse(data)) + val data = new ListConfigResourcesResponseData() + + val supportedResourceTypes = listConfigResourcesRequest.supportedResourceTypes() + var resourceTypes = listConfigResourcesRequest.data().resourceTypes() + if (resourceTypes.isEmpty) { + resourceTypes = supportedResourceTypes.stream().toList + } + + resourceTypes.forEach(resourceType => + if (!supportedResourceTypes.contains(resourceType)) { + requestHelper.sendMaybeThrottle(request, new ListConfigResourcesResponse(data.setErrorCode(Errors.UNSUPPORTED_VERSION.code()))) + return + } + ) + + val result = new util.ArrayList[ListConfigResourcesResponseData.ConfigResource]() + if (resourceTypes.contains(ConfigResource.Type.GROUP.id)) { + groupConfigManager.groupIds().forEach(id => + result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(id).setResourceType(ConfigResource.Type.GROUP.id)) + ) + } + if (resourceTypes.contains(ConfigResource.Type.CLIENT_METRICS.id)) { + clientMetricsManager.listClientMetricsResources.forEach(name => + result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(name).setResourceType(ConfigResource.Type.CLIENT_METRICS.id)) + ) + } + if (resourceTypes.contains(ConfigResource.Type.BROKER_LOGGER.id)) { + metadataCache.getBrokerNodes(request.context.listenerName).forEach(node => + result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(node.id.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id)) + ) + } + if (resourceTypes.contains(ConfigResource.Type.BROKER.id)) { + metadataCache.getBrokerNodes(request.context.listenerName).forEach(node => + result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(node.id.toString).setResourceType(ConfigResource.Type.BROKER.id)) + ) + } + if (resourceTypes.contains(ConfigResource.Type.TOPIC.id)) { + metadataCache.getAllTopics.forEach(name => + result.add(new 
ListConfigResourcesResponseData.ConfigResource().setResourceName(name).setResourceType(ConfigResource.Type.TOPIC.id)) + ) + } + data.setConfigResources(result) + requestHelper.sendMaybeThrottle(request, new ListConfigResourcesResponse(data)) } } @@ -2986,12 +3148,12 @@ class KafkaApis(val requestChannel: RequestChannel, /** * Handle a shareFetch request */ - def handleShareFetchRequest(request: RequestChannel.Request): Unit = { + def handleShareFetchRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val shareFetchRequest = request.body[ShareFetchRequest] if (!isShareGroupProtocolEnabled) { requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } val groupId = shareFetchRequest.data.groupId @@ -2999,7 +3161,7 @@ class KafkaApis(val requestChannel: RequestChannel, // Share Fetch needs permission to perform the READ action on the named group resource (groupId) if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } val memberId = shareFetchRequest.data.memberId @@ -3022,11 +3184,22 @@ class KafkaApis(val requestChannel: RequestChannel, try { // Creating the shareFetchContext for Share Session Handling. if context creation fails, the request is failed directly here. - shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent) + shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent, request.context.connectionId) } catch { + case _: ShareSessionLimitReachedException => + sharePartitionManager.createIdleShareFetchTimerTask(shareFetchRequest.maxWait).handle( + (_, exception) => { + if (exception != null) { + requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception)) + } else { + requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.SHARE_SESSION_LIMIT_REACHED.exception)) + } + } + ) + return CompletableFuture.completedFuture[Unit](()) case e: Exception => requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) - return + return CompletableFuture.completedFuture[Unit](()) } val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = shareFetchContext.getErroneousAndValidTopicIdPartitions @@ -3116,6 +3289,7 @@ class KafkaApis(val requestChannel: RequestChannel, .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(value) + .setRecords(MemoryRecords.EMPTY) topic.partitions.add(fetchPartitionData) } topicPartitionAcknowledgements.remove(topicId) @@ -3131,6 +3305,7 @@ class KafkaApis(val requestChannel: RequestChannel, .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(value) + .setRecords(MemoryRecords.EMPTY) topicData.partitions.add(fetchPartitionData) } shareFetchResponse.data.responses.add(topicData) @@ -3174,7 +3349,7 @@ class KafkaApis(val requestChannel: RequestChannel, val interestedTopicPartitions = new util.ArrayList[TopicIdPartition] - 
erroneousAndValidPartitionData.validTopicIdPartitions.forEach { case topicIdPartition => + erroneousAndValidPartitionData.validTopicIdPartitions.forEach { topicIdPartition => if (!authorizedTopics.contains(topicIdPartition.topicPartition.topic)) erroneous += topicIdPartition -> ShareFetchResponse.partitionResponse(topicIdPartition, Errors.TOPIC_AUTHORIZATION_FAILED) else if (!metadataCache.contains(topicIdPartition.topicPartition)) @@ -3194,7 +3369,7 @@ class KafkaApis(val requestChannel: RequestChannel, // for share fetch from consumer, cap fetchMaxBytes to the maximum bytes that could be fetched without being // throttled given no bytes were recorded in the recent quota window. Trying to fetch more bytes would result // in a guaranteed throttling potentially blocking consumer progress. - val maxQuotaWindowBytes = quotas.fetch.getMaxValueInQuotaWindow(request.session, clientId).toInt + val maxQuotaWindowBytes = quotas.fetch.maxValueInQuotaWindow(request.session, clientId).toInt val fetchMaxBytes = Math.min(Math.min(shareFetchRequest.maxBytes, config.fetchMaxBytes), maxQuotaWindowBytes) val fetchMinBytes = Math.min(shareFetchRequest.minBytes, fetchMaxBytes) @@ -3294,13 +3469,13 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleShareAcknowledgeRequest(request: RequestChannel.Request): Unit = { + def handleShareAcknowledgeRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val shareAcknowledgeRequest = request.body[ShareAcknowledgeRequest] if (!isShareGroupProtocolEnabled) { requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } val groupId = shareAcknowledgeRequest.data.groupId @@ -3309,7 +3484,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } val memberId = shareAcknowledgeRequest.data.memberId @@ -3322,7 +3497,7 @@ class KafkaApis(val requestChannel: RequestChannel, } catch { case e: Exception => requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) - return + return CompletableFuture.completedFuture[Unit](()) } val topicIdPartitionSeq: mutable.Set[TopicIdPartition] = mutable.Set() @@ -3370,6 +3545,8 @@ class KafkaApis(val requestChannel: RequestChannel, def handleInitializeShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val initializeShareGroupStateRequest = request.body[InitializeShareGroupStateRequest] + // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, + // hence requests won't reach Persister. 
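// Illustrative sketch, not part of the patch: with the share coordinator now a required
// dependency rather than an Option, every share-group state handler reduces to the same
// call-and-complete shape shown here. The three function parameters are hypothetical
// stand-ins for the coordinator call and the response helpers.
object ShareStateCompletionSketch {
  import java.util.concurrent.CompletableFuture

  def complete[R](callCoordinator: () => CompletableFuture[R],
                  sendResponse: R => Unit,
                  sendError: Throwable => Unit): CompletableFuture[Unit] =
    callCoordinator().handle[Unit] { (response, exception) =>
      if (exception != null) sendError(exception)
      else sendResponse(response)
    }
}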
if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { requestHelper.sendMaybeThrottle(request, new InitializeShareGroupStateResponse( @@ -3380,25 +3557,20 @@ class KafkaApis(val requestChannel: RequestChannel, return CompletableFuture.completedFuture[Unit](()) } - shareCoordinator match { - case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - initializeShareGroupStateRequest.getErrorResponse(requestThrottleMs, - new ApiException("Share coordinator is not enabled."))) - CompletableFuture.completedFuture[Unit](()) - - case Some(coordinator) => coordinator.initializeState(request.context, initializeShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, initializeShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new InitializeShareGroupStateResponse(response)) - } + shareCoordinator.initializeState(request.context, initializeShareGroupStateRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, initializeShareGroupStateRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new InitializeShareGroupStateResponse(response)) } - } + } } def handleReadShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val readShareGroupStateRequest = request.body[ReadShareGroupStateRequest] + // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, + // hence requests won't reach Persister. if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse( @@ -3409,24 +3581,20 @@ class KafkaApis(val requestChannel: RequestChannel, return CompletableFuture.completedFuture[Unit](()) } - shareCoordinator match { - case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - readShareGroupStateRequest.getErrorResponse(requestThrottleMs, - new ApiException("Share coordinator is not enabled."))) - CompletableFuture.completedFuture[Unit](()) - case Some(coordinator) => coordinator.readState(request.context, readShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, readShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse(response)) - } + shareCoordinator.readState(request.context, readShareGroupStateRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, readShareGroupStateRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse(response)) } - } + } } def handleWriteShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val writeShareGroupStateRequest = request.body[WriteShareGroupStateRequest] + // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, + // hence requests won't reach Persister. 
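// Illustrative sketch, not part of the patch: handlers such as ShareFetch and ShareAcknowledge
// now return CompletableFuture[Unit] so failures can be chained with exceptionally(handleError);
// validation failures still answer the client immediately, but do so by returning an already
// completed future, as modelled here with hypothetical checksPass/rejectNow/process stand-ins.
object AsyncHandlerEarlyReturnSketch {
  import java.util.concurrent.CompletableFuture

  def handle(checksPass: Boolean,
             rejectNow: () => Unit,
             process: () => CompletableFuture[Unit]): CompletableFuture[Unit] =
    if (!checksPass) {
      rejectNow()
      CompletableFuture.completedFuture[Unit](())
    } else {
      process()
    }
}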
if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse( @@ -3437,24 +3605,20 @@ class KafkaApis(val requestChannel: RequestChannel, return CompletableFuture.completedFuture[Unit](()) } - shareCoordinator match { - case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - writeShareGroupStateRequest.getErrorResponse(requestThrottleMs, - new ApiException("Share coordinator is not enabled."))) - CompletableFuture.completedFuture[Unit](()) - case Some(coordinator) => coordinator.writeState(request.context, writeShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, writeShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse(response)) - } + shareCoordinator.writeState(request.context, writeShareGroupStateRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, writeShareGroupStateRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse(response)) } - } + } } def handleDeleteShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val deleteShareGroupStateRequest = request.body[DeleteShareGroupStateRequest] + // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, + // hence requests won't reach Persister. if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { requestHelper.sendMaybeThrottle(request, new DeleteShareGroupStateResponse( @@ -3465,25 +3629,20 @@ class KafkaApis(val requestChannel: RequestChannel, return CompletableFuture.completedFuture[Unit](()) } - shareCoordinator match { - case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - deleteShareGroupStateRequest.getErrorResponse(requestThrottleMs, - new ApiException("Share coordinator is not enabled."))) - CompletableFuture.completedFuture[Unit](()) - - case Some(coordinator) => coordinator.deleteState(request.context, deleteShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new DeleteShareGroupStateResponse(response)) - } + shareCoordinator.deleteState(request.context, deleteShareGroupStateRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, deleteShareGroupStateRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new DeleteShareGroupStateResponse(response)) } - } + } } def handleReadShareGroupStateSummaryRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val readShareGroupStateSummaryRequest = request.body[ReadShareGroupStateSummaryRequest] + // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, + // hence requests won't reach Persister. 
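// Standalone sketch of the early-return shape the handlers above adopt once their return type becomes
// CompletableFuture[Unit]: precondition failures (disabled protocol, failed authorization, bad member id)
// send an error response and return an already-completed future instead of a bare `return`. Names here
// are illustrative stand-ins, not the broker's actual helpers.
import java.util.concurrent.CompletableFuture

object CompletedFutureEarlyReturnSketch {
  def handle(authorized: Boolean,
             doAsyncWork: () => CompletableFuture[Unit],
             sendError: () => Unit): CompletableFuture[Unit] = {
    if (!authorized) {
      sendError()
      return CompletableFuture.completedFuture[Unit](())
    }
    doAsyncWork()
  }

  def main(args: Array[String]): Unit = {
    val done = handle(
      authorized = false,
      doAsyncWork = () => CompletableFuture.completedFuture[Unit](()),
      sendError = () => println("sent GROUP_AUTHORIZATION_FAILED"))
    println(done.isDone) // true: the caller always gets a future, even on the error path
  }
}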
if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse( @@ -3494,23 +3653,17 @@ class KafkaApis(val requestChannel: RequestChannel, return CompletableFuture.completedFuture[Unit](()) } - shareCoordinator match { - case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - readShareGroupStateSummaryRequest.getErrorResponse(requestThrottleMs, - new ApiException("Share coordinator is not enabled."))) - CompletableFuture.completedFuture[Unit](()) - case Some(coordinator) => coordinator.readStateSummary(request.context, readShareGroupStateSummaryRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse(response)) - } + shareCoordinator.readStateSummary(request.context, readShareGroupStateSummaryRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse(response)) } - } + } } - def handleDescribeShareGroupOffsetsRequest(request: RequestChannel.Request): Unit = { + def handleDescribeShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val describeShareGroupOffsetsRequest = request.body[DescribeShareGroupOffsetsRequest] val groups = describeShareGroupOffsetsRequest.groups() @@ -3623,23 +3776,68 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleAlterShareGroupOffsetsRequest(request: RequestChannel.Request): Unit = { + def handleAlterShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val alterShareGroupOffsetsRequest = request.body[AlterShareGroupOffsetsRequest] - requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + val groupId = alterShareGroupOffsetsRequest.data.groupId + + if (!isShareGroupProtocolEnabled) { + requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + return CompletableFuture.completedFuture[Unit](()) + } else if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { + requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) + } else { + val responseBuilder = new AlterShareGroupOffsetsResponse.Builder() + val authorizedTopicPartitions = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection() + + alterShareGroupOffsetsRequest.data.topics.forEach(topic => { + val topicError = { + if (!authHelper.authorize(request.context, READ, TOPIC, topic.topicName)) { + Some(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED)) + } else if (!metadataCache.contains(topic.topicName)) { + Some(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + } else { + None + } + } + topicError match { + case Some(error) => + topic.partitions.forEach(partition => responseBuilder.addPartition(topic.topicName, partition.partitionIndex, metadataCache.topicNamesToIds, error)) + case None => + authorizedTopicPartitions.add(topic.duplicate) + } + }) + + val data = new AlterShareGroupOffsetsRequestData() + .setGroupId(groupId) + 
.setTopics(authorizedTopicPartitions) + groupCoordinator.alterShareGroupOffsets( + request.context, + groupId, + data + ).handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(exception)) + } else if (response.errorCode != Errors.NONE.code) { + requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, response.errorCode, response.errorMessage)) + } else { + requestHelper.sendMaybeThrottle(request, responseBuilder.merge(response, metadataCache.topicNamesToIds).build()) + } + } + } CompletableFuture.completedFuture[Unit](()) } - def handleDeleteShareGroupOffsetsRequest(request: RequestChannel.Request): Unit = { + def handleDeleteShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val deleteShareGroupOffsetsRequest = request.body[DeleteShareGroupOffsetsRequest] val groupId = deleteShareGroupOffsetsRequest.data.groupId if (!isShareGroupProtocolEnabled) { requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } else if (!authHelper.authorize(request.context, DELETE, GROUP, groupId)) { requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return + return CompletableFuture.completedFuture[Unit](()) } val deleteShareGroupOffsetsResponseTopics: util.List[DeleteShareGroupOffsetsResponseTopic] = new util.ArrayList[DeleteShareGroupOffsetsResponseTopic]() @@ -3652,33 +3850,27 @@ class KafkaApis(val requestChannel: RequestChannel, deleteShareGroupOffsetsResponseTopics.add( new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() .setTopicName(topic.topicName) - .setPartitions(topic.partitions.map(partition => { - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(partition) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message()) - }).toList.asJava) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message) ) } else { authorizedTopics.add(topic) } } - if (authorizedTopics.isEmpty) { - requestHelper.sendMaybeThrottle(request, new DeleteShareGroupOffsetsResponse(new DeleteShareGroupOffsetsResponseData())) - return - } - groupCoordinator.deleteShareGroupOffsets( request.context, new DeleteShareGroupOffsetsRequestData().setGroupId(groupId).setTopics(authorizedTopics) ).handle[Unit] {(responseData, exception) => { if (exception != null) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception)) + requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse( + AbstractResponse.DEFAULT_THROTTLE_TIME, + Errors.forException(exception).code, + exception.getMessage)) } else if (responseData.errorCode() != Errors.NONE.code) { requestHelper.sendMaybeThrottle( request, - deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, responseData.errorCode(), responseData.errorMessage()) + deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, 
responseData.errorCode, responseData.errorMessage) ) } else { responseData.responses.forEach { topic => { @@ -3902,14 +4094,14 @@ class KafkaApis(val requestChannel: RequestChannel, val responseSize = shareFetchContext.responseSize(partitions, versionId) val timeMs = time.milliseconds() val requestThrottleTimeMs = quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) - val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs) + val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), responseSize, timeMs) val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs) if (maxThrottleTimeMs > 0) { request.apiThrottleTimeMs = maxThrottleTimeMs // Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value // from the fetch quota because we are going to return an empty response. - quotas.fetch.unrecordQuotaSensor(request, responseSize, timeMs) + quotas.fetch.unrecordQuotaSensor(request.session, request.header.clientId(), responseSize, timeMs) if (bandwidthThrottleTimeMs > requestThrottleTimeMs) { requestHelper.throttle(quotas.fetch, request, bandwidthThrottleTimeMs) } else { @@ -3937,8 +4129,12 @@ class KafkaApis(val requestChannel: RequestChannel, .setCurrentLeader(partitionData.currentLeader) } + private def shareVersion(): ShareVersion = { + ShareVersion.fromFeatureLevel(metadataCache.features.finalizedFeatures.getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort)) + } + private def isShareGroupProtocolEnabled: Boolean = { - config.shareGroupConfig.isShareGroupEnabled + config.shareGroupConfig.isShareGroupEnabled || shareVersion().supportsShareGroups } private def updateRecordConversionStats(request: RequestChannel.Request, diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 7049d5f24744b..d9b8c5cd91fad 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit import java.util.Properties import kafka.utils.{CoreUtils, Logging} import kafka.utils.Implicits._ -import org.apache.kafka.common.Reconfigurable +import org.apache.kafka.common.{Endpoint, Reconfigurable} import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, TopicConfig} import org.apache.kafka.common.config.ConfigDef.ConfigKey import org.apache.kafka.common.config.internals.BrokerSecurityConfigs @@ -34,7 +34,6 @@ import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.security.auth.KafkaPrincipalSerde import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Utils -import org.apache.kafka.network.EndPoint import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} @@ -44,11 +43,10 @@ import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} import org.apache.kafka.security.authorizer.AuthorizerUtils import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.authorizer.Authorizer -import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.config.AbstractKafkaConfig.getMap import org.apache.kafka.server.config.{AbstractKafkaConfig, KRaftConfigs, QuotaConfig, ReplicationConfigs, 
ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs -import org.apache.kafka.server.util.Csv import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} import scala.jdk.CollectionConverters._ @@ -209,7 +207,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def quotaConfig: QuotaConfig = _quotaConfig /** ********* General Configuration ***********/ - var brokerId: Int = getInt(ServerConfigs.BROKER_ID_CONFIG) val nodeId: Int = getInt(KRaftConfigs.NODE_ID_CONFIG) val initialRegistrationTimeoutMs: Int = getInt(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG) val brokerHeartbeatIntervalMs: Int = getInt(KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG) @@ -224,14 +221,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) case role => throw new ConfigException(s"Unknown process role '$role'" + " (only 'broker' and 'controller' are allowed roles)") } - - val distinctRoles: Set[ProcessRole] = roles.toSet - - if (distinctRoles.size != roles.size) { - throw new ConfigException(s"Duplicate role names found in `${KRaftConfigs.PROCESS_ROLES_CONFIG}`: $roles") - } - - distinctRoles + roles.toSet } def isKRaftCombinedMode: Boolean = { @@ -241,14 +231,13 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def metadataLogDir: String = { Option(getString(MetadataLogConfig.METADATA_LOG_DIR_CONFIG)) match { case Some(dir) => dir - case None => logDirs.head + case None => logDirs.get(0) } } val serverMaxStartupTimeMs = getLong(KRaftConfigs.SERVER_MAX_STARTUP_TIME_MS_CONFIG) def messageMaxBytes = getInt(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG) - val requestTimeoutMs = getInt(ServerConfigs.REQUEST_TIMEOUT_MS_CONFIG) val connectionSetupTimeoutMs = getLong(ServerConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG) val connectionSetupTimeoutMaxMs = getLong(ServerConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG) @@ -276,12 +265,12 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } val earlyStartListeners: Set[ListenerName] = { - val listenersSet = listeners.map(_.listenerName).toSet - val controllerListenersSet = controllerListeners.map(_.listenerName).toSet - Option(getString(ServerConfigs.EARLY_START_LISTENERS_CONFIG)) match { + val listenersSet = listeners.map(l => ListenerName.normalised(l.listener)).toSet + val controllerListenersSet = controllerListeners.map(l => ListenerName.normalised(l.listener)).toSet + Option(getList(ServerConfigs.EARLY_START_LISTENERS_CONFIG)) match { case None => controllerListenersSet - case Some(str) => - str.split(",").map(_.trim()).filterNot(_.isEmpty).map { str => + case Some(list) => + list.asScala.map(_.trim()).filterNot(_.isEmpty).map { str => val listenerName = new ListenerName(str) if (!listenersSet.contains(listenerName) && !controllerListenersSet.contains(listenerName)) throw new ConfigException(s"${ServerConfigs.EARLY_START_LISTENERS_CONFIG} contains " + @@ -299,7 +288,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) val socketListenBacklogSize = getInt(SocketServerConfigs.SOCKET_LISTEN_BACKLOG_SIZE_CONFIG) def maxConnectionsPerIp = getInt(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG) def maxConnectionsPerIpOverrides: Map[String, Int] = - getMap(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, getString(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG)).map { case (k, v) => (k, v.toInt)} + 
getMap(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, getString(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG)).asScala.map { case (k, v) => (k, v.toInt)} def maxConnections = getInt(SocketServerConfigs.MAX_CONNECTIONS_CONFIG) def maxConnectionCreationRate = getInt(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG) val connectionsMaxIdleMs = getLong(SocketServerConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG) @@ -315,7 +304,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) /** ********* Log Configuration ***********/ val autoCreateTopicsEnable = getBoolean(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG) val numPartitions = getInt(ServerLogConfigs.NUM_PARTITIONS_CONFIG) - val logDirs: Seq[String] = Csv.parseCsvList(Option(getString(ServerLogConfigs.LOG_DIRS_CONFIG)).getOrElse(getString(ServerLogConfigs.LOG_DIR_CONFIG))).asScala def logSegmentBytes = getInt(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG) def logFlushIntervalMessages = getLong(ServerLogConfigs.LOG_FLUSH_INTERVAL_MESSAGES_CONFIG) def logCleanerThreads = getInt(CleanerConfig.LOG_CLEANER_THREADS_PROP) @@ -379,14 +367,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) if (!protocols.contains(GroupType.CLASSIC)) { throw new ConfigException(s"Disabling the '${GroupType.CLASSIC}' protocol is not supported.") } - if (protocols.contains(GroupType.SHARE)) { - warn(s"Share groups and the new '${GroupType.SHARE}' rebalance protocol are enabled. " + - "This is part of the early access of KIP-932 and MUST NOT be used in production.") - } - if (protocols.contains(GroupType.STREAMS)) { - warn(s"Streams groups and the new '${GroupType.STREAMS}' rebalance protocol are enabled. " + - "This is part of the early access of KIP-1071 and MUST NOT be used in production.") - } protocols } @@ -410,8 +390,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) Set.empty[String] } - def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1 - def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2 def saslMechanismInterBrokerProtocol = getString(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG) /** ********* Fetch Configuration **************/ @@ -455,60 +433,43 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) millis } - private def getMap(propName: String, propValue: String): Map[String, String] = { - try { - Csv.parseCsvMap(propValue).asScala - } catch { - case e: Exception => throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage)) - } - } - - def listeners: Seq[EndPoint] = - CoreUtils.listenerListToEndPoints(getString(SocketServerConfigs.LISTENERS_CONFIG), effectiveListenerSecurityProtocolMap) + def listeners: Seq[Endpoint] = + CoreUtils.listenerListToEndPoints(getList(SocketServerConfigs.LISTENERS_CONFIG), effectiveListenerSecurityProtocolMap) - def controllerListenerNames: Seq[String] = { - val value = Option(getString(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG)).getOrElse("") - if (value.isEmpty) { - Seq.empty - } else { - value.split(",") - } - } - - def controllerListeners: Seq[EndPoint] = - listeners.filter(l => controllerListenerNames.contains(l.listenerName.value())) + def controllerListeners: Seq[Endpoint] = + listeners.filter(l => controllerListenerNames.contains(l.listener)) def saslMechanismControllerProtocol: String = getString(KRaftConfigs.SASL_MECHANISM_CONTROLLER_PROTOCOL_CONFIG) - def 
dataPlaneListeners: Seq[EndPoint] = { + def dataPlaneListeners: Seq[Endpoint] = { listeners.filterNot { listener => - val name = listener.listenerName.value() + val name = listener.listener controllerListenerNames.contains(name) } } - def effectiveAdvertisedControllerListeners: Seq[EndPoint] = { - val advertisedListenersProp = getString(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) + def effectiveAdvertisedControllerListeners: Seq[Endpoint] = { + val advertisedListenersProp = getList(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) val controllerAdvertisedListeners = if (advertisedListenersProp != null) { CoreUtils.listenerListToEndPoints(advertisedListenersProp, effectiveListenerSecurityProtocolMap, requireDistinctPorts=false) - .filter(l => controllerListenerNames.contains(l.listenerName.value())) + .filter(l => controllerListenerNames.contains(l.listener)) } else { Seq.empty } val controllerListenersValue = controllerListeners - controllerListenerNames.flatMap { name => + controllerListenerNames.asScala.flatMap { name => controllerAdvertisedListeners - .find(endpoint => endpoint.listenerName.equals(ListenerName.normalised(name))) + .find(endpoint => ListenerName.normalised(endpoint.listener).equals(ListenerName.normalised(name))) .orElse( // If users don't define advertised.listeners, the advertised controller listeners inherit from listeners configuration // which match listener names in controller.listener.names. // Removing "0.0.0.0" host to avoid validation errors. This is to be compatible with the old behavior before 3.9. // The null or "" host does a reverse lookup in ListenerInfo#withWildcardHostnamesResolved. controllerListenersValue - .find(endpoint => endpoint.listenerName.equals(ListenerName.normalised(name))) + .find(endpoint => ListenerName.normalised(endpoint.listener).equals(ListenerName.normalised(name))) .map(endpoint => if (endpoint.host == "0.0.0.0") { - new EndPoint(null, endpoint.port, endpoint.listenerName, endpoint.securityProtocol) + new Endpoint(endpoint.listener, endpoint.securityProtocol, null, endpoint.port) } else { endpoint }) @@ -516,67 +477,16 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } } - def effectiveAdvertisedBrokerListeners: Seq[EndPoint] = { + def effectiveAdvertisedBrokerListeners: Seq[Endpoint] = { // Use advertised listeners if defined, fallback to listeners otherwise - val advertisedListenersProp = getString(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) + val advertisedListenersProp = getList(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) val advertisedListeners = if (advertisedListenersProp != null) { CoreUtils.listenerListToEndPoints(advertisedListenersProp, effectiveListenerSecurityProtocolMap, requireDistinctPorts=false) } else { listeners } // Only expose broker listeners - advertisedListeners.filterNot(l => controllerListenerNames.contains(l.listenerName.value())) - } - - private def getInterBrokerListenerNameAndSecurityProtocol: (ListenerName, SecurityProtocol) = { - Option(getString(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)) match { - case Some(_) if originals.containsKey(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG) => - throw new ConfigException(s"Only one of ${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} and " + - s"${ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG} should be set.") - case Some(name) => - val listenerName = ListenerName.normalised(name) - val securityProtocol = effectiveListenerSecurityProtocolMap.getOrElse(listenerName, - throw new 
ConfigException(s"Listener with name ${listenerName.value} defined in " + - s"${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG}.")) - (listenerName, securityProtocol) - case None => - val securityProtocol = getSecurityProtocol(getString(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG), - ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG) - (ListenerName.forSecurityProtocol(securityProtocol), securityProtocol) - } - } - - private def getSecurityProtocol(protocolName: String, configName: String): SecurityProtocol = { - try SecurityProtocol.forName(protocolName) - catch { - case _: IllegalArgumentException => - throw new ConfigException(s"Invalid security protocol `$protocolName` defined in $configName") - } - } - - def effectiveListenerSecurityProtocolMap: Map[ListenerName, SecurityProtocol] = { - val mapValue = getMap(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, getString(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) - .map { case (listenerName, protocolName) => - ListenerName.normalised(listenerName) -> getSecurityProtocol(protocolName, SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG) - } - if (!originals.containsKey(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) { - // Nothing was specified explicitly for listener.security.protocol.map, so we are using the default value, - // and we are using KRaft. - // Add PLAINTEXT mappings for controller listeners as long as there is no SSL or SASL_{PLAINTEXT,SSL} in use - def isSslOrSasl(name: String): Boolean = name.equals(SecurityProtocol.SSL.name) || name.equals(SecurityProtocol.SASL_SSL.name) || name.equals(SecurityProtocol.SASL_PLAINTEXT.name) - // check controller listener names (they won't appear in listeners when process.roles=broker) - // as well as listeners for occurrences of SSL or SASL_* - if (controllerListenerNames.exists(isSslOrSasl) || - Csv.parseCsvList(getString(SocketServerConfigs.LISTENERS_CONFIG)).asScala.exists(listenerValue => isSslOrSasl(EndPoint.parseListenerName(listenerValue)))) { - mapValue // don't add default mappings since we found something that is SSL or SASL_* - } else { - // add the PLAINTEXT mappings for all controller listener names that are not explicitly PLAINTEXT - mapValue ++ controllerListenerNames.filterNot(SecurityProtocol.PLAINTEXT.name.equals(_)).map( - new ListenerName(_) -> SecurityProtocol.PLAINTEXT) - } - } else { - mapValue - } + advertisedListeners.filterNot(l => controllerListenerNames.contains(l.listener)) } validateValues() @@ -588,14 +498,21 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) require(logRollTimeMillis >= 1, "log.roll.ms must be greater than or equal to 1") require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be greater than or equal to 0") require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, greater than or equal to 1") - require(logDirs.nonEmpty, "At least one log directory must be defined via log.dirs or log.dir.") + require(logDirs.size > 0, "At least one log directory must be defined via log.dirs or log.dir.") require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.") require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" + " to prevent unnecessary socket timeouts") 
require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be less than or equal to replica.lag.time.max.ms" + " to prevent frequent changes in ISR") - val advertisedBrokerListenerNames = effectiveAdvertisedBrokerListeners.map(_.listenerName).toSet + if (brokerHeartbeatIntervalMs * 2 > brokerSessionTimeoutMs) { + error(s"${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG} ($brokerHeartbeatIntervalMs ms) must be less than or equal to half of the ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} ($brokerSessionTimeoutMs ms). " + + s"The ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} is configured on controller. The ${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG} is configured on broker. " + + s"If a broker doesn't send heartbeat request within ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG}, it loses broker lease. " + + s"Please increase ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} or decrease ${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG}.") + } + + val advertisedBrokerListenerNames = effectiveAdvertisedBrokerListeners.map(l => ListenerName.normalised(l.listener)).toSet // validate KRaft-related configs val voterIds = QuorumConfig.parseVoterIds(quorumConfig.voters) @@ -618,8 +535,8 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must contain at least one value appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running the KRaft controller role") } def validateControllerListenerNamesMustAppearInListenersForKRaftController(): Unit = { - val listenerNameValues = listeners.map(_.listenerName.value).toSet - require(controllerListenerNames.forall(cln => listenerNameValues.contains(cln)), + val listenerNameValues = listeners.map(_.listener).toSet + require(controllerListenerNames.stream().allMatch(cln => listenerNameValues.contains(cln)), s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must only contain values appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running the KRaft controller role") } def validateAdvertisedBrokerListenersNonEmptyForBroker(): Unit = { @@ -639,22 +556,25 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) require(!voterIds.contains(nodeId), s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains just the 'broker' role, the node id $nodeId must not be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}") // controller.listener.names must be non-empty... - require(controllerListenerNames.nonEmpty, + require(controllerListenerNames.size() > 0, s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must contain at least one value when running KRaft with just the broker role") // controller.listener.names are forbidden in listeners... 
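// Standalone sketch of the new broker heartbeat sanity check added above: the heartbeat interval should be
// at most half of the broker session timeout, otherwise a broker can lose its lease between heartbeats.
// The configuration is only logged as an error, not rejected, which this sketch mirrors as a boolean check.
object HeartbeatCheckSketch {
  def heartbeatIntervalTooLarge(heartbeatIntervalMs: Long, sessionTimeoutMs: Long): Boolean =
    heartbeatIntervalMs * 2 > sessionTimeoutMs

  def main(args: Array[String]): Unit = {
    println(heartbeatIntervalTooLarge(2000, 9000)) // false: a 2s heartbeat fits in a 9s session
    println(heartbeatIntervalTooLarge(5000, 9000)) // true: flagged by the config validation above
  }
}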
require(controllerListeners.isEmpty, s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must not contain a value appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running KRaft with just the broker role") // controller.listener.names must all appear in listener.security.protocol.map - controllerListenerNames.foreach { name => + controllerListenerNames.forEach { name => val listenerName = ListenerName.normalised(name) - if (!effectiveListenerSecurityProtocolMap.contains(listenerName)) { + if (!effectiveListenerSecurityProtocolMap.containsKey(listenerName)) { throw new ConfigException(s"Controller listener with name ${listenerName.value} defined in " + s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG} (an explicit security mapping for each controller listener is required if ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG} is non-empty, or if there are security protocols other than PLAINTEXT in use)") } } + // controller.quorum.auto.join.enable must be false for KRaft broker-only + require(!quorumConfig.autoJoin, + s"${QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG} is only supported when ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains the 'controller' role.") // warn that only the first controller listener is used if there is more than one if (controllerListenerNames.size > 1) { - warn(s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} has multiple entries; only the first will be used since ${KRaftConfigs.PROCESS_ROLES_CONFIG}=broker: ${controllerListenerNames.asJava}") + warn(s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} has multiple entries; only the first will be used since ${KRaftConfigs.PROCESS_ROLES_CONFIG}=broker: ${controllerListenerNames}") } // warn if create.topic.policy.class.name or alter.config.policy.class.name is defined in the broker role warnIfConfigDefinedInWrongRole(ProcessRole.ControllerRole, ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG) @@ -685,7 +605,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) validateControllerListenerNamesMustAppearInListenersForKRaftController() } - val listenerNames = listeners.map(_.listenerName).toSet + val listenerNames = listeners.map(l => ListenerName.normalised(l.listener)).toSet if (processRoles.contains(ProcessRole.BrokerRole)) { validateAdvertisedBrokerListenersNonEmptyForBroker() require(advertisedBrokerListenerNames.contains(interBrokerListenerName), @@ -732,18 +652,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) s"${BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG} must implement KafkaPrincipalSerde") } - /** - * Validate some configurations for new MetadataVersion. A new MetadataVersion can take place when - * a FeatureLevelRecord for "metadata.version" is read from the cluster metadata. - */ - def validateWithMetadataVersion(metadataVersion: MetadataVersion): Unit = { - if (processRoles.contains(ProcessRole.BrokerRole) && logDirs.size > 1) { - require(metadataVersion.isDirectoryAssignmentSupported, - s"Multiple log directories (aka JBOD) are not supported in the current MetadataVersion ${metadataVersion}. " + - s"Need ${MetadataVersion.IBP_3_7_IV2} or higher") - } - } - /** * Copy the subset of properties that are relevant to Logs. The individual properties * are listed here since the names are slightly different in each Config class... 
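// Standalone sketch of the broker-only validations in the hunk above, including the new rule that
// controller.quorum.auto.join.enable is rejected unless the node also carries the controller role.
// Plain strings and booleans stand in for the real config types.
object BrokerOnlyValidationSketch {
  def validate(controllerListenerNames: Seq[String],
               listenerNames: Set[String],
               quorumAutoJoinEnabled: Boolean): Unit = {
    require(controllerListenerNames.nonEmpty,
      "controller.listener.names must contain at least one value when running KRaft with just the broker role")
    require(!controllerListenerNames.exists(listenerNames.contains),
      "controller.listener.names must not contain a value appearing in the 'listeners' configuration when running KRaft with just the broker role")
    require(!quorumAutoJoinEnabled,
      "controller.quorum.auto.join.enable is only supported when process.roles contains the 'controller' role")
  }

  def main(args: Array[String]): Unit = {
    validate(Seq("CONTROLLER"), listenerNames = Set("PLAINTEXT"), quorumAutoJoinEnabled = false)
    println("broker-only configuration passes validation")
  }
}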
diff --git a/core/src/main/scala/kafka/server/KafkaRaftServer.scala b/core/src/main/scala/kafka/server/KafkaRaftServer.scala index 34ee4a725f259..e3497a6ff88aa 100644 --- a/core/src/main/scala/kafka/server/KafkaRaftServer.scala +++ b/core/src/main/scala/kafka/server/KafkaRaftServer.scala @@ -134,7 +134,7 @@ object KafkaRaftServer { // Load and verify the original ensemble. val loader = new MetaPropertiesEnsemble.Loader() loader.addMetadataLogDir(config.metadataLogDir) - .addLogDirs(config.logDirs.asJava) + .addLogDirs(config.logDirs) val initialMetaPropsEnsemble = loader.load() val verificationFlags = util.EnumSet.of(REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR) initialMetaPropsEnsemble.verify(Optional.empty(), OptionalInt.of(config.nodeId), verificationFlags) diff --git a/core/src/main/scala/kafka/server/KafkaRequestHandler.scala b/core/src/main/scala/kafka/server/KafkaRequestHandler.scala index 815fe4966eb81..d4998cbb73488 100755 --- a/core/src/main/scala/kafka/server/KafkaRequestHandler.scala +++ b/core/src/main/scala/kafka/server/KafkaRequestHandler.scala @@ -201,7 +201,10 @@ class KafkaRequestHandlerPool( requestHandlerAvgIdleMetricName: String, nodeName: String = "broker" ) extends Logging { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "KafkaRequestHandlerPool" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) val threadPoolSize: AtomicInteger = new AtomicInteger(numThreads) /* a meter to track the average free capacity of the request handlers */ diff --git a/core/src/main/scala/kafka/server/LeaderEndPoint.scala b/core/src/main/scala/kafka/server/LeaderEndPoint.scala deleted file mode 100644 index 889fb6472160c..0000000000000 --- a/core/src/main/scala/kafka/server/LeaderEndPoint.scala +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.requests.FetchRequest -import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} -import org.apache.kafka.server.common.OffsetAndEpoch -import org.apache.kafka.server.network.BrokerEndPoint - -import scala.collection.Map - -/** - * This trait defines the APIs to be used to access a broker that is a leader. 
- */ -trait LeaderEndPoint { - - type FetchData = FetchResponseData.PartitionData - type EpochData = OffsetForLeaderEpochRequestData.OffsetForLeaderPartition - - /** - * A boolean specifying if truncation when fetching from the leader is supported - */ - def isTruncationOnFetchSupported: Boolean - - /** - * Initiate closing access to fetches from leader. - */ - def initiateClose(): Unit - - /** - * Closes access to fetches from leader. - * `initiateClose` must be called prior to invoking `close`. - */ - def close(): Unit - - /** - * The specific broker (host:port) we want to connect to. - */ - def brokerEndPoint(): BrokerEndPoint - - /** - * Given a fetchRequest, carries out the expected request and returns - * the results from fetching from the leader. - * - * @param fetchRequest The fetch request we want to carry out - * - * @return A map of topic partition -> fetch data - */ - def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] - - /** - * Fetches the epoch and log start offset of the given topic partition from the leader. - * - * @param topicPartition The topic partition that we want to fetch from - * @param currentLeaderEpoch An int representing the current leader epoch of the requester - * - * @return An OffsetAndEpoch object representing the earliest offset and epoch in the leader's topic partition. - */ - def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch - - /** - * Fetches the epoch and log end offset of the given topic partition from the leader. - * - * @param topicPartition The topic partition that we want to fetch from - * @param currentLeaderEpoch An int representing the current leader epoch of the requester - * - * @return An OffsetAndEpoch object representing the latest offset and epoch in the leader's topic partition. - */ - def fetchLatestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch - - /** - * Fetches offset for leader epoch from the leader for each given topic partition - * - * @param partitions A map of topic partition -> leader epoch of the replica - * - * @return A map of topic partition -> end offset for a requested leader epoch - */ - def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] - - /** - * Fetches the epoch and local log start offset from the leader for the given partition and the current leader-epoch - * - * @param topicPartition The topic partition that we want to fetch from - * @param currentLeaderEpoch An int representing the current leader epoch of the requester - * - * @return An OffsetAndEpoch object representing the earliest local offset and epoch in the leader's topic partition. - */ - def fetchEarliestLocalOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch - - /** - * Builds a fetch request, given a partition map. - * - * @param partitions A map of topic partitions to their respective partition fetch state - * - * @return A ResultWithPartitions, used to create the fetchRequest for fetch. 
- */ - def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] - -} diff --git a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala index 4249d3c34688c..f32d9f8037adb 100644 --- a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala @@ -17,11 +17,10 @@ package kafka.server -import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.utils.Logging import org.apache.kafka.common.errors.KafkaStorageException -import org.apache.kafka.common.message.FetchResponseData +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH @@ -29,11 +28,13 @@ import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, RequestUti import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.LeaderEndPoint +import org.apache.kafka.server.{PartitionFetchState, ReplicaFetch, ResultWithPartitions} import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import java.util import java.util.Optional -import scala.collection.{Map, Seq, Set, mutable} +import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ /** @@ -62,8 +63,8 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): collection.Map[TopicPartition, FetchData] = { - var partitionData: Seq[(TopicPartition, FetchData)] = null + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + var partitionData: Seq[(TopicPartition, FetchResponseData.PartitionData)] = null val request = fetchRequest.build() // We can build the map from the request since it contains topic IDs and names. 
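// Standalone sketch of the Scala <-> Java interop pattern the LocalLeaderEndPoint changes above rely on:
// Options and Maps produced by the Scala implementation are converted to java.util.Optional and
// java.util.Map at the (now Java-defined) LeaderEndPoint boundary. Types below are simplified stand-ins.
import java.util.Optional
import scala.jdk.CollectionConverters._
import scala.jdk.OptionConverters._

object LeaderEndPointInteropSketch {
  def pickFirstReady(partitions: Map[String, Boolean]): Optional[String] =
    partitions.collectFirst { case (tp, ready) if ready => tp }.toJava

  def toJavaView(results: Map[String, Long]): java.util.Map[String, Long] =
    results.asJava

  def main(args: Array[String]): Unit = {
    println(pickFirstReady(Map("t-0" -> false, "t-1" -> true))) // Optional[t-1]
    println(toJavaView(Map("t-1" -> 42L)))                      // {t-1=42}
  }
}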
@@ -110,7 +111,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, if (partitionData == null) throw new IllegalStateException(s"Failed to fetch data for partitions ${fetchData.keySet().toArray.mkString(",")}") - partitionData.toMap + partitionData.toMap.asJava } override def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = { @@ -134,8 +135,8 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, new OffsetAndEpoch(localLogStartOffset, epoch.orElse(0)) } - override def fetchEpochEndOffsets(partitions: collection.Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { - partitions.map { case (tp, epochData) => + override def fetchEpochEndOffsets(partitions: util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): util.Map[TopicPartition, EpochEndOffset] = { + partitions.asScala.map { case (tp, epochData) => try { val endOffset = if (epochData.leaderEpoch == UNDEFINED_EPOCH) { new EpochEndOffset() @@ -156,48 +157,52 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, .setPartition(tp.partition) .setErrorCode(Errors.forException(t).code) } - } + }.asJava } - override def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { + override def buildFetch(partitions: util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[util.Optional[ReplicaFetch]] = { // Only include replica in the fetch request if it is not throttled. if (quota.isQuotaExceeded) { - ResultWithPartitions(None, Set.empty) + new ResultWithPartitions(util.Optional.empty(), util.Set.of()) } else { - selectPartitionToFetch(partitions) match { - case Some((tp, fetchState)) => - buildFetchForPartition(tp, fetchState) - case None => - ResultWithPartitions(None, Set.empty) + val selectPartition = selectPartitionToFetch(partitions) + if (selectPartition.isPresent) { + val (tp, fetchState) = selectPartition.get() + buildFetchForPartition(tp, fetchState) + } else { + new ResultWithPartitions(util.Optional.empty(), util.Set.of()) } } } - private def selectPartitionToFetch(partitions: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = { + private def selectPartitionToFetch(partitions: util.Map[TopicPartition, PartitionFetchState]): Optional[(TopicPartition, PartitionFetchState)] = { // Only move one partition at a time to increase its catch-up rate and thus reduce the time spent on // moving any given replica. Replicas are selected in ascending order (lexicographically by topic) from the // partitions that are ready to fetch. Once selected, we will continue fetching the same partition until it // becomes unavailable or is removed. inProgressPartition.foreach { tp => - val fetchStateOpt = partitions.get(tp) + val fetchStateOpt = Option(partitions.get(tp)) fetchStateOpt.filter(_.isReadyForFetch).foreach { fetchState => - return Some((tp, fetchState)) + return Optional.of((tp, fetchState)) } } inProgressPartition = None - val nextPartitionOpt = nextReadyPartition(partitions) + val nextPartitionOpt = nextReadyPartition(partitions.asScala.toMap) nextPartitionOpt.foreach { case (tp, fetchState) => inProgressPartition = Some(tp) info(s"Beginning/resuming copy of partition $tp from offset ${fetchState.fetchOffset}. 
" + s"Including this partition, there are ${partitions.size} remaining partitions to copy by this thread.") } - nextPartitionOpt + nextPartitionOpt match { + case Some((tp, fetchState)) => Optional.of((tp, fetchState)) + case None => Optional.empty() + } } - private def buildFetchForPartition(topicPartition: TopicPartition, fetchState: PartitionFetchState): ResultWithPartitions[Option[ReplicaFetch]] = { + private def buildFetchForPartition(topicPartition: TopicPartition, fetchState: PartitionFetchState): ResultWithPartitions[Optional[ReplicaFetch]] = { val requestMap = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] val partitionsWithError = mutable.Set[TopicPartition]() @@ -207,7 +212,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, fetchState.lastFetchedEpoch else Optional.empty[Integer] - val topicId = fetchState.topicId.getOrElse(Uuid.ZERO_UUID) + val topicId = fetchState.topicId.orElse(Uuid.ZERO_UUID) requestMap.put(topicPartition, new FetchRequest.PartitionData(topicId, fetchState.fetchOffset, logStartOffset, fetchSize, Optional.of(fetchState.currentLeaderEpoch), lastFetchedEpoch)) } catch { @@ -217,7 +222,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, } val fetchRequestOpt = if (requestMap.isEmpty) { - None + Optional.empty[ReplicaFetch]() } else { val version: Short = if (fetchState.topicId.isEmpty) 12 @@ -226,10 +231,10 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, // Set maxWait and minBytes to 0 because the response should return immediately if // the future log has caught up with the current log of the partition val requestBuilder = FetchRequest.Builder.forReplica(version, replicaId, -1, 0, 0, requestMap).setMaxBytes(maxBytes) - Some(ReplicaFetch(requestMap, requestBuilder)) + Optional.of(new ReplicaFetch(requestMap, requestBuilder)) } - ResultWithPartitions(fetchRequestOpt, partitionsWithError) + new ResultWithPartitions(fetchRequestOpt, partitionsWithError.asJava) } private def nextReadyPartition(partitions: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = { diff --git a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala index c353a82550316..0caa03ec05299 100644 --- a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala +++ b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala @@ -17,7 +17,6 @@ package kafka.server -import kafka.raft.RaftManager import kafka.utils.Logging import org.apache.kafka.clients._ import org.apache.kafka.common.metrics.Metrics @@ -28,6 +27,7 @@ import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{Node, Reconfigurable} +import org.apache.kafka.raft.RaftManager import org.apache.kafka.server.common.{ApiMessageAndVersion, ControllerRequestCompletionHandler, NodeToControllerChannelManager} import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} @@ -37,7 +37,7 @@ import java.util.concurrent.LinkedBlockingDeque import java.util.concurrent.atomic.AtomicReference import scala.collection.Seq import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptionalInt} +import scala.jdk.OptionConverters.{RichOption, RichOptional, RichOptionalInt} case class ControllerInformation( node: Option[Node], @@ -55,8 +55,9 @@ object 
RaftControllerNodeProvider { raftManager: RaftManager[ApiMessageAndVersion], config: KafkaConfig, ): RaftControllerNodeProvider = { - val controllerListenerName = new ListenerName(config.controllerListenerNames.head) - val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse(controllerListenerName, SecurityProtocol.forName(controllerListenerName.value())) + val controllerListenerName = new ListenerName(config.controllerListenerNames.get(0)) + val controllerSecurityProtocol = Option(config.effectiveListenerSecurityProtocolMap.get(controllerListenerName)) + .getOrElse(SecurityProtocol.forName(controllerListenerName.value())) val controllerSaslMechanism = config.saslMechanismControllerProtocol new RaftControllerNodeProvider( raftManager, @@ -78,10 +79,10 @@ class RaftControllerNodeProvider( val saslMechanism: String ) extends ControllerNodeProvider with Logging { - private def idToNode(id: Int): Option[Node] = raftManager.voterNode(id, listenerName) + private def idToNode(id: Int): Option[Node] = raftManager.client.voterNode(id, listenerName).toScala override def getControllerInfo(): ControllerInformation = - ControllerInformation(raftManager.leaderAndEpoch.leaderId.toScala.flatMap(idToNode), + ControllerInformation(raftManager.client.leaderAndEpoch.leaderId.toScala.flatMap(idToNode), listenerName, securityProtocol, saslMechanism) } diff --git a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala index 2132a962d7617..80d41e3b0cf13 100644 --- a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala @@ -18,11 +18,11 @@ package kafka.server import java.util.{Collections, Optional} -import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.Logging import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetForLeaderTopic, OffsetForLeaderTopicCollection} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset @@ -30,9 +30,11 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse, OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.LeaderEndPoint +import org.apache.kafka.server.{PartitionFetchState, ReplicaFetch, ResultWithPartitions} import scala.jdk.CollectionConverters._ -import scala.collection.{Map, mutable} +import scala.collection.mutable /** * Facilitates fetches from a remote replica leader. 
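// Standalone sketch of the controller lookup chain in RaftControllerNodeProvider above: the current
// leader id (a Java OptionalInt) is converted to a Scala Option and flat-mapped through a voter-node
// lookup, so "no leader" and "unknown voter" both collapse to None. Node is a simplified stand-in type.
import java.util.OptionalInt
import scala.jdk.OptionConverters._

object ControllerLookupSketch {
  final case class Node(id: Int, host: String, port: Int)

  def controllerNode(leaderId: OptionalInt, voterNode: Int => Option[Node]): Option[Node] =
    leaderId.toScala.flatMap(voterNode)

  def main(args: Array[String]): Unit = {
    val voters = Map(3000 -> Node(3000, "controller-0", 9093))
    println(controllerNode(OptionalInt.of(3000), voters.get)) // Some(Node(3000,controller-0,9093))
    println(controllerNode(OptionalInt.empty(), voters.get))  // None
  }
}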
@@ -70,7 +72,7 @@ class RemoteLeaderEndPoint(logPrefix: String, override def brokerEndPoint(): BrokerEndPoint = blockingSender.brokerEndPoint() - override def fetch(fetchRequest: FetchRequest.Builder): collection.Map[TopicPartition, FetchData] = { + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { val clientResponse = try { blockingSender.sendRequest(fetchRequest) } catch { @@ -84,10 +86,10 @@ class RemoteLeaderEndPoint(logPrefix: String, if (fetchResponse.error == Errors.FETCH_SESSION_TOPIC_ID_ERROR) { throw Errors.forCode(fetchResponse.error().code()).exception() } else { - Map.empty + java.util.Map.of() } } else { - fetchResponse.responseData(fetchSessionHandler.sessionTopicNames, clientResponse.requestHeader().apiVersion()).asScala + fetchResponse.responseData(fetchSessionHandler.sessionTopicNames, clientResponse.requestHeader().apiVersion()) } } @@ -126,14 +128,14 @@ class RemoteLeaderEndPoint(logPrefix: String, } } - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { if (partitions.isEmpty) { debug("Skipping leaderEpoch request since all partitions do not have an epoch") - return Map.empty + return java.util.Map.of() } val topics = new OffsetForLeaderTopicCollection(partitions.size) - partitions.foreachEntry { (topicPartition, epochData) => + partitions.forEach { (topicPartition, epochData) => var topic = topics.find(topicPartition.topic) if (topic == null) { topic = new OffsetForLeaderTopic().setTopic(topicPartition.topic) @@ -154,40 +156,39 @@ class RemoteLeaderEndPoint(logPrefix: String, val tp = new TopicPartition(offsetForLeaderTopicResult.topic, offsetForLeaderPartitionResult.partition) tp -> offsetForLeaderPartitionResult } - }.toMap + }.toMap.asJava } catch { case t: Throwable => warn(s"Error when sending leader epoch request for $partitions", t) // if we get any unexpected exception, mark all partitions with an error val error = Errors.forException(t) - partitions.map { case (tp, _) => + partitions.asScala.map { case (tp, _) => tp -> new EpochEndOffset() .setPartition(tp.partition) .setErrorCode(error.code) - } + }.asJava } } - override def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { + override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = { val partitionsWithError = mutable.Set[TopicPartition]() - val builder = fetchSessionHandler.newBuilder(partitions.size, false) - partitions.foreachEntry { (topicPartition, fetchState) => + partitions.forEach { (topicPartition, fetchState) => // We will not include a replica in the fetch request if it should be throttled. 
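// Standalone sketch of the fail-all fallback used by fetchEpochEndOffsets above: if the leader-epoch
// request itself fails, every requested partition is reported with the same error derived from the
// exception, now packaged as a java.util.Map. A plain string stands in for EpochEndOffset here.
import scala.jdk.CollectionConverters._

object FailAllPartitionsSketch {
  def failAll[K](requested: Iterable[K], error: Throwable): java.util.Map[K, String] =
    requested.map(k => k -> s"error=${error.getClass.getSimpleName}").toMap.asJava

  def main(args: Array[String]): Unit =
    println(failAll(Seq("topic-0", "topic-1"), new java.io.IOException("connection lost")))
}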
if (fetchState.isReadyForFetch && !shouldFollowerThrottle(quota, fetchState, topicPartition)) { try { val logStartOffset = replicaManager.localLogOrException(topicPartition).logStartOffset val lastFetchedEpoch = if (isTruncationOnFetchSupported) - fetchState.lastFetchedEpoch + fetchState.lastFetchedEpoch() else Optional.empty[Integer] builder.add(topicPartition, new FetchRequest.PartitionData( - fetchState.topicId.getOrElse(Uuid.ZERO_UUID), - fetchState.fetchOffset, + fetchState.topicId().orElse(Uuid.ZERO_UUID), + fetchState.fetchOffset(), logStartOffset, fetchSize, - Optional.of(fetchState.currentLeaderEpoch), + Optional.of(fetchState.currentLeaderEpoch()), lastFetchedEpoch)) } catch { case _: KafkaStorageException => @@ -200,7 +201,7 @@ class RemoteLeaderEndPoint(logPrefix: String, val fetchData = builder.build() val fetchRequestOpt = if (fetchData.sessionPartitions.isEmpty && fetchData.toForget.isEmpty) { - None + Optional.empty[ReplicaFetch] } else { val metadataVersion = metadataVersionSupplier() val version: Short = if (!fetchData.canUseTopicIds) { @@ -214,10 +215,10 @@ class RemoteLeaderEndPoint(logPrefix: String, .removed(fetchData.toForget) .replaced(fetchData.toReplace) .metadata(fetchData.metadata) - Some(ReplicaFetch(fetchData.sessionPartitions(), requestBuilder)) + Optional.of(new ReplicaFetch(fetchData.sessionPartitions(), requestBuilder)) } - ResultWithPartitions(fetchRequestOpt, partitionsWithError) + new ResultWithPartitions(fetchRequestOpt, partitionsWithError.asJava) } /** diff --git a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala index 7b2d7863077ea..e0473166b365d 100644 --- a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala @@ -20,6 +20,7 @@ package kafka.server import org.apache.kafka.common.TopicPartition import org.apache.kafka.server.common.DirectoryEventHandler import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.storage.log.metrics.BrokerTopicStats class ReplicaAlterLogDirsManager(brokerConfig: KafkaConfig, @@ -35,7 +36,7 @@ class ReplicaAlterLogDirsManager(brokerConfig: KafkaConfig, override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaAlterLogDirsThread = { val threadName = s"ReplicaAlterLogDirsThread-$fetcherId" - val leader = new LocalLeaderEndPoint(sourceBroker, brokerConfig, replicaManager, quotaManager) + val leader: LeaderEndPoint = new LocalLeaderEndPoint(sourceBroker, brokerConfig, replicaManager, quotaManager) new ReplicaAlterLogDirsThread(threadName, leader, failedPartitions, replicaManager, quotaManager, brokerTopicStats, brokerConfig.replicaFetchBackoffMs, directoryEventHandler) } diff --git a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala index 1ac1f9290788d..81bb41100f78a 100644 --- a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala @@ -24,6 +24,8 @@ import org.apache.kafka.common.requests.FetchResponse import org.apache.kafka.server.common.{DirectoryEventHandler, OffsetAndEpoch, TopicIdPartition} import org.apache.kafka.storage.internals.log.{LogAppendInfo, LogStartOffsetIncrementReason} import org.apache.kafka.storage.log.metrics.BrokerTopicStats +import org.apache.kafka.server.LeaderEndPoint +import 
org.apache.kafka.server.PartitionFetchState import java.util.Optional import java.util.concurrent.ConcurrentHashMap diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala index 4ed5b05311df4..96308fb400f2d 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala @@ -22,12 +22,12 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.LeaderEndPoint class ReplicaFetcherManager(brokerConfig: KafkaConfig, protected val replicaManager: ReplicaManager, metrics: Metrics, time: Time, - threadNamePrefix: Option[String] = None, quotaManager: ReplicationQuotaManager, metadataVersionSupplier: () => MetadataVersion, brokerEpochSupplier: () => Long) @@ -37,14 +37,13 @@ class ReplicaFetcherManager(brokerConfig: KafkaConfig, numFetchers = brokerConfig.numReplicaFetchers) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { - val prefix = threadNamePrefix.map(tp => s"$tp:").getOrElse("") - val threadName = s"${prefix}ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" + val threadName = s"ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" val logContext = new LogContext(s"[ReplicaFetcher replicaId=${brokerConfig.brokerId}, leaderId=${sourceBroker.id}, " + s"fetcherId=$fetcherId] ") val endpoint = new BrokerBlockingSender(sourceBroker, brokerConfig, metrics, time, fetcherId, s"broker-${brokerConfig.brokerId}-fetcher-$fetcherId", logContext) val fetchSessionHandler = new FetchSessionHandler(logContext, sourceBroker.id) - val leader = new RemoteLeaderEndPoint(logContext.logPrefix, endpoint, fetchSessionHandler, brokerConfig, + val leader: LeaderEndPoint = new RemoteLeaderEndPoint(logContext.logPrefix, endpoint, fetchSessionHandler, brokerConfig, replicaManager, quotaManager, metadataVersionSupplier, brokerEpochSupplier) new ReplicaFetcherThread(threadName, leader, brokerConfig, failedPartitions, replicaManager, quotaManager, logContext.logPrefix) diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index a20849d7b8b45..fa2f6bb7f35bd 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -21,6 +21,7 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.requests.FetchResponse import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.storage.internals.log.{LogAppendInfo, LogStartOffsetIncrementReason} +import org.apache.kafka.server.LeaderEndPoint import java.util.Optional import scala.collection.mutable @@ -163,14 +164,9 @@ class ReplicaFetcherThread(name: String, */ override def truncate(tp: TopicPartition, offsetTruncationState: OffsetTruncationState): Unit = { val partition = replicaMgr.getPartitionOrException(tp) - val log = partition.localLogOrException partition.truncateTo(offsetTruncationState.offset, isFuture = false) - if (offsetTruncationState.offset < log.highWatermark) - warn(s"Truncating $tp to offset ${offsetTruncationState.offset} below high watermark " + - s"${log.highWatermark}") - // mark the future replica for truncation only when we do last truncation if 
(offsetTruncationState.truncationCompleted) replicaMgr.replicaAlterLogDirsManager.markPartitionsForTruncation(brokerConfig.brokerId, tp, diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index fa4978287e194..e01d264f53962 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -18,7 +18,6 @@ package kafka.server import com.yammer.metrics.core.Meter import kafka.cluster.{Partition, PartitionListener} -import kafka.controller.StateChangeLogger import kafka.log.LogManager import kafka.server.HostedPartition.Online import kafka.server.QuotaFactory.QuotaManagers @@ -34,7 +33,7 @@ import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartit import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult} -import org.apache.kafka.common.message.{DescribeLogDirsResponseData, DescribeProducersResponseData, FetchResponseData} +import org.apache.kafka.common.message.{DescribeLogDirsResponseData, DescribeProducersResponseData} import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors @@ -48,9 +47,10 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.{Exit, Time, Utils} import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig} import org.apache.kafka.image.{LocalReplicaChanges, MetadataImage, TopicsDelta} +import org.apache.kafka.logger.StateChangeLogger import org.apache.kafka.metadata.LeaderConstants.NO_LEADER import org.apache.kafka.metadata.MetadataCache -import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition, TopicOptionalIdPartition} +import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition} import org.apache.kafka.server.log.remote.TopicPartitionLog import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.log.remote.storage.RemoteLogManager @@ -59,8 +59,11 @@ import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets, DeleteRecordsPartitionStatus, ListOffsetsPartitionStatus, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.{DelayedShareFetchKey, DelayedShareFetchPartitionKey} import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation +import org.apache.kafka.server.util.timer.{SystemTimer, TimerTask} import org.apache.kafka.server.util.{Scheduler, ShutdownableThread} -import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, common} +import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, LogReadResult, common} import org.apache.kafka.storage.internals.checkpoint.{LazyOffsetCheckpoints, OffsetCheckpointFile, OffsetCheckpoints} import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, 
LogReadInfo, OffsetResultHolder, RecordValidationException, RemoteLogReadResult, RemoteStorageFetchInfo, UnifiedLog, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -70,13 +73,12 @@ import java.lang.{Long => JLong} import java.nio.file.{Files, Paths} import java.util import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.locks.Lock -import java.util.concurrent.{CompletableFuture, Future, RejectedExecutionException, TimeUnit} +import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, Future, RejectedExecutionException, TimeUnit} import java.util.{Collections, Optional, OptionalInt, OptionalLong} import java.util.function.Consumer import scala.collection.{Map, Seq, Set, immutable, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional} +import scala.jdk.OptionConverters.RichOptional /* * Result metadata of a log append operation on the log @@ -104,64 +106,6 @@ case class LogDeleteRecordsResult(requestedOffset: Long, lowWatermark: Long, exc } } -/** - * Result metadata of a log read operation on the log - * @param info @FetchDataInfo returned by the @Log read - * @param divergingEpoch Optional epoch and end offset which indicates the largest epoch such - * that subsequent records are known to diverge on the follower/consumer - * @param highWatermark high watermark of the local replica - * @param leaderLogStartOffset The log start offset of the leader at the time of the read - * @param leaderLogEndOffset The log end offset of the leader at the time of the read - * @param followerLogStartOffset The log start offset of the follower taken from the Fetch request - * @param fetchTimeMs The time the fetch was received - * @param lastStableOffset Current LSO or None if the result has an exception - * @param preferredReadReplica the preferred read replica to be used for future fetches - * @param exception Exception if error encountered while reading from the log - */ -case class LogReadResult(info: FetchDataInfo, - divergingEpoch: Option[FetchResponseData.EpochEndOffset], - highWatermark: Long, - leaderLogStartOffset: Long, - leaderLogEndOffset: Long, - followerLogStartOffset: Long, - fetchTimeMs: Long, - lastStableOffset: Option[Long], - preferredReadReplica: Option[Int] = None, - exception: Option[Throwable] = None) { - - def error: Errors = exception match { - case None => Errors.NONE - case Some(e) => Errors.forException(e) - } - - def toFetchPartitionData(isReassignmentFetch: Boolean): FetchPartitionData = new FetchPartitionData( - this.error, - this.highWatermark, - this.leaderLogStartOffset, - this.info.records, - this.divergingEpoch.toJava, - if (this.lastStableOffset.isDefined) OptionalLong.of(this.lastStableOffset.get) else OptionalLong.empty(), - this.info.abortedTransactions, - if (this.preferredReadReplica.isDefined) OptionalInt.of(this.preferredReadReplica.get) else OptionalInt.empty(), - isReassignmentFetch) - - override def toString: String = { - "LogReadResult(" + - s"info=$info, " + - s"divergingEpoch=$divergingEpoch, " + - s"highWatermark=$highWatermark, " + - s"leaderLogStartOffset=$leaderLogStartOffset, " + - s"leaderLogEndOffset=$leaderLogEndOffset, " + - s"followerLogStartOffset=$followerLogStartOffset, " + - s"fetchTimeMs=$fetchTimeMs, " + - s"preferredReadReplica=$preferredReadReplica, " + - s"lastStableOffset=$lastStableOffset, " + - s"error=$error" + - ")" - } - -} - /** * Trait to represent the state of hosted partitions. 
We create a concrete (active) Partition * instance when the broker receives a LeaderAndIsr request from the controller or a metadata @@ -228,34 +172,35 @@ object ReplicaManager { ListOffsetsRequest.LATEST_TIMESTAMP -> 1.toShort, ListOffsetsRequest.MAX_TIMESTAMP -> 7.toShort, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP -> 8.toShort, - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort, + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP -> 11.toShort ) def createLogReadResult(highWatermark: Long, leaderLogStartOffset: Long, leaderLogEndOffset: Long, e: Throwable): LogReadResult = { - LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, + new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + Optional.empty(), highWatermark, leaderLogStartOffset, leaderLogEndOffset, - followerLogStartOffset = -1L, - fetchTimeMs = -1L, - lastStableOffset = None, - exception = Some(e)) + -1L, + -1L, + OptionalLong.empty(), + Optional.of(e)) } def createLogReadResult(e: Throwable): LogReadResult = { - LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, - highWatermark = UnifiedLog.UNKNOWN_OFFSET, - leaderLogStartOffset = UnifiedLog.UNKNOWN_OFFSET, - leaderLogEndOffset = UnifiedLog.UNKNOWN_OFFSET, - followerLogStartOffset = UnifiedLog.UNKNOWN_OFFSET, - fetchTimeMs = -1L, - lastStableOffset = None, - exception = Some(e)) + new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + Optional.empty(), + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + -1L, + OptionalLong.empty(), + Optional.of(e)) } private[server] def isListOffsetsTimestampUnsupported(timestamp: JLong, version: Short): Boolean = { @@ -275,21 +220,24 @@ class ReplicaManager(val config: KafkaConfig, logDirFailureChannel: LogDirFailureChannel, val alterPartitionManager: AlterPartitionManager, val brokerTopicStats: BrokerTopicStats = new BrokerTopicStats(), - val isShuttingDown: AtomicBoolean = new AtomicBoolean(false), delayedProducePurgatoryParam: Option[DelayedOperationPurgatory[DelayedProduce]] = None, delayedFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedFetch]] = None, delayedDeleteRecordsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedDeleteRecords]] = None, delayedRemoteFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteFetch]] = None, delayedRemoteListOffsetsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteListOffsets]] = None, delayedShareFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedShareFetch]] = None, - threadNamePrefix: Option[String] = None, val brokerEpochSupplier: () => Long = () => -1, addPartitionsToTxnManager: Option[AddPartitionsToTxnManager] = None, val directoryEventHandler: DirectoryEventHandler = DirectoryEventHandler.NOOP, val defaultActionQueue: ActionQueue = new DelayedActionQueue ) extends Logging { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + private val metricsPackage = "kafka.server" + private val metricsClassName = "ReplicaManager" + private val metricsGroup = new KafkaMetricsGroup(metricsPackage, metricsClassName) private val addPartitionsToTxnConfig = 
new AddPartitionsToTxnConfig(config) + private val shareFetchPurgatoryName = "ShareFetch" + private val delayedShareFetchTimer = new SystemTimer(shareFetchPurgatoryName) val delayedProducePurgatory = delayedProducePurgatoryParam.getOrElse( new DelayedOperationPurgatory[DelayedProduce]( @@ -311,17 +259,14 @@ class ReplicaManager(val config: KafkaConfig, "RemoteListOffsets", config.brokerId)) val delayedShareFetchPurgatory = delayedShareFetchPurgatoryParam.getOrElse( new DelayedOperationPurgatory[DelayedShareFetch]( - "ShareFetch", config.brokerId, + shareFetchPurgatoryName, delayedShareFetchTimer, config.brokerId, config.shareGroupConfig.shareFetchPurgatoryPurgeIntervalRequests)) /* epoch of the controller that last changed the leader */ - @volatile private[server] var controllerEpoch: Int = 0 protected val localBrokerId = config.brokerId - protected val allPartitions = new Pool[TopicPartition, HostedPartition]( - valueFactory = Some(tp => HostedPartition.Online(Partition(tp, time, this))) - ) + protected val allPartitions = new ConcurrentHashMap[TopicPartition, HostedPartition] private val replicaStateChangeLock = new Object - val replicaFetcherManager = createReplicaFetcherManager(metrics, time, threadNamePrefix, quotaManagers.follower) + val replicaFetcherManager = createReplicaFetcherManager(metrics, time, quotaManagers.follower) private[server] val replicaAlterLogDirsManager = createReplicaAlterLogDirsManager(quotaManagers.alterLogDirs, brokerTopicStats) private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false) @volatile private[server] var highWatermarkCheckpoints: Map[String, OffsetCheckpointFile] = logManager.liveLogDirs.map(dir => @@ -330,7 +275,7 @@ class ReplicaManager(val config: KafkaConfig, @volatile private var isInControlledShutdown = false this.logIdent = s"[ReplicaManager broker=$localBrokerId] " - protected val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None) + protected val stateChangeLogger = new StateChangeLogger(localBrokerId) private var logDirFailureHandler: LogDirFailureHandler = _ @@ -402,7 +347,7 @@ class ReplicaManager(val config: KafkaConfig, } private def maybeRemoveTopicMetrics(topic: String): Unit = { - val topicHasNonOfflinePartition = allPartitions.values.exists { + val topicHasNonOfflinePartition = allPartitions.values.asScala.exists { case online: HostedPartition.Online => topic == online.partition.topic case HostedPartition.None | HostedPartition.Offline(_) => false } @@ -450,6 +395,14 @@ class ReplicaManager(val config: KafkaConfig, delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchKeys) } + /** + * Add a timer task to the delayedShareFetchTimer. + * @param timerTask The timer task to be added to the delayedShareFetchTimer + */ + private[server] def addShareFetchTimerRequest(timerTask: TimerTask): Unit = { + delayedShareFetchTimer.add(timerTask) + } + /** * Registers the provided listener to the partition iff the partition is online. 
*/ @@ -533,6 +486,11 @@ class ReplicaManager(val config: KafkaConfig, errorMap } + def topicIdPartition(topicPartition: TopicPartition): TopicIdPartition = { + val topicId = metadataCache.getTopicId(topicPartition.topic()) + new TopicIdPartition(topicId, topicPartition) + } + def getPartition(topicPartition: TopicPartition): HostedPartition = { Option(allPartitions.get(topicPartition)).getOrElse(HostedPartition.None) } @@ -547,10 +505,15 @@ // Visible for testing def createPartition(topicPartition: TopicPartition): Partition = { val partition = Partition(topicPartition, time, this) - allPartitions.put(topicPartition, HostedPartition.Online(partition)) + addOnlinePartition(topicPartition, partition) partition } + // Visible for testing + private[server] def addOnlinePartition(topicPartition: TopicPartition, partition: Partition): Unit = { + allPartitions.put(topicPartition, HostedPartition.Online(partition)) + } + def onlinePartition(topicPartition: TopicPartition): Option[Partition] = { getPartition(topicPartition) match { case HostedPartition.Online(partition) => Some(partition) @@ -561,14 +524,14 @@ // An iterator over all non offline partitions. This is a weakly consistent iterator; a partition made offline after // the iterator has been constructed could still be returned by this iterator. private def onlinePartitionsIterator: Iterator[Partition] = { - allPartitions.values.iterator.flatMap { + allPartitions.values.asScala.iterator.flatMap { case HostedPartition.Online(partition) => Some(partition) case _ => None } } private def offlinePartitionCount: Int = { - allPartitions.values.iterator.count(_.getClass == HostedPartition.Offline.getClass) + allPartitions.values.asScala.iterator.count(_.getClass == HostedPartition.Offline.getClass) } def getPartitionOrException(topicPartition: TopicPartition): Partition = { @@ -583,6 +546,27 @@ } } + def getPartitionOrException(topicIdPartition: TopicIdPartition): Partition = { + getPartitionOrError(topicIdPartition.topicPartition()) match { + case Left(Errors.KAFKA_STORAGE_ERROR) => + throw new KafkaStorageException(s"Partition ${topicIdPartition.topicPartition()} is in an offline log directory") + + case Left(error) => + throw error.exception(s"Error while fetching partition state for ${topicIdPartition.topicPartition()}") + + case Right(partition) => + // Get the topic id for an existing partition from disk; if it is not present, get it from the metadata cache + val topicId = partition.topicId.getOrElse(metadataCache.getTopicId(topicIdPartition.topic())) + // If the topic id is set to ZERO_UUID, fall back to the non-topic-id-aware behaviour + val topicIdNotProvided = topicIdPartition.topicId() == Uuid.ZERO_UUID + if (topicIdNotProvided || topicId == topicIdPartition.topicId()) { + partition + } else { + throw new UnknownTopicIdException(s"Partition $topicIdPartition's topic id doesn't match the one on disk $topicId.") + } + } + } + def getPartitionOrError(topicPartition: TopicPartition): Either[Errors, Partition] = { getPartition(topicPartition) match { case HostedPartition.Online(partition) => @@ -638,7 +622,9 @@ * required acks. 
* @param internalTopicsAllowed boolean indicating whether internal topics can be appended to * @param origin source of the append request (ie, client, replication, coordinator) - * @param entriesPerPartition the records per partition to be appended + * @param entriesPerPartition the records per topic partition to be appended. + * If topic partition contains Uuid.ZERO_UUID as topicId the method + * will fall back to the old behaviour and rely on topic name. * @param requestLocal container for the stateful instances scoped to this request -- this must correspond to the * thread calling this method * @param actionQueue the action queue to use. ReplicaManager#defaultActionQueue is used by default. @@ -648,11 +634,11 @@ class ReplicaManager(val config: KafkaConfig, requiredAcks: Short, internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicPartition, MemoryRecords], + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], requestLocal: RequestLocal = RequestLocal.noCaching, actionQueue: ActionQueue = this.defaultActionQueue, verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty - ): Map[TopicPartition, LogAppendResult] = { + ): Map[TopicIdPartition, LogAppendResult] = { val startTimeMs = time.milliseconds val localProduceResultsWithTopicId = appendToLocalLog( internalTopicsAllowed = internalTopicsAllowed, @@ -666,9 +652,7 @@ class ReplicaManager(val config: KafkaConfig, addCompletePurgatoryAction(actionQueue, localProduceResultsWithTopicId) - localProduceResultsWithTopicId.map { - case (k, v) => (k.topicPartition, v) - } + localProduceResultsWithTopicId } /** @@ -684,9 +668,10 @@ class ReplicaManager(val config: KafkaConfig, * @param requiredAcks number of replicas who must acknowledge the append before sending the response * @param internalTopicsAllowed boolean indicating whether internal topics can be appended to * @param origin source of the append request (ie, client, replication, coordinator) - * @param entriesPerPartition the records per partition to be appended + * @param entriesPerPartition the records per topic partition to be appended. + * If topic partition contains Uuid.ZERO_UUID as topicId the method + * will fall back to the old behaviour and rely on topic name. 
* @param responseCallback callback for sending the response - * @param delayedProduceLock lock for the delayed actions * @param recordValidationStatsCallback callback for updating stats on record conversions * @param requestLocal container for the stateful instances scoped to this request -- this must correspond to the * thread calling this method @@ -696,10 +681,9 @@ class ReplicaManager(val config: KafkaConfig, requiredAcks: Short, internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicPartition, MemoryRecords], - responseCallback: Map[TopicPartition, PartitionResponse] => Unit, - delayedProduceLock: Option[Lock] = None, - recordValidationStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, + recordValidationStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty): Unit = { if (!isValidRequiredAcks(requiredAcks)) { @@ -725,7 +709,6 @@ class ReplicaManager(val config: KafkaConfig, maybeAddDelayedProduce( requiredAcks, - delayedProduceLock, timeout, entriesPerPartition, localProduceResults, @@ -755,19 +738,20 @@ class ReplicaManager(val config: KafkaConfig, requiredAcks: Short, internalTopicsAllowed: Boolean, transactionalId: String, - entriesPerPartition: Map[TopicPartition, MemoryRecords], - responseCallback: Map[TopicPartition, PartitionResponse] => Unit, - recordValidationStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, + recordValidationStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, transactionSupportedOperation: TransactionSupportedOperation): Unit = { val transactionalProducerInfo = mutable.HashSet[(Long, Short)]() val topicPartitionBatchInfo = mutable.Map[TopicPartition, Int]() - entriesPerPartition.foreachEntry { (topicPartition, records) => + val topicIds = entriesPerPartition.keys.map(tp => tp.topic() -> tp.topicId()).toMap + entriesPerPartition.foreachEntry { (topicIdPartition, records) => // Produce requests (only requests that require verification) should only have one batch per partition in "batches" but check all just to be safe. 
val transactionalBatches = records.batches.asScala.filter(batch => batch.hasProducerId && batch.isTransactional) transactionalBatches.foreach(batch => transactionalProducerInfo.add(batch.producerId, batch.producerEpoch)) - if (transactionalBatches.nonEmpty) topicPartitionBatchInfo.put(topicPartition, records.firstBatch.baseSequence) + if (transactionalBatches.nonEmpty) topicPartitionBatchInfo.put(topicIdPartition.topicPartition(), records.firstBatch.baseSequence) } if (transactionalProducerInfo.size > 1) { throw new InvalidPidMappingException("Transactional records contained more than one producer ID") @@ -776,7 +760,7 @@ class ReplicaManager(val config: KafkaConfig, def postVerificationCallback(newRequestLocal: RequestLocal, results: (Map[TopicPartition, Errors], Map[TopicPartition, VerificationGuard])): Unit = { val (preAppendErrors, verificationGuards) = results - val errorResults = preAppendErrors.map { + val errorResults: Map[TopicIdPartition, LogAppendResult] = preAppendErrors.map { case (topicPartition, error) => // translate transaction coordinator errors to known producer response errors val customException = @@ -802,17 +786,21 @@ class ReplicaManager(val config: KafkaConfig, } case _ => None } - topicPartition -> LogAppendResult( + new TopicIdPartition(topicIds.getOrElse(topicPartition.topic(), Uuid.ZERO_UUID), topicPartition) -> LogAppendResult( LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(customException.getOrElse(error.exception)), hasCustomErrorMessage = customException.isDefined ) } - val entriesWithoutErrorsPerPartition = entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) } + // In non-transaction paths, errorResults is typically empty, so we can + // directly use entriesPerPartition instead of creating a new filtered collection + val entriesWithoutErrorsPerPartition = + if (errorResults.nonEmpty) entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) } + else entriesPerPartition val preAppendPartitionResponses = buildProducePartitionStatus(errorResults).map { case (k, status) => k -> status.responseStatus } - def newResponseCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { + def newResponseCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { responseCallback(preAppendPartitionResponses ++ responses) } @@ -885,10 +873,10 @@ class ReplicaManager(val config: KafkaConfig, } private def buildProducePartitionStatus( - results: Map[TopicPartition, LogAppendResult] - ): Map[TopicPartition, ProducePartitionStatus] = { - results.map { case (topicPartition, result) => - topicPartition -> ProducePartitionStatus( + results: Map[TopicIdPartition, LogAppendResult] + ): Map[TopicIdPartition, ProducePartitionStatus] = { + results.map { case (topicIdPartition, result) => + topicIdPartition -> ProducePartitionStatus( result.info.lastOffset + 1, // required offset new PartitionResponse( result.error, @@ -904,19 +892,19 @@ class ReplicaManager(val config: KafkaConfig, private def addCompletePurgatoryAction( actionQueue: ActionQueue, - appendResults: Map[TopicOptionalIdPartition, LogAppendResult] + appendResults: Map[TopicIdPartition, LogAppendResult] ): Unit = { actionQueue.add { - () => appendResults.foreach { case (topicOptionalIdPartition, result) => - val requestKey = new TopicPartitionOperationKey(topicOptionalIdPartition.topicPartition) + () => appendResults.foreach { case (topicIdPartition, result) => + val requestKey = new TopicPartitionOperationKey(topicIdPartition.topicPartition) 
result.info.leaderHwChange match { case LeaderHwChange.INCREASED => // some delayed operations may be unblocked after HW changed delayedProducePurgatory.checkAndComplete(requestKey) delayedFetchPurgatory.checkAndComplete(requestKey) delayedDeleteRecordsPurgatory.checkAndComplete(requestKey) - if (topicOptionalIdPartition.topicId.isPresent) delayedShareFetchPurgatory.checkAndComplete(new DelayedShareFetchPartitionKey( - topicOptionalIdPartition.topicId.get, topicOptionalIdPartition.partition)) + if (topicIdPartition.topicId != Uuid.ZERO_UUID) delayedShareFetchPurgatory.checkAndComplete(new DelayedShareFetchPartitionKey( + topicIdPartition.topicId, topicIdPartition.partition)) case LeaderHwChange.SAME => // probably unblock some follower fetch requests since log end offset has been updated delayedFetchPurgatory.checkAndComplete(requestKey) @@ -929,17 +917,16 @@ class ReplicaManager(val config: KafkaConfig, private def maybeAddDelayedProduce( requiredAcks: Short, - delayedProduceLock: Option[Lock], timeoutMs: Long, - entriesPerPartition: Map[TopicPartition, MemoryRecords], - initialAppendResults: Map[TopicPartition, LogAppendResult], - initialProduceStatus: Map[TopicPartition, ProducePartitionStatus], - responseCallback: Map[TopicPartition, PartitionResponse] => Unit, + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + initialAppendResults: Map[TopicIdPartition, LogAppendResult], + initialProduceStatus: Map[TopicIdPartition, ProducePartitionStatus], + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, ): Unit = { if (delayedProduceRequestRequired(requiredAcks, entriesPerPartition, initialAppendResults)) { // create delayed produce operation val produceMetadata = ProduceMetadata(requiredAcks, initialProduceStatus) - val delayedProduce = new DelayedProduce(timeoutMs, produceMetadata, this, responseCallback, delayedProduceLock) + val delayedProduce = new DelayedProduce(timeoutMs, produceMetadata, this, responseCallback) // create a list of (topic, partition) pairs to use as keys for this delayed produce operation val producerRequestKeys = entriesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toList @@ -955,12 +942,13 @@ class ReplicaManager(val config: KafkaConfig, } } - private def sendInvalidRequiredAcksResponse(entries: Map[TopicPartition, MemoryRecords], - responseCallback: Map[TopicPartition, PartitionResponse] => Unit): Unit = { + private def sendInvalidRequiredAcksResponse( + entries: Map[TopicIdPartition, MemoryRecords], + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit): Unit = { // If required.acks is outside accepted range, something is wrong with the client // Just return an error and don't handle the request at all - val responseStatus = entries.map { case (topicPartition, _) => - topicPartition -> new PartitionResponse( + val responseStatus = entries.map { case (topicIdPartition, _) => + topicIdPartition -> new PartitionResponse( Errors.INVALID_REQUIRED_ACKS, LogAppendInfo.UNKNOWN_LOG_APPEND_INFO.firstOffset, RecordBatch.NO_TIMESTAMP, @@ -1078,18 +1066,18 @@ class ReplicaManager(val config: KafkaConfig, } def invokeCallback( - verificationErrors: Map[TopicPartition, Errors] + verificationErrors: java.util.Map[TopicPartition, Errors] ): Unit = { - callback((errors ++ verificationErrors, verificationGuards.toMap)) + callback((errors ++ verificationErrors.asScala, verificationGuards.toMap)) } addPartitionsToTxnManager.foreach(_.addOrVerifyTransaction( - transactionalId = transactionalId, - producerId = producerId, - 
producerEpoch = producerEpoch, - topicPartitions = verificationGuards.keys.toSeq, - callback = invokeCallback, - transactionSupportedOperation = transactionSupportedOperation + transactionalId, + producerId, + producerEpoch, + verificationGuards.keys.toSeq.asJava, + invokeCallback, + transactionSupportedOperation )) } @@ -1174,7 +1162,15 @@ class ReplicaManager(val config: KafkaConfig, // Stop current replica movement if the destinationDir is different from the existing destination log directory if (partition.futureReplicaDirChanged(destinationDir)) { replicaAlterLogDirsManager.removeFetcherForPartitions(Set(topicPartition)) + // There's a chance that the future replica can be promoted between the check for futureReplicaDirChanged + // and call to removeFetcherForPartitions. We want to avoid resuming cleaning again in that case to avoid + // an IllegalStateException. The presence of a future log after the call to removeFetcherForPartitions + // implies that it has not been promoted as both synchronize on partitionMapLock. + val futureReplicaPromoted = partition.futureLog.isEmpty partition.removeFutureLocalReplica() + if (!futureReplicaPromoted) { + logManager.resumeCleaning(topicPartition) + } } case HostedPartition.Offline(_) => throw new KafkaStorageException(s"Partition $topicPartition is offline") @@ -1236,10 +1232,10 @@ class ReplicaManager(val config: KafkaConfig, * 2) size and lag of current and future logs for each partition in the given log directory. Only logs of the queried partitions * are included. There may be future logs (which will replace the current logs of the partition in the future) on the broker after KIP-113 is implemented. */ - def describeLogDirs(partitions: Set[TopicPartition]): List[DescribeLogDirsResponseData.DescribeLogDirsResult] = { + def describeLogDirs(partitions: Set[TopicPartition]): util.List[DescribeLogDirsResponseData.DescribeLogDirsResult] = { val logsByDir = logManager.allLogs.groupBy(log => log.parentDir) - config.logDirs.toSet.map { logDir: String => + config.logDirs.stream().distinct().map(logDir => { val file = Paths.get(logDir) val absolutePath = file.toAbsolutePath.toString try { @@ -1268,11 +1264,11 @@ class ReplicaManager(val config: KafkaConfig, } val describeLogDirsResult = new DescribeLogDirsResponseData.DescribeLogDirsResult() - .setLogDir(absolutePath).setTopics(topicInfos) + .setLogDir(absolutePath) + .setTopics(topicInfos) .setErrorCode(Errors.NONE.code) - .setTotalBytes(totalBytes).setUsableBytes(usableBytes) - if (!topicInfos.isEmpty) - describeLogDirsResult.setTopics(topicInfos) + .setTotalBytes(totalBytes) + .setUsableBytes(usableBytes) describeLogDirsResult } catch { @@ -1287,7 +1283,7 @@ class ReplicaManager(val config: KafkaConfig, .setLogDir(absolutePath) .setErrorCode(Errors.forException(t).code) } - }.toList + }).toList() } // See: https://bugs.openjdk.java.net/browse/JDK-8162520 @@ -1375,8 +1371,8 @@ class ReplicaManager(val config: KafkaConfig, // 2. there is data to append // 3. 
at least one partition append was successful (fewer errors than partitions) private def delayedProduceRequestRequired(requiredAcks: Short, - entriesPerPartition: Map[TopicPartition, MemoryRecords], - localProduceResults: Map[TopicPartition, LogAppendResult]): Boolean = { + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + localProduceResults: Map[TopicIdPartition, LogAppendResult]): Boolean = { requiredAcks == -1 && entriesPerPartition.nonEmpty && localProduceResults.values.count(_.exception.isDefined) < entriesPerPartition.size @@ -1391,21 +1387,21 @@ class ReplicaManager(val config: KafkaConfig, */ private def appendToLocalLog(internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicPartition, MemoryRecords], + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], requiredAcks: Short, requestLocal: RequestLocal, verificationGuards: Map[TopicPartition, VerificationGuard]): - Map[TopicOptionalIdPartition, LogAppendResult] = { + Map[TopicIdPartition, LogAppendResult] = { val traceEnabled = isTraceEnabled - def processFailedRecord(topicPartition: TopicPartition, t: Throwable) = { - val logStartOffset = onlinePartition(topicPartition).map(_.logStartOffset).getOrElse(-1L) - brokerTopicStats.topicStats(topicPartition.topic).failedProduceRequestRate.mark() + def processFailedRecord(topicIdPartition: TopicIdPartition, t: Throwable) = { + val logStartOffset = onlinePartition(topicIdPartition.topicPartition()).map(_.logStartOffset).getOrElse(-1L) + brokerTopicStats.topicStats(topicIdPartition.topic).failedProduceRequestRate.mark() brokerTopicStats.allTopicsStats.failedProduceRequestRate.mark() t match { case _: InvalidProducerEpochException => - info(s"Error processing append operation on partition $topicPartition", t) + info(s"Error processing append operation on partition $topicIdPartition", t) case _ => - error(s"Error processing append operation on partition $topicPartition", t) + error(s"Error processing append operation on partition $topicIdPartition", t) } logStartOffset @@ -1414,37 +1410,35 @@ class ReplicaManager(val config: KafkaConfig, if (traceEnabled) trace(s"Append [$entriesPerPartition] to local log") - entriesPerPartition.map { case (topicPartition, records) => - brokerTopicStats.topicStats(topicPartition.topic).totalProduceRequestRate.mark() + entriesPerPartition.map { case (topicIdPartition, records) => + brokerTopicStats.topicStats(topicIdPartition.topic).totalProduceRequestRate.mark() brokerTopicStats.allTopicsStats.totalProduceRequestRate.mark() // reject appending to internal topics if it is not allowed - if (Topic.isInternal(topicPartition.topic) && !internalTopicsAllowed) { - (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult( + if (Topic.isInternal(topicIdPartition.topic) && !internalTopicsAllowed) { + (topicIdPartition, LogAppendResult( LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, - Some(new InvalidTopicException(s"Cannot append to internal topic ${topicPartition.topic}")), + Some(new InvalidTopicException(s"Cannot append to internal topic ${topicIdPartition.topic}")), hasCustomErrorMessage = false)) } else { try { - val partition = getPartitionOrException(topicPartition) + val partition = getPartitionOrException(topicIdPartition) val info = partition.appendRecordsToLeader(records, origin, requiredAcks, requestLocal, - verificationGuards.getOrElse(topicPartition, VerificationGuard.SENTINEL)) + verificationGuards.getOrElse(topicIdPartition.topicPartition(), VerificationGuard.SENTINEL)) val 
numAppendedMessages = info.numMessages // update stats for successfully appended bytes and messages as bytesInRate and messageInRate - brokerTopicStats.topicStats(topicPartition.topic).bytesInRate.mark(records.sizeInBytes) + brokerTopicStats.topicStats(topicIdPartition.topic).bytesInRate.mark(records.sizeInBytes) brokerTopicStats.allTopicsStats.bytesInRate.mark(records.sizeInBytes) - brokerTopicStats.topicStats(topicPartition.topic).messagesInRate.mark(numAppendedMessages) + brokerTopicStats.topicStats(topicIdPartition.topic).messagesInRate.mark(numAppendedMessages) brokerTopicStats.allTopicsStats.messagesInRate.mark(numAppendedMessages) if (traceEnabled) - trace(s"${records.sizeInBytes} written to log $topicPartition beginning at offset " + + trace(s"${records.sizeInBytes} written to log $topicIdPartition beginning at offset " + s"${info.firstOffset} and ending at offset ${info.lastOffset}") - var topicId: Optional[Uuid] = Optional.empty() - if (partition.topicId.isDefined) topicId = Optional.of(partition.topicId.get) + (topicIdPartition, LogAppendResult(info, exception = None, hasCustomErrorMessage = false)) - (new TopicOptionalIdPartition(topicId, topicPartition), LogAppendResult(info, exception = None, hasCustomErrorMessage = false)) } catch { // NOTE: Failed produce requests metric is not incremented for known exceptions // it is supposed to indicate un-expected failures of a broker in handling a produce request @@ -1453,16 +1447,17 @@ class ReplicaManager(val config: KafkaConfig, _: RecordTooLargeException | _: RecordBatchTooLargeException | _: CorruptRecordException | - _: KafkaStorageException) => - (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(e), hasCustomErrorMessage = false)) + _: KafkaStorageException | + _: UnknownTopicIdException) => + (topicIdPartition, LogAppendResult(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(e), hasCustomErrorMessage = false)) case rve: RecordValidationException => - val logStartOffset = processFailedRecord(topicPartition, rve.invalidException) + val logStartOffset = processFailedRecord(topicIdPartition, rve.invalidException) val recordErrors = rve.recordErrors - (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithAdditionalInfo(logStartOffset, recordErrors), + (topicIdPartition, LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithAdditionalInfo(logStartOffset, recordErrors), Some(rve.invalidException), hasCustomErrorMessage = true)) case t: Throwable => - val logStartOffset = processFailedRecord(topicPartition, t) - (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithLogStartOffset(logStartOffset), + val logStartOffset = processFailedRecord(topicIdPartition, t) + (topicIdPartition, LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithLogStartOffset(logStartOffset), Some(t), hasCustomErrorMessage = false)) } } @@ -1486,7 +1481,7 @@ class ReplicaManager(val config: KafkaConfig, if (duplicatePartitions.contains(topicPartition)) { debug(s"OffsetRequest with correlation id $correlationId from client $clientId on partition $topicPartition " + s"failed because the partition is duplicated in the request.") - statusByPartition += topicPartition -> + statusByPartition += topicPartition -> ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.INVALID_REQUEST, partition))).build() } else if 
(isListOffsetsTimestampUnsupported(partition.timestamp(), version)) { statusByPartition += topicPartition -> @@ -1552,7 +1547,7 @@ class ReplicaManager(val config: KafkaConfig, _ : UnsupportedForMessageFormatException) => debug(s"Offset request with correlation id $correlationId from client $clientId on " + s"partition $topicPartition failed due to ${e.getMessage}") - statusByPartition += topicPartition -> + statusByPartition += topicPartition -> ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.forException(e), partition))).build() // Only V5 and newer ListOffset calls should get OFFSET_NOT_AVAILABLE case e: OffsetNotAvailableException => @@ -1605,15 +1600,18 @@ class ReplicaManager(val config: KafkaConfig, } /** - * Returns [[LogReadResult]] with error if a task for RemoteStorageFetchInfo could not be scheduled successfully - * else returns [[None]]. + * Initiates an asynchronous remote storage fetch operation for the given remote fetch information. + * + * This method schedules a remote fetch task with the remote log manager and sets up the necessary + * completion handling for the operation. The remote fetch result will be used to populate the + * delayed remote fetch purgatory when completed. + * + * @param remoteFetchInfo The remote storage fetch information + * + * @return A tuple containing the remote fetch task and the remote fetch result */ - private def processRemoteFetch(remoteFetchInfo: RemoteStorageFetchInfo, - params: FetchParams, - responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit, - logReadResults: Seq[(TopicIdPartition, LogReadResult)], - fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)]): Option[LogReadResult] = { - val key = new TopicPartitionOperationKey(remoteFetchInfo.topicPartition.topic(), remoteFetchInfo.topicPartition.partition()) + private def processRemoteFetch(remoteFetchInfo: RemoteStorageFetchInfo): (Future[Void], CompletableFuture[RemoteLogReadResult]) = { + val key = new TopicPartitionOperationKey(remoteFetchInfo.topicIdPartition) val remoteFetchResult = new CompletableFuture[RemoteLogReadResult] var remoteFetchTask: Future[Void] = null try { @@ -1623,31 +1621,39 @@ class ReplicaManager(val config: KafkaConfig, }) } catch { case e: RejectedExecutionException => - // Return the error if any in scheduling the remote fetch task - warn("Unable to fetch data from remote storage", e) - return Some(createLogReadResult(e)) + warn(s"Unable to fetch data from remote storage for remoteFetchInfo: $remoteFetchInfo", e) + // Store the error in RemoteLogReadResult if any in scheduling the remote fetch task. + // It will be sent back to the client in DelayedRemoteFetch along with other successful remote fetch results. 
+ remoteFetchResult.complete(new RemoteLogReadResult(Optional.empty, Optional.of(e))) } - val remoteFetchMaxWaitMs = config.remoteLogManagerConfig.remoteFetchMaxWaitMs().toLong - val remoteFetch = new DelayedRemoteFetch(remoteFetchTask, remoteFetchResult, remoteFetchInfo, remoteFetchMaxWaitMs, - fetchPartitionStatus, params, logReadResults, this, responseCallback) - delayedRemoteFetchPurgatory.tryCompleteElseWatch(remoteFetch, util.Collections.singletonList(key)) - None - } - - private def buildPartitionToFetchPartitionData(logReadResults: Seq[(TopicIdPartition, LogReadResult)], - remoteFetchTopicPartition: TopicPartition, - error: LogReadResult): Seq[(TopicIdPartition, FetchPartitionData)] = { - logReadResults.map { case (tp, result) => - val fetchPartitionData = { - if (tp.topicPartition().equals(remoteFetchTopicPartition)) - error - else - result - }.toFetchPartitionData(false) + (remoteFetchTask, remoteFetchResult) + } - tp -> fetchPartitionData + /** + * Process all remote fetches by creating async read tasks and handling them in DelayedRemoteFetch collectively. + */ + private def processRemoteFetches(remoteFetchInfos: util.HashMap[TopicIdPartition, RemoteStorageFetchInfo], + params: FetchParams, + responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit, + logReadResults: Seq[(TopicIdPartition, LogReadResult)], + remoteFetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)]): Unit = { + val remoteFetchTasks = new util.HashMap[TopicIdPartition, Future[Void]] + val remoteFetchResults = new util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]] + + remoteFetchInfos.forEach { (topicIdPartition, remoteFetchInfo) => + val (task, result) = processRemoteFetch(remoteFetchInfo) + remoteFetchTasks.put(topicIdPartition, task) + remoteFetchResults.put(topicIdPartition, result) } + + val remoteFetchMaxWaitMs = config.remoteLogManagerConfig.remoteFetchMaxWaitMs().toLong + val remoteFetch = new DelayedRemoteFetch(remoteFetchTasks, remoteFetchResults, remoteFetchInfos, remoteFetchMaxWaitMs, + remoteFetchPartitionStatus, params, logReadResults, this, responseCallback) + + // create a list of (topic, partition) pairs to use as keys for this delayed fetch operation + val delayedFetchKeys = remoteFetchPartitionStatus.map { case (tp, _) => new TopicPartitionOperationKey(tp) }.toList + delayedRemoteFetchPurgatory.tryCompleteElseWatch(remoteFetch, delayedFetchKeys.asJava) } /** @@ -1665,8 +1671,8 @@ class ReplicaManager(val config: KafkaConfig, var bytesReadable: Long = 0 var errorReadingData = false - // The 1st topic-partition that has to be read from remote storage - var remoteFetchInfo: Optional[RemoteStorageFetchInfo] = Optional.empty() + // topic-partitions that have to be read from remote storage + val remoteFetchInfos = new util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() var hasDivergingEpoch = false var hasPreferredReadReplica = false @@ -1677,12 +1683,12 @@ class ReplicaManager(val config: KafkaConfig, brokerTopicStats.allTopicsStats.totalFetchRequestRate.mark() if (logReadResult.error != Errors.NONE) errorReadingData = true - if (!remoteFetchInfo.isPresent && logReadResult.info.delayedRemoteStorageFetch.isPresent) { - remoteFetchInfo = logReadResult.info.delayedRemoteStorageFetch + if (logReadResult.info.delayedRemoteStorageFetch.isPresent) { + remoteFetchInfos.put(topicIdPartition, logReadResult.info.delayedRemoteStorageFetch.get()) } - if (logReadResult.divergingEpoch.nonEmpty) + if (logReadResult.divergingEpoch.isPresent) hasDivergingEpoch = 
true - if (logReadResult.preferredReadReplica.nonEmpty) + if (logReadResult.preferredReadReplica.isPresent) hasPreferredReadReplica = true bytesReadable = bytesReadable + logReadResult.info.records.sizeInBytes logReadResultMap.put(topicIdPartition, logReadResult) @@ -1695,7 +1701,7 @@ class ReplicaManager(val config: KafkaConfig, // 4) some error happens while reading data // 5) we found a diverging epoch // 6) has a preferred read replica - if (!remoteFetchInfo.isPresent && (params.maxWaitMs <= 0 || fetchInfos.isEmpty || bytesReadable >= params.minBytes || errorReadingData || + if (remoteFetchInfos.isEmpty && (params.maxWaitMs <= 0 || fetchInfos.isEmpty || bytesReadable >= params.minBytes || errorReadingData || hasDivergingEpoch || hasPreferredReadReplica)) { val fetchPartitionData = logReadResults.map { case (tp, result) => val isReassignmentFetch = params.isFromFollower && isAddingReplica(tp.topicPartition, params.replicaId) @@ -1712,15 +1718,8 @@ class ReplicaManager(val config: KafkaConfig, }) } - if (remoteFetchInfo.isPresent) { - val maybeLogReadResultWithError = processRemoteFetch(remoteFetchInfo.get(), params, responseCallback, logReadResults, fetchPartitionStatus) - if (maybeLogReadResultWithError.isDefined) { - // If there is an error in scheduling the remote fetch task, return what we currently have - // (the data read from local log segment for the other topic-partitions) and an error for the topic-partition - // that we couldn't read from remote storage - val partitionToFetchPartitionData = buildPartitionToFetchPartitionData(logReadResults, remoteFetchInfo.get().topicPartition, maybeLogReadResultWithError.get) - responseCallback(partitionToFetchPartitionData) - } + if (!remoteFetchInfos.isEmpty) { + processRemoteFetches(remoteFetchInfos, params, responseCallback, logReadResults, fetchPartitionStatus.toSeq) } else { // If there is not enough data to respond and there is no remote data, we will let the fetch request // wait for new data. 
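Side note on the fetchMessages rework above: the patch now gathers every partition whose read has to go to remote storage and registers a single DelayedRemoteFetch over per-partition task/result futures, instead of handling only the first such partition. Below is a minimal, self-contained Scala sketch of that batching idea; TopicIdPart, RemoteFetchInfo, RemoteReadResult and RemoteFetchBatchSketch are illustrative stand-ins, not the Kafka classes used in this diff.

import java.util.concurrent.{CompletableFuture, Executors, Future, RejectedExecutionException}
import scala.collection.mutable

// Stand-in types for the sketch only (the real code uses TopicIdPartition,
// RemoteStorageFetchInfo and RemoteLogReadResult).
final case class TopicIdPart(topicId: String, partition: Int)
final case class RemoteFetchInfo(fetchMaxBytes: Int)
final case class RemoteReadResult(data: Option[String], error: Option[Throwable])

object RemoteFetchBatchSketch {
  private val pool = Executors.newFixedThreadPool(2)

  // Schedule one remote read; if scheduling is rejected, complete the result with
  // the error so the caller can still answer the client for the other partitions.
  def schedule(info: RemoteFetchInfo): (Future[_], CompletableFuture[RemoteReadResult]) = {
    val result = new CompletableFuture[RemoteReadResult]
    val task: Future[_] =
      try pool.submit(new Runnable {
        override def run(): Unit =
          result.complete(RemoteReadResult(Some(s"<=${info.fetchMaxBytes} bytes"), None))
      })
      catch {
        case e: RejectedExecutionException =>
          result.complete(RemoteReadResult(None, Some(e)))
          null
      }
    (task, result)
  }

  // One task and one result future per partition; a single delayed operation
  // would then watch all of these keys rather than just the first one.
  def processRemoteFetches(infos: Map[TopicIdPart, RemoteFetchInfo]): Map[TopicIdPart, CompletableFuture[RemoteReadResult]] = {
    val results = mutable.Map.empty[TopicIdPart, CompletableFuture[RemoteReadResult]]
    infos.foreach { case (tp, info) =>
      val (_, result) = schedule(info)
      results += tp -> result
    }
    results.toMap
  }
}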
@@ -1799,16 +1798,16 @@ class ReplicaManager(val config: KafkaConfig, } // If a preferred read-replica is set, skip the read val offsetSnapshot = partition.fetchOffsetSnapshot(fetchInfo.currentLeaderEpoch, fetchOnlyFromLeader = false) - LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, - highWatermark = offsetSnapshot.highWatermark.messageOffset, - leaderLogStartOffset = offsetSnapshot.logStartOffset, - leaderLogEndOffset = offsetSnapshot.logEndOffset.messageOffset, - followerLogStartOffset = followerLogStartOffset, - fetchTimeMs = -1L, - lastStableOffset = Some(offsetSnapshot.lastStableOffset.messageOffset), - preferredReadReplica = preferredReadReplica, - exception = None) + new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + Optional.empty(), + offsetSnapshot.highWatermark.messageOffset, + offsetSnapshot.logStartOffset, + offsetSnapshot.logEndOffset.messageOffset, + followerLogStartOffset, + -1L, + OptionalLong.of(offsetSnapshot.lastStableOffset.messageOffset), + if (preferredReadReplica.isDefined) OptionalInt.of(preferredReadReplica.get) else OptionalInt.empty(), + Optional.empty()) } else { log = partition.localLogWithEpochOrThrow(fetchInfo.currentLeaderEpoch, params.fetchOnlyLeader()) @@ -1823,16 +1822,16 @@ class ReplicaManager(val config: KafkaConfig, val fetchDataInfo = checkFetchDataInfo(partition, readInfo.fetchedData) - LogReadResult(info = fetchDataInfo, - divergingEpoch = readInfo.divergingEpoch.toScala, - highWatermark = readInfo.highWatermark, - leaderLogStartOffset = readInfo.logStartOffset, - leaderLogEndOffset = readInfo.logEndOffset, - followerLogStartOffset = followerLogStartOffset, - fetchTimeMs = fetchTimeMs, - lastStableOffset = Some(readInfo.lastStableOffset), - preferredReadReplica = preferredReadReplica, - exception = None + new LogReadResult(fetchDataInfo, + readInfo.divergingEpoch, + readInfo.highWatermark, + readInfo.logStartOffset, + readInfo.logEndOffset, + followerLogStartOffset, + fetchTimeMs, + OptionalLong.of(readInfo.lastStableOffset), + if (preferredReadReplica.isDefined) OptionalInt.of(preferredReadReplica.get) else OptionalInt.empty(), + Optional.empty() ) } } catch { @@ -1856,15 +1855,15 @@ class ReplicaManager(val config: KafkaConfig, error(s"Error processing fetch with max size $adjustedMaxBytes from $fetchSource " + s"on partition $tp: $fetchInfo", e) - LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, - highWatermark = UnifiedLog.UNKNOWN_OFFSET, - leaderLogStartOffset = UnifiedLog.UNKNOWN_OFFSET, - leaderLogEndOffset = UnifiedLog.UNKNOWN_OFFSET, - followerLogStartOffset = UnifiedLog.UNKNOWN_OFFSET, - fetchTimeMs = -1L, - lastStableOffset = None, - exception = Some(e) + new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + Optional.empty(), + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + UnifiedLog.UNKNOWN_OFFSET, + -1L, + OptionalLong.empty(), + Optional.of(e) ) } } @@ -1878,7 +1877,11 @@ class ReplicaManager(val config: KafkaConfig, // Once we read from a non-empty partition, we stop ignoring request and partition level size limits if (recordBatchSize > 0) minOneMessage = false - limitBytes = math.max(0, limitBytes - recordBatchSize) + // Because we don't know how much data will be retrieved in remote fetch yet, and we don't want to block the API call 
+ // to query remoteLogMetadata, assume it will fetch the max bytes size of data to avoid to exceed the "fetch.max.bytes" setting. + val estimatedRecordBatchSize = if (recordBatchSize == 0 && readResult.info.delayedRemoteStorageFetch.isPresent) + readResult.info.delayedRemoteStorageFetch.get.fetchMaxBytes else recordBatchSize + limitBytes = math.max(0, limitBytes - estimatedRecordBatchSize) result += (tp -> readResult) } result @@ -1924,21 +1927,21 @@ class ReplicaManager(val config: KafkaConfig, ) } else { // For consume fetch requests, create a dummy FetchDataInfo with the remote storage fetch information. - // For the first topic-partition that needs remote data, we will use this information to read the data in another thread. + // For the topic-partitions that need remote data, we will use this information to read the data in another thread. new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, Optional.empty(), - Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp.topicPartition(), + Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp, fetchInfo, params.isolation))) } - LogReadResult(fetchDataInfo, - divergingEpoch = None, + new LogReadResult(fetchDataInfo, + Optional.empty(), highWatermark, leaderLogStartOffset, leaderLogEndOffset, fetchInfo.logStartOffset, fetchTimeMs, - Some(log.lastStableOffset), - exception = None) + OptionalLong.of(log.lastStableOffset), + Optional.empty[Throwable]()) } } else { createLogReadResult(exception) @@ -2012,189 +2015,6 @@ class ReplicaManager(val config: KafkaConfig, def getLogConfig(topicPartition: TopicPartition): Option[LogConfig] = localLog(topicPartition).map(_.config) - def becomeLeaderOrFollower(correlationId: Int, - leaderAndIsrRequest: LeaderAndIsrRequest, - onLeadershipChange: (Iterable[Partition], Iterable[Partition]) => Unit): LeaderAndIsrResponse = { - val startMs = time.milliseconds() - replicaStateChangeLock synchronized { - val controllerId = leaderAndIsrRequest.controllerId - val requestPartitionStates = leaderAndIsrRequest.partitionStates.asScala - stateChangeLogger.info(s"Handling LeaderAndIsr request correlationId $correlationId from controller " + - s"$controllerId for ${requestPartitionStates.size} partitions") - if (stateChangeLogger.isTraceEnabled) - requestPartitionStates.foreach { partitionState => - stateChangeLogger.trace(s"Received LeaderAndIsr request $partitionState " + - s"correlation id $correlationId from controller $controllerId " + - s"epoch ${leaderAndIsrRequest.controllerEpoch}") - } - val topicIds = leaderAndIsrRequest.topicIds() - def topicIdFromRequest(topicName: String): Option[Uuid] = { - val topicId = topicIds.get(topicName) - // if invalid topic ID return None - if (topicId == null || topicId == Uuid.ZERO_UUID) - None - else - Some(topicId) - } - - val response = { - if (leaderAndIsrRequest.controllerEpoch < controllerEpoch) { - stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from controller $controllerId with " + - s"correlation id $correlationId since its controller epoch ${leaderAndIsrRequest.controllerEpoch} is old. 
" + - s"Latest known controller epoch is $controllerEpoch") - leaderAndIsrRequest.getErrorResponse(Errors.STALE_CONTROLLER_EPOCH.exception) - } else { - val responseMap = new mutable.HashMap[TopicPartition, Errors] - controllerEpoch = leaderAndIsrRequest.controllerEpoch - - val partitions = new mutable.HashSet[Partition]() - val partitionsToBeLeader = new mutable.HashMap[Partition, LeaderAndIsrRequest.PartitionState]() - val partitionsToBeFollower = new mutable.HashMap[Partition, LeaderAndIsrRequest.PartitionState]() - val topicIdUpdateFollowerPartitions = new mutable.HashSet[Partition]() - val allTopicPartitionsInRequest = new mutable.HashSet[TopicPartition]() - - // First create the partition if it doesn't exist already - requestPartitionStates.foreach { partitionState => - val topicPartition = new TopicPartition(partitionState.topicName, partitionState.partitionIndex) - allTopicPartitionsInRequest += topicPartition - val partitionOpt = getPartition(topicPartition) match { - case HostedPartition.Offline(_) => - stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from " + - s"controller $controllerId with correlation id $correlationId " + - s"epoch $controllerEpoch for partition $topicPartition as the local replica for the " + - "partition is in an offline log directory") - responseMap.put(topicPartition, Errors.KAFKA_STORAGE_ERROR) - None - - case HostedPartition.Online(partition) => - Some(partition) - - case HostedPartition.None => - val partition = Partition(topicPartition, time, this) - allPartitions.putIfNotExists(topicPartition, HostedPartition.Online(partition)) - Some(partition) - } - - // Next check the topic ID and the partition's leader epoch - partitionOpt.foreach { partition => - val currentLeaderEpoch = partition.getLeaderEpoch - val requestLeaderEpoch = partitionState.leaderEpoch - val requestTopicId = topicIdFromRequest(topicPartition.topic) - val logTopicId = partition.topicId - - if (!hasConsistentTopicId(requestTopicId, logTopicId)) { - stateChangeLogger.error(s"Topic ID in memory: ${logTopicId.get} does not" + - s" match the topic ID for partition $topicPartition received: " + - s"${requestTopicId.get}.") - responseMap.put(topicPartition, Errors.INCONSISTENT_TOPIC_ID) - } else if (requestLeaderEpoch >= currentLeaderEpoch) { - // If the leader epoch is valid record the epoch of the controller that made the leadership decision. 
- // This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path - if (partitionState.replicas.contains(localBrokerId)) { - partitions += partition - if (partitionState.leader == localBrokerId) { - partitionsToBeLeader.put(partition, partitionState) - } else { - partitionsToBeFollower.put(partition, partitionState) - } - } else { - stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from controller $controllerId with " + - s"correlation id $correlationId epoch $controllerEpoch for partition $topicPartition as itself is not " + - s"in assigned replica list ${partitionState.replicas.asScala.mkString(",")}") - responseMap.put(topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) - } - } else if (requestLeaderEpoch < currentLeaderEpoch) { - stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from " + - s"controller $controllerId with correlation id $correlationId " + - s"epoch $controllerEpoch for partition $topicPartition since its associated " + - s"leader epoch $requestLeaderEpoch is smaller than the current " + - s"leader epoch $currentLeaderEpoch") - responseMap.put(topicPartition, Errors.STALE_CONTROLLER_EPOCH) - } else { - val error = requestTopicId match { - case Some(topicId) if logTopicId.isEmpty => - // The controller may send LeaderAndIsr to upgrade to using topic IDs without bumping the epoch. - // If we have a matching epoch, we expect the log to be defined. - val log = localLogOrException(partition.topicPartition) - log.assignTopicId(topicId) - stateChangeLogger.info(s"Updating log for $topicPartition to assign topic ID " + - s"$topicId from LeaderAndIsr request from controller $controllerId with correlation " + - s"id $correlationId epoch $controllerEpoch") - if (partitionState.leader != localBrokerId) - topicIdUpdateFollowerPartitions.add(partition) - Errors.NONE - case None if logTopicId.isDefined && partitionState.leader != localBrokerId => - // If we have a topic ID in the log but not in the request, we must have previously had topic IDs but - // are now downgrading. If we are a follower, remove the topic ID from the PartitionFetchState. 
- stateChangeLogger.info(s"Updating PartitionFetchState for $topicPartition to remove log topic ID " + - s"${logTopicId.get} since LeaderAndIsr request from controller $controllerId with correlation " + - s"id $correlationId epoch $controllerEpoch did not contain a topic ID") - topicIdUpdateFollowerPartitions.add(partition) - Errors.NONE - case _ => - stateChangeLogger.info(s"Ignoring LeaderAndIsr request from " + - s"controller $controllerId with correlation id $correlationId " + - s"epoch $controllerEpoch for partition $topicPartition since its associated " + - s"leader epoch $requestLeaderEpoch matches the current leader epoch") - Errors.STALE_CONTROLLER_EPOCH - } - responseMap.put(topicPartition, error) - } - } - } - - val highWatermarkCheckpoints = new LazyOffsetCheckpoints(this.highWatermarkCheckpoints.asJava) - val partitionsBecomeLeader = if (partitionsToBeLeader.nonEmpty) - makeLeaders(controllerId, controllerEpoch, partitionsToBeLeader, correlationId, responseMap, - highWatermarkCheckpoints, topicIdFromRequest) - else - Set.empty[Partition] - val partitionsBecomeFollower = if (partitionsToBeFollower.nonEmpty) - makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, correlationId, responseMap, - highWatermarkCheckpoints, topicIdFromRequest) - else - Set.empty[Partition] - - val followerTopicSet = partitionsBecomeFollower.map(_.topic).toSet - updateLeaderAndFollowerMetrics(followerTopicSet) - - if (topicIdUpdateFollowerPartitions.nonEmpty) - updateTopicIdForFollowers(controllerId, controllerEpoch, topicIdUpdateFollowerPartitions, correlationId, topicIdFromRequest) - - // We initialize highwatermark thread after the first LeaderAndIsr request. This ensures that all the partitions - // have been completely populated before starting the checkpointing there by avoiding weird race conditions - startHighWatermarkCheckPointThread() - - maybeAddLogDirFetchers(partitions, highWatermarkCheckpoints, topicIdFromRequest) - - replicaFetcherManager.shutdownIdleFetcherThreads() - replicaAlterLogDirsManager.shutdownIdleFetcherThreads() - - remoteLogManager.foreach(rlm => rlm.onLeadershipChange((partitionsBecomeLeader.toSet: Set[TopicPartitionLog]).asJava, (partitionsBecomeFollower.toSet: Set[TopicPartitionLog]).asJava, topicIds)) - - onLeadershipChange(partitionsBecomeLeader, partitionsBecomeFollower) - - val topics = new util.LinkedHashMap[Uuid, util.List[LeaderAndIsrResponse.PartitionError]] - responseMap.foreachEntry { (tp, error) => - val topicId = topicIds.get(tp.topic) - var partitionErrors = topics.get(topicId) - if (partitionErrors == null) { - partitionErrors = new util.ArrayList[LeaderAndIsrResponse.PartitionError]() - topics.put(topicId, partitionErrors) - } - partitionErrors.add(new LeaderAndIsrResponse.PartitionError(tp.partition(), error.code)) - } - new LeaderAndIsrResponse(Errors.NONE, topics) - } - } - val endMs = time.milliseconds() - val elapsedMs = endMs - startMs - stateChangeLogger.info(s"Finished LeaderAndIsr request in ${elapsedMs}ms correlationId $correlationId from controller " + - s"$controllerId for ${requestPartitionStates.size} partitions") - response - } - } - /** * Checks if the topic ID provided in the request is consistent with the topic ID in the log. * When using this method to handle a Fetch request, the topic ID may have been provided by an earlier request. 
@@ -2224,9 +2044,7 @@ class ReplicaManager(val config: KafkaConfig, private def updateLeaderAndFollowerMetrics(newFollowerTopics: Set[String]): Unit = { val leaderTopicSet = leaderPartitionsIterator.map(_.topic).toSet newFollowerTopics.diff(leaderTopicSet).foreach(brokerTopicStats.removeOldLeaderMetrics) - - // remove metrics for brokers which are not followers of a topic - leaderTopicSet.diff(newFollowerTopics).foreach(brokerTopicStats.removeOldFollowerMetrics) + // Currently, there are no follower metrics that need to be updated. } protected[server] def maybeAddLogDirFetchers(partitions: Set[Partition], @@ -2259,243 +2077,6 @@ class ReplicaManager(val config: KafkaConfig, } } - /* - * Make the current broker to become leader for a given set of partitions by: - * - * 1. Stop fetchers for these partitions - * 2. Update the partition metadata in cache - * 3. Add these partitions to the leader partitions set - * - * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where - * the error message will be set on each partition since we do not know which partition caused it. Otherwise, - * return the set of partitions that are made leader due to this method - * - * TODO: the above may need to be fixed later - */ - private def makeLeaders(controllerId: Int, - controllerEpoch: Int, - partitionStates: Map[Partition, LeaderAndIsrRequest.PartitionState], - correlationId: Int, - responseMap: mutable.Map[TopicPartition, Errors], - highWatermarkCheckpoints: OffsetCheckpoints, - topicIds: String => Option[Uuid]): Set[Partition] = { - val traceEnabled = stateChangeLogger.isTraceEnabled - partitionStates.keys.foreach { partition => - if (traceEnabled) - stateChangeLogger.trace(s"Handling LeaderAndIsr request correlationId $correlationId from " + - s"controller $controllerId epoch $controllerEpoch starting the become-leader transition for " + - s"partition ${partition.topicPartition}") - responseMap.put(partition.topicPartition, Errors.NONE) - } - - val partitionsToMakeLeaders = mutable.Set[Partition]() - - try { - // First stop fetchers for all the partitions - replicaFetcherManager.removeFetcherForPartitions(partitionStates.keySet.map(_.topicPartition)) - stateChangeLogger.info(s"Stopped fetchers as part of LeaderAndIsr request correlationId $correlationId from " + - s"controller $controllerId epoch $controllerEpoch as part of the become-leader transition for " + - s"${partitionStates.size} partitions") - // Update the partition information to be the leader - partitionStates.foreachEntry { (partition, partitionState) => - try { - if (partition.makeLeader(partitionState, highWatermarkCheckpoints, topicIds(partitionState.topicName))) { - partitionsToMakeLeaders += partition - } - } catch { - case e: KafkaStorageException => - stateChangeLogger.error(s"Skipped the become-leader state change with " + - s"correlation id $correlationId from controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + - s"(last update controller epoch ${partitionState.controllerEpoch}) since " + - s"the replica for the partition is offline due to storage error $e") - // If there is an offline log directory, a Partition object may have been created and have been added - // to `ReplicaManager.allPartitions` before `createLogIfNotExists()` failed to create local replica due - // to KafkaStorageException. In this case `ReplicaManager.allPartitions` will map this topic-partition - // to an empty Partition object. 
We need to map this topic-partition to OfflinePartition instead. - markPartitionOffline(partition.topicPartition) - responseMap.put(partition.topicPartition, Errors.KAFKA_STORAGE_ERROR) - } - } - - } catch { - case e: Throwable => - partitionStates.keys.foreach { partition => - stateChangeLogger.error(s"Error while processing LeaderAndIsr request correlationId $correlationId received " + - s"from controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition}", e) - } - // Re-throw the exception for it to be caught in KafkaApis - throw e - } - - if (traceEnabled) - partitionStates.keys.foreach { partition => - stateChangeLogger.trace(s"Completed LeaderAndIsr request correlationId $correlationId from controller $controllerId " + - s"epoch $controllerEpoch for the become-leader transition for partition ${partition.topicPartition}") - } - - partitionsToMakeLeaders - } - - /* - * Make the current broker to become follower for a given set of partitions by: - * - * 1. Remove these partitions from the leader partitions set. - * 2. Mark the replicas as followers so that no more data can be added from the producer clients. - * 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads. - * 4. Truncate the log and checkpoint offsets for these partitions. - * 5. Clear the produce and fetch requests in the purgatory - * 6. If the broker is not shutting down, add the fetcher to the new leaders. - * - * The ordering of doing these steps make sure that the replicas in transition will not - * take any more messages before checkpointing offsets so that all messages before the checkpoint - * are guaranteed to be flushed to disks - * - * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where - * the error message will be set on each partition since we do not know which partition caused it. Otherwise, - * return the set of partitions that are made follower due to this method - */ - private def makeFollowers(controllerId: Int, - controllerEpoch: Int, - partitionStates: Map[Partition, LeaderAndIsrRequest.PartitionState], - correlationId: Int, - responseMap: mutable.Map[TopicPartition, Errors], - highWatermarkCheckpoints: OffsetCheckpoints, - topicIds: String => Option[Uuid]) : Set[Partition] = { - val traceLoggingEnabled = stateChangeLogger.isTraceEnabled - partitionStates.foreachEntry { (partition, partitionState) => - if (traceLoggingEnabled) - stateChangeLogger.trace(s"Handling LeaderAndIsr request correlationId $correlationId from controller $controllerId " + - s"epoch $controllerEpoch starting the become-follower transition for partition ${partition.topicPartition} with leader " + - s"${partitionState.leader}") - responseMap.put(partition.topicPartition, Errors.NONE) - } - - val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set() - try { - partitionStates.foreachEntry { (partition, partitionState) => - val newLeaderBrokerId = partitionState.leader - try { - if (metadataCache.hasAliveBroker(newLeaderBrokerId)) { - // Only change partition state when the leader is available - if (partition.makeFollower(partitionState, highWatermarkCheckpoints, topicIds(partitionState.topicName))) { - // Skip invoking onBecomingFollower listeners as the listeners are not registered for zk-based features. - partitionsToMakeFollower += partition - } - } else { - // The leader broker should always be present in the metadata cache. 
- // If not, we should record the error message and abort the transition process for this partition - stateChangeLogger.error(s"Received LeaderAndIsrRequest with correlation id $correlationId from " + - s"controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + - s"(last update controller epoch ${partitionState.controllerEpoch}) " + - s"but cannot become follower since the new leader $newLeaderBrokerId is unavailable.") - // Create the local replica even if the leader is unavailable. This is required to ensure that we include - // the partition's high watermark in the checkpoint file (see KAFKA-1647) - partition.createLogIfNotExists(isNew = partitionState.isNew, isFutureReplica = false, - highWatermarkCheckpoints, topicIds(partitionState.topicName)) - } - } catch { - case e: KafkaStorageException => - stateChangeLogger.error(s"Skipped the become-follower state change with correlation id $correlationId from " + - s"controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + - s"(last update controller epoch ${partitionState.controllerEpoch}) with leader " + - s"$newLeaderBrokerId since the replica for the partition is offline due to storage error $e") - // If there is an offline log directory, a Partition object may have been created and have been added - // to `ReplicaManager.allPartitions` before `createLogIfNotExists()` failed to create local replica due - // to KafkaStorageException. In this case `ReplicaManager.allPartitions` will map this topic-partition - // to an empty Partition object. We need to map this topic-partition to OfflinePartition instead. - markPartitionOffline(partition.topicPartition) - responseMap.put(partition.topicPartition, Errors.KAFKA_STORAGE_ERROR) - } - } - - // Stopping the fetchers must be done first in order to initialize the fetch - // position correctly. 
- replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(_.topicPartition)) - stateChangeLogger.info(s"Stopped fetchers as part of become-follower request from controller $controllerId " + - s"epoch $controllerEpoch with correlation id $correlationId for ${partitionsToMakeFollower.size} partitions") - - partitionsToMakeFollower.foreach { partition => - completeDelayedOperationsWhenNotPartitionLeader(partition.topicPartition, partition.topicId) - } - - if (isShuttingDown.get()) { - if (traceLoggingEnabled) { - partitionsToMakeFollower.foreach { partition => - stateChangeLogger.trace(s"Skipped the adding-fetcher step of the become-follower state " + - s"change with correlation id $correlationId from controller $controllerId epoch $controllerEpoch for " + - s"partition ${partition.topicPartition} with leader ${partitionStates(partition).leader} " + - "since it is shutting down") - } - } - } else { - // we do not need to check if the leader exists again since this has been done at the beginning of this process - val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map { partition => - val leaderNode = partition.leaderReplicaIdOpt match { - case Some(leaderId) => metadataCache.getAliveBrokerNode(leaderId, config.interBrokerListenerName).orElse(Node.noNode()) - case None => Node.noNode() - } - val leader = new BrokerEndPoint(leaderNode.id(), leaderNode.host(), leaderNode.port()) - val log = partition.localLogOrException - val fetchOffset = initialFetchOffset(log) - partition.topicPartition -> InitialFetchState(topicIds(partition.topic), leader, partition.getLeaderEpoch, fetchOffset) - }.toMap - - replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset) - } - } catch { - case e: Throwable => - stateChangeLogger.error(s"Error while processing LeaderAndIsr request with correlationId $correlationId " + - s"received from controller $controllerId epoch $controllerEpoch", e) - // Re-throw the exception for it to be caught in KafkaApis - throw e - } - - if (traceLoggingEnabled) - partitionStates.keys.foreach { partition => - stateChangeLogger.trace(s"Completed LeaderAndIsr request correlationId $correlationId from controller $controllerId " + - s"epoch $controllerEpoch for the become-follower transition for partition ${partition.topicPartition} with leader " + - s"${partitionStates(partition).leader}") - } - - partitionsToMakeFollower - } - - private def updateTopicIdForFollowers(controllerId: Int, - controllerEpoch: Int, - partitions: Set[Partition], - correlationId: Int, - topicIds: String => Option[Uuid]): Unit = { - val traceLoggingEnabled = stateChangeLogger.isTraceEnabled - - try { - if (isShuttingDown.get()) { - if (traceLoggingEnabled) { - partitions.foreach { partition => - stateChangeLogger.trace(s"Skipped the update topic ID step of the become-follower state " + - s"change with correlation id $correlationId from controller $controllerId epoch $controllerEpoch for " + - s"partition ${partition.topicPartition} since it is shutting down") - } - } - } else { - val partitionsToUpdateFollowerWithLeader = mutable.Map.empty[TopicPartition, Int] - partitions.foreach { partition => - partition.leaderReplicaIdOpt.foreach { leader => - if (metadataCache.hasAliveBroker(leader)) { - partitionsToUpdateFollowerWithLeader += partition.topicPartition -> leader - } - } - } - replicaFetcherManager.maybeUpdateTopicIds(partitionsToUpdateFollowerWithLeader, topicIds) - } - } catch { - case e: Throwable => - 
stateChangeLogger.error(s"Error while processing LeaderAndIsr request with correlationId $correlationId " + - s"received from controller $controllerId epoch $controllerEpoch when trying to update topic IDs in the fetchers", e) - // Re-throw the exception for it to be caught in KafkaApis - throw e - } - } - /** * From IBP 2.7 onwards, we send latest fetch epoch in the request and truncate if a * diverging epoch is returned in the response, avoiding the need for a separate @@ -2512,7 +2093,7 @@ class ReplicaManager(val config: KafkaConfig, trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR") // Shrink ISRs for non offline partitions - allPartitions.keys.foreach { topicPartition => + allPartitions.forEach { (topicPartition, _) => onlinePartition(topicPartition).foreach(_.maybeShrinkIsr()) } } @@ -2643,14 +2224,14 @@ class ReplicaManager(val config: KafkaConfig, private def removeAllTopicMetrics(): Unit = { val allTopics = new util.HashSet[String] - allPartitions.keys.foreach(partition => + allPartitions.forEach((partition, _) => if (allTopics.add(partition.topic())) { brokerTopicStats.removeMetrics(partition.topic()) }) } - protected def createReplicaFetcherManager(metrics: Metrics, time: Time, threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager) = { - new ReplicaFetcherManager(config, this, metrics, time, threadNamePrefix, quotaManager, () => metadataCache.metadataVersion(), brokerEpochSupplier) + protected def createReplicaFetcherManager(metrics: Metrics, time: Time, quotaManager: ReplicationQuotaManager) = { + new ReplicaFetcherManager(config, this, metrics, time, quotaManager, () => metadataCache.metadataVersion(), brokerEpochSupplier) } protected def createReplicaAlterLogDirsManager(quotaManager: ReplicationQuotaManager, brokerTopicStats: BrokerTopicStats) = { @@ -2836,9 +2417,8 @@ class ReplicaManager(val config: KafkaConfig, localLeaders.foreachEntry { (tp, info) => getOrCreatePartition(tp, delta, info.topicId).foreach { case (partition, isNew) => try { - val state = info.partition.toLeaderAndIsrPartitionState(tp, isNew) val partitionAssignedDirectoryId = directoryIds.find(_._1.topicPartition() == tp).map(_._2) - partition.makeLeader(state, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) + partition.makeLeader(info.partition, isNew, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) changedPartitions.add(partition) } catch { @@ -2878,9 +2458,8 @@ class ReplicaManager(val config: KafkaConfig, // - This also ensures that the local replica is created even if the leader // is unavailable. This is required to ensure that we include the partition's // high watermark in the checkpoint file (see KAFKA-1647). 
- val state = info.partition.toLeaderAndIsrPartitionState(tp, isNew) val partitionAssignedDirectoryId = directoryIds.find(_._1.topicPartition() == tp).map(_._2) - val isNewLeaderEpoch = partition.makeFollower(state, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) + val isNewLeaderEpoch = partition.makeFollower(info.partition, isNew, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) if (isInControlledShutdown && (info.partition.leader == NO_LEADER || !info.partition.isr.contains(config.brokerId))) { diff --git a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala index 8229607b5be3d..083e0d7cafff8 100644 --- a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala +++ b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala @@ -23,7 +23,7 @@ import org.apache.kafka.common.errors.ClusterAuthorizationException import org.apache.kafka.common.network.Send import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse} import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.quota.ThrottleCallback +import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ThrottleCallback} class RequestHandlerHelper( requestChannel: RequestChannel, @@ -40,7 +40,7 @@ class RequestHandlerHelper( override def startThrottling(): Unit = requestChannel.startThrottling(request) override def endThrottling(): Unit = requestChannel.endThrottling(request) } - quotaManager.throttle(request, callback, throttleTimeMs) + quotaManager.throttle(request.header.clientId(), request.session, callback, throttleTimeMs) } def handleError(request: RequestChannel.Request, e: Throwable): Unit = { diff --git a/core/src/main/scala/kafka/server/SharedServer.scala b/core/src/main/scala/kafka/server/SharedServer.scala index 69d2353fb833a..aba9035cb7e94 100644 --- a/core/src/main/scala/kafka/server/SharedServer.scala +++ b/core/src/main/scala/kafka/server/SharedServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.raft.Endpoints import org.apache.kafka.server.{ProcessRole, ServerSocketFactory} import org.apache.kafka.server.common.ApiMessageAndVersion import org.apache.kafka.server.fault.{FaultHandler, LoggingFaultHandler, ProcessTerminatingFaultHandler} -import org.apache.kafka.server.metrics.{BrokerServerMetrics, KafkaYammerMetrics} +import org.apache.kafka.server.metrics.{BrokerServerMetrics, KafkaYammerMetrics, NodeMetrics} import java.net.InetSocketAddress import java.util.Arrays @@ -116,6 +116,7 @@ class SharedServer( @volatile var raftManager: KafkaRaftManager[ApiMessageAndVersion] = _ @volatile var brokerMetrics: BrokerServerMetrics = _ @volatile var controllerServerMetrics: ControllerMetadataMetrics = _ + @volatile var nodeMetrics: NodeMetrics = _ @volatile var loader: MetadataLoader = _ private val snapshotsDisabledReason = new AtomicReference[String](null) @volatile var snapshotEmitter: SnapshotEmitter = _ @@ -298,6 +299,7 @@ class SharedServer( raftManager = _raftManager _raftManager.startup() + nodeMetrics = new NodeMetrics(metrics, controllerConfig.unstableFeatureVersionsEnabled) metadataLoaderMetrics = if (brokerMetrics != null) { new MetadataLoaderMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry()), elapsedNs => brokerMetrics.updateBatchProcessingTime(elapsedNs), @@ -340,7 +342,7 @@ class SharedServer( throw new RuntimeException("Unable to install metadata publishers.", t) } } - _raftManager.register(loader) + _raftManager.client.register(loader) 
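The SharedServer hunks here add a NodeMetrics field that is created during startup and, in the shutdown path just below, closed with Utils.closeQuietly and nulled out, mirroring how brokerMetrics is handled. A minimal sketch of that create/close-and-null lifecycle, using a hypothetical AutoCloseable in place of the real NodeMetrics (whose constructor arguments are only those shown in the diff):

```scala
import org.apache.kafka.common.utils.Utils

object MetricsLifecycleSketch {
  // Hypothetical AutoCloseable standing in for a holder such as NodeMetrics.
  final class DemoNodeMetrics extends AutoCloseable {
    override def close(): Unit = println("node metrics closed")
  }

  @volatile private var demoNodeMetrics: DemoNodeMetrics = _

  def startup(): Unit = {
    demoNodeMetrics = new DemoNodeMetrics
  }

  def shutdown(): Unit = {
    // Utils.closeQuietly tolerates a null reference and swallows close() failures,
    // so the rest of the shutdown sequence still runs.
    Utils.closeQuietly(demoNodeMetrics, "demo node metrics")
    demoNodeMetrics = null
  }

  def main(args: Array[String]): Unit = {
    startup()
    shutdown()
  }
}
```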
debug("Completed SharedServer startup.") started = true } catch { @@ -387,6 +389,8 @@ class SharedServer( controllerServerMetrics = null Utils.closeQuietly(brokerMetrics, "broker metrics") brokerMetrics = null + Utils.closeQuietly(nodeMetrics, "node metrics") + nodeMetrics = null Utils.closeQuietly(metrics, "metrics") metrics = null CoreUtils.swallow(AppInfoParser.unregisterAppInfo(MetricsPrefix, sharedServerConfig.nodeId.toString, metrics), this) diff --git a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index de8f16e1e5808..8df8a27558008 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -20,20 +20,24 @@ package kafka.server.metadata import java.util.OptionalInt import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager +import kafka.server.share.SharePartitionManager import kafka.server.{KafkaConfig, ReplicaManager} import kafka.utils.Logging import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.TimeoutException import org.apache.kafka.common.internals.Topic +import org.apache.kafka.coordinator.common.runtime.{KRaftCoordinatorMetadataDelta, KRaftCoordinatorMetadataImage} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image.loader.LoaderManifest import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.image.{MetadataDelta, MetadataImage, TopicDelta} -import org.apache.kafka.metadata.publisher.AclPublisher -import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} +import org.apache.kafka.server.common.MetadataVersion.MINIMUM_VERSION +import org.apache.kafka.server.common.{FinalizedFeatures, RequestLocal, ShareVersion} import org.apache.kafka.server.fault.FaultHandler +import org.apache.kafka.storage.internals.log.{LogManager => JLogManager} import java.util.concurrent.CompletableFuture import scala.collection.mutable @@ -70,7 +74,8 @@ class BrokerMetadataPublisher( replicaManager: ReplicaManager, groupCoordinator: GroupCoordinator, txnCoordinator: TransactionCoordinator, - shareCoordinator: Option[ShareCoordinator], + shareCoordinator: ShareCoordinator, + sharePartitionManager: SharePartitionManager, var dynamicConfigPublisher: DynamicConfigPublisher, dynamicClientQuotaPublisher: DynamicClientQuotaPublisher, dynamicTopicClusterQuotaPublisher: DynamicTopicClusterQuotaPublisher, @@ -78,7 +83,7 @@ class BrokerMetadataPublisher( delegationTokenPublisher: DelegationTokenPublisher, aclPublisher: AclPublisher, fatalFaultHandler: FaultHandler, - metadataPublishingFaultHandler: FaultHandler, + metadataPublishingFaultHandler: FaultHandler ) extends MetadataPublisher with Logging { logIdent = s"[BrokerMetadataPublisher id=${config.nodeId}] " @@ -99,6 +104,11 @@ class BrokerMetadataPublisher( */ val firstPublishFuture = new CompletableFuture[Void] + /** + * The share version being used in the broker metadata. 
+ */ + private var finalizedShareVersion: Short = FinalizedFeatures.fromKRaftVersion(MINIMUM_VERSION).finalizedFeatures().getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort) + override def name(): String = "BrokerMetadataPublisher" override def onMetadataUpdate( @@ -165,18 +175,16 @@ class BrokerMetadataPublisher( case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating txn " + s"coordinator with local changes in $deltaName", t) } - if (shareCoordinator.isDefined) { - try { - updateCoordinator(newImage, - delta, - Topic.SHARE_GROUP_STATE_TOPIC_NAME, - shareCoordinator.get.onElection, - (partitionIndex, leaderEpochOpt) => shareCoordinator.get.onResignation(partitionIndex, toOptionalInt(leaderEpochOpt)) - ) - } catch { - case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + - s"coordinator with local changes in $deltaName", t) - } + try { + updateCoordinator(newImage, + delta, + Topic.SHARE_GROUP_STATE_TOPIC_NAME, + shareCoordinator.onElection, + (partitionIndex, leaderEpochOpt) => shareCoordinator.onResignation(partitionIndex, toOptionalInt(leaderEpochOpt)) + ) + } catch { + case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + + s"coordinator with local changes in $deltaName", t) } try { // Notify the group coordinator about deleted topics. @@ -194,6 +202,16 @@ class BrokerMetadataPublisher( case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating group " + s"coordinator with deleted partitions in $deltaName", t) } + try { + // Notify the share coordinator about deleted topics. + val deletedTopicIds = topicsDelta.deletedTopicIds() + if (!deletedTopicIds.isEmpty) { + shareCoordinator.onTopicsDeleted(topicsDelta.deletedTopicIds, RequestLocal.noCaching.bufferSupplier) + } + } catch { + case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + + s"coordinator with deleted partitions in $deltaName", t) + } } // Apply configuration deltas. @@ -206,17 +224,17 @@ class BrokerMetadataPublisher( dynamicTopicClusterQuotaPublisher.onMetadataUpdate(delta, newImage) // Apply SCRAM delta. - scramPublisher.onMetadataUpdate(delta, newImage) + scramPublisher.onMetadataUpdate(delta, newImage, manifest) // Apply DelegationToken delta. - delegationTokenPublisher.onMetadataUpdate(delta, newImage) + delegationTokenPublisher.onMetadataUpdate(delta, newImage, manifest) // Apply ACL delta. aclPublisher.onMetadataUpdate(delta, newImage, manifest) try { // Propagate the new image to the group coordinator. - groupCoordinator.onNewMetadataImage(newImage, delta) + groupCoordinator.onNewMetadataImage(new KRaftCoordinatorMetadataImage(newImage), new KRaftCoordinatorMetadataDelta(delta)) } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating group " + s"coordinator with local changes in $deltaName", t) @@ -224,7 +242,7 @@ class BrokerMetadataPublisher( try { // Propagate the new image to the share coordinator. 
- shareCoordinator.foreach(coordinator => coordinator.onNewMetadataImage(newImage, delta)) + shareCoordinator.onNewMetadataImage(new KRaftCoordinatorMetadataImage(newImage), newImage.features(), new KRaftCoordinatorMetadataDelta(delta)) } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + s"coordinator with local changes in $deltaName", t) @@ -233,6 +251,24 @@ class BrokerMetadataPublisher( if (_firstPublish) { finishInitializingReplicaManager() } + + if (delta.featuresDelta != null) { + try { + val newFinalizedFeatures = new FinalizedFeatures(newImage.features.metadataVersionOrThrow, newImage.features.finalizedVersions, newImage.provenance.lastContainedOffset) + val newFinalizedShareVersion = newFinalizedFeatures.finalizedFeatures().getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort) + // Share version feature has been toggled. + if (newFinalizedShareVersion != finalizedShareVersion) { + finalizedShareVersion = newFinalizedShareVersion + val shareVersion: ShareVersion = ShareVersion.fromFeatureLevel(finalizedShareVersion) + info(s"Feature share.version has been updated to version $finalizedShareVersion") + sharePartitionManager.onShareVersionToggle(shareVersion, config.shareGroupConfig.isShareGroupEnabled) + } + } catch { + case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share partition manager " + + s" with share version feature change in $deltaName", t) + } + } + } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Uncaught exception while " + s"publishing broker metadata from $deltaName", t) @@ -252,6 +288,11 @@ class BrokerMetadataPublisher( /** * Update the coordinator of local replica changes: election and resignation. * + * When the topic is deleted or a partition of the topic is deleted, {@param resignation} + * callback must be called with {@code None}. The coordinator expects the leader epoch to be + * incremented when the {@param resignation} callback is called but the leader epoch + * is not incremented when a topic is deleted. + * * @param image latest metadata image * @param delta metadata delta from the previous image and the latest image * @param topicName name of the topic associated with the coordinator @@ -272,7 +313,7 @@ class BrokerMetadataPublisher( if (topicsDelta.topicWasDeleted(topicName)) { topicsDelta.image.getTopic(topicName).partitions.entrySet.forEach { entry => if (entry.getValue.leader == brokerId) { - resignation(entry.getKey, Some(entry.getValue.leaderEpoch)) + resignation(entry.getKey, None) } } } @@ -300,7 +341,7 @@ class BrokerMetadataPublisher( // recovery-from-unclean-shutdown if required. logManager.startup( metadataCache.getAllTopics().asScala, - isStray = log => LogManager.isStrayKraftReplica(brokerId, newImage.topics(), log) + isStray = log => JLogManager.isStrayKraftReplica(brokerId, newImage.topics(), log) ) // Rename all future replicas which are in the same directory as the @@ -339,14 +380,12 @@ class BrokerMetadataPublisher( } catch { case t: Throwable => fatalFaultHandler.handleFault("Error starting TransactionCoordinator", t) } - if (config.shareGroupConfig.isShareGroupEnabled && shareCoordinator.isDefined) { - try { - // Start the share coordinator. 
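The featuresDelta block added to BrokerMetadataPublisher above caches the finalized share.version level and reacts only when it actually changes. A condensed sketch of that toggle detection, with a plain Map standing in for the image's finalized features and a callback standing in for SharePartitionManager.onShareVersionToggle:

```scala
object ShareVersionToggleSketch {
  private val FeatureName = "share.version"
  private var finalizedShareVersion: Short = 0

  // Called when a metadata delta carries feature changes; `onToggle` stands in for
  // SharePartitionManager.onShareVersionToggle in the real code.
  def onFeaturesDelta(finalizedFeatures: Map[String, Short], onToggle: Short => Unit): Unit = {
    val newLevel = finalizedFeatures.getOrElse(FeatureName, 0.toShort)
    // React only when the finalized level moved; repeated images with the same level
    // must not re-trigger the share partition manager.
    if (newLevel != finalizedShareVersion) {
      finalizedShareVersion = newLevel
      println(s"Feature $FeatureName has been updated to version $newLevel")
      onToggle(newLevel)
    }
  }

  def main(args: Array[String]): Unit = {
    onFeaturesDelta(Map(FeatureName -> 1.toShort), level => println(s"share.version toggled to $level"))
    onFeaturesDelta(Map(FeatureName -> 1.toShort), _ => println("not reached: level unchanged"))
  }
}
```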
- shareCoordinator.get.startup(() => metadataCache.numPartitions( - Topic.SHARE_GROUP_STATE_TOPIC_NAME).orElse(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions())) - } catch { - case t: Throwable => fatalFaultHandler.handleFault("Error starting Share coordinator", t) - } + try { + // Start the share coordinator. + shareCoordinator.startup(() => metadataCache.numPartitions(Topic.SHARE_GROUP_STATE_TOPIC_NAME) + .orElse(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions())) + } catch { + case t: Throwable => fatalFaultHandler.handleFault("Error starting Share coordinator", t) } } diff --git a/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala b/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala index 8fae9941b4112..cda7661907dd9 100644 --- a/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala +++ b/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala @@ -18,24 +18,22 @@ package kafka.server.metadata import kafka.network.ConnectionQuotas -import kafka.server.ClientQuotaManager -import kafka.server.ClientQuotaManager.BaseUserEntity import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.ClientQuotaMetadataManager.transferToClientQuotaEntity import kafka.utils.Logging import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.quota.ClientQuotaEntity +import org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntity import org.apache.kafka.common.utils.Sanitizer -import org.apache.kafka.server.quota.ClientQuotaEntity.{ConfigEntity => ClientQuotaConfigEntity} import java.net.{InetAddress, UnknownHostException} +import java.util.Optional import org.apache.kafka.image.{ClientQuotaDelta, ClientQuotasDelta} import org.apache.kafka.server.config.QuotaConfig +import org.apache.kafka.server.quota.ClientQuotaManager import scala.jdk.OptionConverters.RichOptionalDouble - - // A strict hierarchy of entities that we support sealed trait QuotaEntity case class IpEntity(ip: String) extends QuotaEntity @@ -150,13 +148,13 @@ class ClientQuotaMetadataManager(private[metadata] val quotaManagers: QuotaManag // Convert entity into Options with sanitized values for QuotaManagers val (userEntity, clientEntity) = transferToClientQuotaEntity(quotaEntity) + val quotaValue = newValue.map(v => Optional.of(new Quota(v, true))).getOrElse(Optional.empty[Quota]()) - val quotaValue = newValue.map(new Quota(_, true)) try { manager.updateQuota( - userEntity = userEntity, - clientEntity = clientEntity, - quota = quotaValue + userEntity, + clientEntity, + quotaValue ) } catch { case t: Throwable => error(s"Failed to update user-client quota $quotaEntity", t) @@ -166,24 +164,24 @@ class ClientQuotaMetadataManager(private[metadata] val quotaManagers: QuotaManag object ClientQuotaMetadataManager { - def transferToClientQuotaEntity(quotaEntity: QuotaEntity): (Option[BaseUserEntity], Option[ClientQuotaConfigEntity]) = { + def transferToClientQuotaEntity(quotaEntity: QuotaEntity): (Optional[ConfigEntity], Optional[ConfigEntity]) = { quotaEntity match { case UserEntity(user) => - (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), None) + (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.empty()) case DefaultUserEntity => - (Some(ClientQuotaManager.DefaultUserEntity), None) + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty()) case ClientIdEntity(clientId) => - (None, 
Some(ClientQuotaManager.ClientIdEntity(clientId))) + (Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) case DefaultClientIdEntity => - (None, Some(ClientQuotaManager.DefaultClientIdEntity)) + (Optional.empty(), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) case ExplicitUserExplicitClientIdEntity(user, clientId) => - (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Some(ClientQuotaManager.ClientIdEntity(clientId))) + (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) case ExplicitUserDefaultClientIdEntity(user) => - (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Some(ClientQuotaManager.DefaultClientIdEntity)) + (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) case DefaultUserExplicitClientIdEntity(clientId) => - (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.ClientIdEntity(clientId))) + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) case DefaultUserDefaultClientIdEntity => - (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.DefaultClientIdEntity)) + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) case IpEntity(_) | DefaultIpEntity => throw new IllegalStateException("Should not see IP quota entities here") } } diff --git a/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala b/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala deleted file mode 100644 index 0e12c34b3c590..0000000000000 --- a/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server.metadata - -import kafka.server.KafkaConfig -import kafka.utils.Logging -import org.apache.kafka.image.loader.LoaderManifest -import org.apache.kafka.image.{MetadataDelta, MetadataImage} -import org.apache.kafka.server.DelegationTokenManager -import org.apache.kafka.server.fault.FaultHandler - - -class DelegationTokenPublisher( - conf: KafkaConfig, - faultHandler: FaultHandler, - nodeType: String, - tokenManager: DelegationTokenManager, -) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { - logIdent = s"[${name()}] " - - var _firstPublish = true - - override def name(): String = s"DelegationTokenPublisher $nodeType id=${conf.nodeId}" - - override def onMetadataUpdate( - delta: MetadataDelta, - newImage: MetadataImage, - manifest: LoaderManifest - ): Unit = { - onMetadataUpdate(delta, newImage) - } - - def onMetadataUpdate( - delta: MetadataDelta, - newImage: MetadataImage, - ): Unit = { - val deltaName = if (_firstPublish) { - s"initial MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" - } else { - s"update MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" - } - try { - if (_firstPublish) { - // Initialize the tokenCache with the Image - Option(newImage.delegationTokens()).foreach { delegationTokenImage => - delegationTokenImage.tokens().forEach { (_, delegationTokenData) => - tokenManager.updateToken(tokenManager.getDelegationToken(delegationTokenData.tokenInformation())) - } - } - _firstPublish = false - } - // Apply changes to DelegationTokens. - Option(delta.delegationTokenDelta()).foreach { delegationTokenDelta => - delegationTokenDelta.changes().forEach { - case (tokenId, delegationTokenData) => - if (delegationTokenData.isPresent) { - tokenManager.updateToken(tokenManager.getDelegationToken(delegationTokenData.get().tokenInformation())) - } else { - tokenManager.removeToken(tokenId) - } - } - } - } catch { - case t: Throwable => faultHandler.handleFault("Uncaught exception while " + - s"publishing DelegationToken changes from $deltaName", t) - } - } -} diff --git a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala index fb18da6872aee..88b2cf07012a6 100644 --- a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala +++ b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala @@ -373,6 +373,8 @@ class KRaftMetadataCache( override def topicIdsToNames(): util.Map[Uuid, String] = _currentImage.topics.topicIdToNameView() + override def topicNamesToIds(): util.Map[String, Uuid] = _currentImage.topics().topicNameToIdView() + // if the leader is not known, return None; // if the leader is known and corresponding node is available, return Some(node) // if the leader is known but corresponding node with the listener name is not available, return Some(NO_NODE) diff --git a/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala b/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala deleted file mode 100644 index 818e01fa5f807..0000000000000 --- a/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server.metadata - -import kafka.server.KafkaConfig -import kafka.utils.Logging -import org.apache.kafka.image.loader.LoaderManifest -import org.apache.kafka.image.{MetadataDelta, MetadataImage} -import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.fault.FaultHandler - - -class ScramPublisher( - conf: KafkaConfig, - faultHandler: FaultHandler, - nodeType: String, - credentialProvider: CredentialProvider, -) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { - logIdent = s"[${name()}] " - - override def name(): String = s"ScramPublisher $nodeType id=${conf.nodeId}" - - override def onMetadataUpdate( - delta: MetadataDelta, - newImage: MetadataImage, - manifest: LoaderManifest - ): Unit = { - onMetadataUpdate(delta, newImage) - } - - def onMetadataUpdate( - delta: MetadataDelta, - newImage: MetadataImage, - ): Unit = { - val deltaName = s"MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" - try { - // Apply changes to SCRAM credentials. - Option(delta.scramDelta()).foreach { scramDelta => - scramDelta.changes().forEach { - case (mechanism, userChanges) => - userChanges.forEach { - case (userName, change) => - if (change.isPresent) { - credentialProvider.updateCredential(mechanism, userName, change.get().toCredential) - } else { - credentialProvider.removeCredentials(mechanism, userName) - } - } - } - } - } catch { - case t: Throwable => faultHandler.handleFault("Uncaught exception while " + - s"publishing SCRAM changes from $deltaName", t) - } - } -} diff --git a/core/src/main/scala/kafka/tools/DumpLogSegments.scala b/core/src/main/scala/kafka/tools/DumpLogSegments.scala index a95c77cca4e85..0703c5474f08b 100755 --- a/core/src/main/scala/kafka/tools/DumpLogSegments.scala +++ b/core/src/main/scala/kafka/tools/DumpLogSegments.scala @@ -27,6 +27,7 @@ import org.apache.kafka.common.message.ConsumerProtocolAssignmentJsonConverter import org.apache.kafka.common.message.ConsumerProtocolSubscription import org.apache.kafka.common.message.ConsumerProtocolSubscriptionJsonConverter import org.apache.kafka.common.message.KRaftVersionRecordJsonConverter +import org.apache.kafka.common.message.LeaderChangeMessageJsonConverter import org.apache.kafka.common.message.SnapshotFooterRecordJsonConverter import org.apache.kafka.common.message.SnapshotHeaderRecordJsonConverter import org.apache.kafka.common.message.VotersRecordJsonConverter @@ -310,26 +311,7 @@ object DumpLogSegments { } if (batch.isControlBatch) { - val controlTypeId = ControlRecordType.parseTypeId(record.key) - ControlRecordType.fromTypeId(controlTypeId) match { - case ControlRecordType.ABORT | ControlRecordType.COMMIT => - val endTxnMarker = EndTransactionMarker.deserialize(record) - print(s" endTxnMarker: ${endTxnMarker.controlType} coordinatorEpoch: ${endTxnMarker.coordinatorEpoch}") - case ControlRecordType.SNAPSHOT_HEADER => - val header = 
ControlRecordUtils.deserializeSnapshotHeaderRecord(record) - print(s" SnapshotHeader ${SnapshotHeaderRecordJsonConverter.write(header, header.version())}") - case ControlRecordType.SNAPSHOT_FOOTER => - val footer = ControlRecordUtils.deserializeSnapshotFooterRecord(record) - print(s" SnapshotFooter ${SnapshotFooterRecordJsonConverter.write(footer, footer.version())}") - case ControlRecordType.KRAFT_VERSION => - val kraftVersion = ControlRecordUtils.deserializeKRaftVersionRecord(record) - print(s" KRaftVersion ${KRaftVersionRecordJsonConverter.write(kraftVersion, kraftVersion.version())}") - case ControlRecordType.KRAFT_VOTERS=> - val voters = ControlRecordUtils.deserializeVotersRecord(record) - print(s" KRaftVoters ${VotersRecordJsonConverter.write(voters, voters.version())}") - case controlType => - print(s" controlType: $controlType($controlTypeId)") - } + printControlRecord(record) } } if (printContents && !batch.isControlBatch) { @@ -351,6 +333,32 @@ object DumpLogSegments { } finally fileRecords.closeHandlers() } + private def printControlRecord(record: Record): Unit = { + val controlTypeId = ControlRecordType.parseTypeId(record.key) + ControlRecordType.fromTypeId(controlTypeId) match { + case ControlRecordType.ABORT | ControlRecordType.COMMIT => + val endTxnMarker = EndTransactionMarker.deserialize(record) + print(s" endTxnMarker: ${endTxnMarker.controlType} coordinatorEpoch: ${endTxnMarker.coordinatorEpoch}") + case ControlRecordType.LEADER_CHANGE => + val leaderChangeMessage = ControlRecordUtils.deserializeLeaderChangeMessage(record) + print(s" LeaderChange: ${LeaderChangeMessageJsonConverter.write(leaderChangeMessage, leaderChangeMessage.version())}") + case ControlRecordType.SNAPSHOT_HEADER => + val header = ControlRecordUtils.deserializeSnapshotHeaderRecord(record) + print(s" SnapshotHeader ${SnapshotHeaderRecordJsonConverter.write(header, header.version())}") + case ControlRecordType.SNAPSHOT_FOOTER => + val footer = ControlRecordUtils.deserializeSnapshotFooterRecord(record) + print(s" SnapshotFooter ${SnapshotFooterRecordJsonConverter.write(footer, footer.version())}") + case ControlRecordType.KRAFT_VERSION => + val kraftVersion = ControlRecordUtils.deserializeKRaftVersionRecord(record) + print(s" KRaftVersion ${KRaftVersionRecordJsonConverter.write(kraftVersion, kraftVersion.version())}") + case ControlRecordType.KRAFT_VOTERS=> + val voters = ControlRecordUtils.deserializeVotersRecord(record) + print(s" KRaftVoters ${VotersRecordJsonConverter.write(voters, voters.version())}") + case controlType => + print(s" controlType: $controlType($controlTypeId)") + } + } + private def printBatchLevel(batch: FileLogInputStream.FileChannelRecordBatch, accumulativeBytes: Long): Unit = { if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) print("baseOffset: " + batch.baseOffset + " lastOffset: " + batch.lastOffset + " count: " + batch.countOrNull + @@ -567,7 +575,7 @@ object DumpLogSegments { private class RemoteMetadataLogMessageParser extends MessageParser[String, String] { private val metadataRecordSerde = new RemoteLogMetadataSerde - + override def parse(record: Record): (Option[String], Option[String]) = { val output = try { val data = new Array[Byte](record.value.remaining) @@ -626,11 +634,11 @@ object DumpLogSegments { private val transactionLogOpt = parser.accepts("transaction-log-decoder", "If set, log data will be parsed as " + "transaction metadata from the __transaction_state topic.") private val clusterMetadataOpt = parser.accepts("cluster-metadata-decoder", "If set, log data will 
be parsed as cluster metadata records.") - private val remoteMetadataOpt = parser.accepts("remote-log-metadata-decoder", "If set, log data will be parsed as TopicBasedRemoteLogMetadataManager (RLMM) metadata records." + + private val remoteMetadataOpt = parser.accepts("remote-log-metadata-decoder", "If set, log data will be parsed as TopicBasedRemoteLogMetadataManager (RLMM) metadata records." + " Instead, the value-decoder-class option can be used if a custom RLMM implementation is configured.") private val shareStateOpt = parser.accepts("share-group-state-decoder", "If set, log data will be parsed as share group state data from the " + "__share_group_state topic.") - private val skipRecordMetadataOpt = parser.accepts("skip-record-metadata", "Whether to skip printing metadata for each record.") + private val skipRecordMetadataOpt = parser.accepts("skip-record-metadata", "Skip metadata when printing records. This flag also skips control records.") options = parser.parse(args : _*) def messageParser: MessageParser[_, _] = diff --git a/core/src/main/scala/kafka/tools/StorageTool.scala b/core/src/main/scala/kafka/tools/StorageTool.scala index 40892bca38c92..d8048d4d0aa2c 100644 --- a/core/src/main/scala/kafka/tools/StorageTool.scala +++ b/core/src/main/scala/kafka/tools/StorageTool.scala @@ -126,30 +126,46 @@ object StorageTool extends Logging { setClusterId(namespace.getString("cluster_id")). setUnstableFeatureVersionsEnabled(config.unstableFeatureVersionsEnabled). setIgnoreFormatted(namespace.getBoolean("ignore_formatted")). - setControllerListenerName(config.controllerListenerNames.head). + setControllerListenerName(config.controllerListenerNames.get(0)). setMetadataLogDirectory(config.metadataLogDir) - Option(namespace.getString("release_version")).foreach( - releaseVersion => formatter. - setReleaseVersion(MetadataVersion.fromVersionString(releaseVersion))) + + Option(namespace.getString("release_version")).foreach(releaseVersion => { + try { + formatter.setReleaseVersion(MetadataVersion.fromVersionString(releaseVersion, config.unstableFeatureVersionsEnabled)) + } catch { + case e: Throwable => + throw new TerseFailure(e.getMessage) + } + }) + Option(namespace.getList[String]("feature")).foreach( featureNamesAndLevels(_).foreachEntry { (k, v) => formatter.setFeatureLevel(k, v) }) - Option(namespace.getString("initial_controllers")). + val initialControllers = namespace.getString("initial_controllers") + val isStandalone = namespace.getBoolean("standalone") + val staticVotersEmpty = config.quorumConfig.voters().isEmpty + formatter.setHasDynamicQuorum(staticVotersEmpty) + if (!staticVotersEmpty && (Option(initialControllers).isDefined || isStandalone)) { + throw new TerseFailure("You cannot specify " + + QuorumConfig.QUORUM_VOTERS_CONFIG + " and format the node " + + "with --initial-controllers or --standalone. " + + "If you want to use dynamic quorum, please remove " + + QuorumConfig.QUORUM_VOTERS_CONFIG + " and specify " + + QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG + " instead.") + } + Option(initialControllers). 
foreach(v => formatter.setInitialControllers(DynamicVoters.parse(v))) - if (namespace.getBoolean("standalone")) { + if (isStandalone) { formatter.setInitialControllers(createStandaloneDynamicVoters(config)) } - if (namespace.getBoolean("no_initial_controllers")) { - formatter.setNoInitialControllersFlag(true) - } else { - if (config.processRoles.contains(ProcessRole.ControllerRole)) { - if (config.quorumConfig.voters().isEmpty && formatter.initialVoters().isEmpty) { + if (!namespace.getBoolean("no_initial_controllers") && + config.processRoles.contains(ProcessRole.ControllerRole) && + staticVotersEmpty && + formatter.initialVoters().isEmpty) { throw new TerseFailure("Because " + QuorumConfig.QUORUM_VOTERS_CONFIG + " is not set on this controller, you must specify one of the following: " + "--standalone, --initial-controllers, or --no-initial-controllers."); - } - } } Option(namespace.getList("add_scram")). foreach(scramArgs => formatter.setScramArguments(scramArgs.asInstanceOf[util.List[String]])) @@ -161,9 +177,9 @@ object StorageTool extends Logging { * Maps the given release version to the corresponding metadata version * and prints the corresponding features. * - * @param namespace Arguments containing the release version. - * @param printStream The print stream to output the version mapping. - * @param validFeatures List of features to be considered in the output + * @param namespace Arguments containing the release version. + * @param printStream The print stream to output the version mapping. + * @param validFeatures List of features to be considered in the output. */ def runVersionMappingCommand( namespace: Namespace, @@ -172,7 +188,7 @@ object StorageTool extends Logging { ): Unit = { val releaseVersion = Option(namespace.getString("release_version")).getOrElse(MetadataVersion.LATEST_PRODUCTION.toString) try { - val metadataVersion = MetadataVersion.fromVersionString(releaseVersion) + val metadataVersion = MetadataVersion.fromVersionString(releaseVersion, true) val metadataVersionLevel = metadataVersion.featureLevel() printStream.print(f"metadata.version=$metadataVersionLevel%d ($releaseVersion%s)%n") @@ -183,8 +199,7 @@ object StorageTool extends Logging { } } catch { case e: IllegalArgumentException => - throw new TerseFailure(s"Unknown release version '$releaseVersion'. Supported versions are: " + - s"${MetadataVersion.MINIMUM_VERSION.version} to ${MetadataVersion.LATEST_PRODUCTION.version}") + throw new TerseFailure(e.getMessage) } } @@ -319,18 +334,21 @@ object StorageTool extends Logging { val reconfigurableQuorumOptions = formatParser.addMutuallyExclusiveGroup() reconfigurableQuorumOptions.addArgument("--standalone", "-s") - .help("Used to initialize a controller as a single-node dynamic quorum.") + .help("Used to initialize a controller as a single-node dynamic quorum. When setting this flag, " + + "the controller.quorum.voters config must not be set, and controller.quorum.bootstrap.servers is set instead.") .action(storeTrue()) reconfigurableQuorumOptions.addArgument("--no-initial-controllers", "-N") - .help("Used to initialize a server without a dynamic quorum topology.") + .help("Used to initialize a server without specifying a dynamic quorum. When setting this flag, " + + "the controller.quorum.voters config should not be set, and controller.quorum.bootstrap.servers is set instead.") .action(storeTrue()) reconfigurableQuorumOptions.addArgument("--initial-controllers", "-I") - .help("Used to initialize a server with a specific dynamic quorum topology. 
The argument " + + .help("Used to initialize a server with the specified dynamic quorum. The argument " + "is a comma-separated list of id@hostname:port:directory. The same values must be used to " + "format all nodes. For example:\n0@example.com:8082:JEXY6aqzQY-32P5TStzaFg,1@example.com:8083:" + - "MvDxzVmcRsaTz33bUuRU6A,2@example.com:8084:07R5amHmR32VDA6jHkGbTA\n") + "MvDxzVmcRsaTz33bUuRU6A,2@example.com:8084:07R5amHmR32VDA6jHkGbTA\n. When setting this flag, " + + "the controller.quorum.voters config must not be set, and controller.quorum.bootstrap.servers is set instead.") .action(store()) } @@ -376,7 +394,7 @@ object StorageTool extends Logging { def configToLogDirectories(config: KafkaConfig): Seq[String] = { val directories = new mutable.TreeSet[String] - directories ++= config.logDirs + directories ++= config.logDirs.asScala Option(config.metadataLogDir).foreach(directories.add) directories.toSeq } diff --git a/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala b/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala index 2e9d8e2bb8a00..081fbec3c95d7 100644 --- a/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala +++ b/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala @@ -18,7 +18,6 @@ package kafka.tools import kafka.network.RequestChannel -import kafka.raft.RaftManager import kafka.server.ApiRequestHandler import kafka.utils.Logging import org.apache.kafka.common.internals.FatalExitError @@ -26,6 +25,7 @@ import org.apache.kafka.common.message.{BeginQuorumEpochResponseData, EndQuorumE import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage} import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, BeginQuorumEpochResponse, EndQuorumEpochResponse, FetchResponse, FetchSnapshotResponse, VoteResponse} import org.apache.kafka.common.utils.Time +import org.apache.kafka.raft.RaftManager import org.apache.kafka.server.ApiVersionManager import org.apache.kafka.server.common.RequestLocal diff --git a/core/src/main/scala/kafka/tools/TestRaftServer.scala b/core/src/main/scala/kafka/tools/TestRaftServer.scala index c07538aadadac..48e101443a1f5 100644 --- a/core/src/main/scala/kafka/tools/TestRaftServer.scala +++ b/core/src/main/scala/kafka/tools/TestRaftServer.scala @@ -22,21 +22,21 @@ import java.util.concurrent.atomic.{AtomicInteger, AtomicLong} import java.util.concurrent.{CompletableFuture, CountDownLatch, LinkedBlockingDeque, TimeUnit} import joptsimple.{OptionException, OptionSpec} import kafka.network.SocketServer -import kafka.raft.{DefaultExternalKRaftMetrics, KafkaRaftManager, RaftManager} +import kafka.raft.{DefaultExternalKRaftMetrics, KafkaRaftManager} import kafka.server.{KafkaConfig, KafkaRequestHandlerPool} import kafka.utils.{CoreUtils, Logging} -import org.apache.kafka.common.errors.InvalidConfigurationException import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing import org.apache.kafka.common.metrics.stats.{Meter, Percentile, Percentiles} +import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ObjectSerializationCache, Writable} import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{Exit, Time, Utils} import org.apache.kafka.common.{TopicPartition, Uuid, protocol} import 
org.apache.kafka.raft.errors.NotLeaderException -import org.apache.kafka.raft.{Batch, BatchReader, Endpoints, LeaderAndEpoch, QuorumConfig, RaftClient} +import org.apache.kafka.raft.{Batch, BatchReader, Endpoints, LeaderAndEpoch, QuorumConfig, RaftClient, RaftManager} import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.SimpleApiVersionManager import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} @@ -88,7 +88,7 @@ class TestRaftServer( val endpoints = Endpoints.fromInetSocketAddresses( config.effectiveAdvertisedControllerListeners .map { endpoint => - (endpoint.listenerName, InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) + (ListenerName.normalised(endpoint.listener), InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) } .toMap .asJava @@ -114,8 +114,8 @@ class TestRaftServer( workloadGenerator = new RaftWorkloadGenerator( raftManager, time, - recordsPerSec = 20000, - recordSize = 256 + recordsPerSec = throughput, + recordSize = recordSize ) val requestHandler = new TestRaftRequestHandler( @@ -180,7 +180,7 @@ class TestRaftServer( private var claimedEpoch: Option[Int] = None - raftManager.register(this) + raftManager.client.register(this) override def handleLeaderChange(newLeaderAndEpoch: LeaderAndEpoch): Unit = { if (newLeaderAndEpoch.isLeader(config.nodeId)) { @@ -427,7 +427,7 @@ object TestRaftServer extends Logging { } private class TestRaftServerOptions(args: Array[String]) extends CommandDefaultOptions(args) { - val configOpt: OptionSpec[String] = parser.accepts("config", "Required configured file") + val configOpt: OptionSpec[String] = parser.accepts("config", "REQUIRED: The configured file") .withRequiredArg .describedAs("filename") .ofType(classOf[String]) @@ -445,12 +445,14 @@ object TestRaftServer extends Logging { .ofType(classOf[Int]) .defaultsTo(256) - val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "The directory id of the replica") + val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "REQUIRED: The directory id of the replica") .withRequiredArg .describedAs("directory id") .ofType(classOf[String]) options = parser.parse(args : _*) + + def checkArgs(): Unit = CommandLineUtils.checkRequiredArgs(parser, options, configOpt, directoryId) } def main(args: Array[String]): Unit = { @@ -458,16 +460,11 @@ object TestRaftServer extends Logging { try { CommandLineUtils.maybePrintHelpOrVersion(opts, "Standalone raft server for performance testing") + opts.checkArgs() val configFile = opts.options.valueOf(opts.configOpt) - if (configFile == null) { - throw new InvalidConfigurationException("Missing configuration file. Should specify with '--config'") - } - val directoryIdAsString = opts.options.valueOf(opts.directoryId) - if (directoryIdAsString == null) { - throw new InvalidConfigurationException("Missing replica directory id. Should specify with --replica-directory-id") - } + val serverProps = Utils.loadProps(configFile) // KafkaConfig requires either `process.roles` or `zookeeper.connect`. 
Neither are diff --git a/core/src/main/scala/kafka/utils/CoreUtils.scala b/core/src/main/scala/kafka/utils/CoreUtils.scala index 98a06f80f4652..66f9bd4865731 100755 --- a/core/src/main/scala/kafka/utils/CoreUtils.scala +++ b/core/src/main/scala/kafka/utils/CoreUtils.scala @@ -23,10 +23,9 @@ import java.lang.management.ManagementFactory import com.typesafe.scalalogging.Logger import javax.management.ObjectName -import scala.collection._ import scala.collection.Seq -import org.apache.kafka.network.EndPoint import org.apache.commons.validator.routines.InetAddressValidator +import org.apache.kafka.common.Endpoint import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Utils @@ -75,9 +74,9 @@ object CoreUtils { /** * Recursively delete the list of files/directories and any subfiles (if any exist) - * @param files sequence of files to be deleted + * @param files list of files to be deleted */ - def delete(files: Seq[String]): Unit = files.foreach(f => Utils.delete(new File(f))) + def delete(files: java.util.List[String]): Unit = files.forEach(f => Utils.delete(new File(f))) /** * Register the given mbean with the platform mbean server, @@ -122,22 +121,22 @@ object CoreUtils { def inWriteLock[T](lock: ReadWriteLock)(fun: => T): T = inLock[T](lock.writeLock)(fun) - def listenerListToEndPoints(listeners: String, securityProtocolMap: Map[ListenerName, SecurityProtocol]): Seq[EndPoint] = { + def listenerListToEndPoints(listeners: java.util.List[String], securityProtocolMap: java.util.Map[ListenerName, SecurityProtocol]): Seq[Endpoint] = { listenerListToEndPoints(listeners, securityProtocolMap, requireDistinctPorts = true) } - private def checkDuplicateListenerPorts(endpoints: Seq[EndPoint], listeners: String): Unit = { + private def checkDuplicateListenerPorts(endpoints: Seq[Endpoint], listeners: java.util.List[String]): Unit = { val distinctPorts = endpoints.map(_.port).distinct require(distinctPorts.size == endpoints.map(_.port).size, s"Each listener must have a different port, listeners: $listeners") } - def listenerListToEndPoints(listeners: String, securityProtocolMap: Map[ListenerName, SecurityProtocol], requireDistinctPorts: Boolean): Seq[EndPoint] = { + def listenerListToEndPoints(listeners: java.util.List[String], securityProtocolMap: java.util.Map[ListenerName, SecurityProtocol], requireDistinctPorts: Boolean): Seq[Endpoint] = { def validateOneIsIpv4AndOtherIpv6(first: String, second: String): Boolean = (inetAddressValidator.isValidInet4Address(first) && inetAddressValidator.isValidInet6Address(second)) || (inetAddressValidator.isValidInet6Address(first) && inetAddressValidator.isValidInet4Address(second)) - def validate(endPoints: Seq[EndPoint]): Unit = { - val distinctListenerNames = endPoints.map(_.listenerName).distinct + def validate(endPoints: Seq[Endpoint]): Unit = { + val distinctListenerNames = endPoints.map(_.listener).distinct require(distinctListenerNames.size == endPoints.size, s"Each listener must have a different name, listeners: $listeners") val (duplicatePorts, _) = endPoints.filter { @@ -186,8 +185,7 @@ object CoreUtils { } val endPoints = try { - SocketServerConfigs.listenerListToEndPoints(listeners, securityProtocolMap.asJava). 
- asScala.map(EndPoint.fromPublic) + SocketServerConfigs.listenerListToEndPoints(listeners, securityProtocolMap).asScala } catch { case e: Exception => throw new IllegalArgumentException(s"Error creating broker listeners from '$listeners': ${e.getMessage}", e) diff --git a/core/src/main/scala/kafka/utils/Logging.scala b/core/src/main/scala/kafka/utils/Logging.scala index 7518fecd2f93f..e08a6873fc1ef 100755 --- a/core/src/main/scala/kafka/utils/Logging.scala +++ b/core/src/main/scala/kafka/utils/Logging.scala @@ -18,9 +18,9 @@ package kafka.utils import com.typesafe.scalalogging.Logger +import org.apache.kafka.server.logger.LoggingController import org.slf4j.{LoggerFactory, Marker, MarkerFactory} - object Log4jControllerRegistration { private val logger = Logger(this.getClass.getName) diff --git a/core/src/main/scala/kafka/utils/LoggingController.scala b/core/src/main/scala/kafka/utils/LoggingController.scala deleted file mode 100755 index 9d8de03a6c5d3..0000000000000 --- a/core/src/main/scala/kafka/utils/LoggingController.scala +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import com.typesafe.scalalogging.Logger -import kafka.utils.LoggingController.ROOT_LOGGER -import org.apache.kafka.common.utils.Utils -import org.apache.logging.log4j.core.LoggerContext -import org.apache.logging.log4j.core.config.Configurator -import org.apache.logging.log4j.{Level, LogManager} - -import java.util -import java.util.Locale -import scala.jdk.CollectionConverters._ - - -object LoggingController { - - private val logger = Logger[LoggingController] - - /** - * Note: In Log4j 1, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. - * - * The root logger's name is changed in log4j2 to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]) but for backward- - * compatibility. Kafka keeps its original root logger name. It is why here is a dedicated definition for the root logger name. - */ - val ROOT_LOGGER = "root" - - private[this] val delegate: LoggingControllerDelegate = { - try { - new Log4jCoreController - } catch { - case _: ClassCastException | _: LinkageError => - logger.info("No supported logging implementation found. Logging configuration endpoint will be disabled.") - new NoOpController - case e: Exception => - logger.warn("A problem occurred, while initializing the logging controller. Logging configuration endpoint will be disabled.", e) - new NoOpController - } - } - - /** - * Returns a map of the log4j loggers and their assigned log level. - * If a logger does not have a log level assigned, we return the log level of the first ancestor with a level configured. 
- */ - def loggers: Map[String, String] = delegate.loggers - - /** - * Sets the log level of a particular logger. If the given logLevel is not an available level - * (i.e., one of OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL) it falls back to DEBUG. - * - * @see [[Level.toLevel]] - */ - def logLevel(loggerName: String, logLevel: String): Boolean = delegate.logLevel(loggerName, logLevel) - - def unsetLogLevel(loggerName: String): Boolean = delegate.unsetLogLevel(loggerName) - - def loggerExists(loggerName: String): Boolean = delegate.loggerExists(loggerName) -} - -private class NoOpController extends LoggingControllerDelegate { - override def loggers: Map[String, String] = Map.empty - - override def logLevel(loggerName: String, logLevel: String): Boolean = false - - override def unsetLogLevel(loggerName: String): Boolean = false -} - -private class Log4jCoreController extends LoggingControllerDelegate { - private[this] val logContext = LogManager.getContext(false).asInstanceOf[LoggerContext] - - override def loggers: Map[String, String] = { - val rootLoggerLevel = logContext.getRootLogger.getLevel.toString - - // Loggers defined in the configuration - val configured = logContext.getConfiguration.getLoggers.asScala - .values - .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) - .map { logger => - logger.getName -> logger.getLevel.toString - }.toMap - - // Loggers actually running - val actual = logContext.getLoggers.asScala - .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) - .map { logger => - logger.getName -> logger.getLevel.toString - }.toMap - - (configured ++ actual) + (ROOT_LOGGER -> rootLoggerLevel) - } - - override def logLevel(loggerName: String, logLevel: String): Boolean = { - if (Utils.isBlank(loggerName) || Utils.isBlank(logLevel)) - return false - - val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT)) - - if (loggerName == ROOT_LOGGER) { - Configurator.setLevel(LogManager.ROOT_LOGGER_NAME, level) - true - } else { - if (loggerExists(loggerName) && level != null) { - Configurator.setLevel(loggerName, level) - true - } - else false - } - } - - override def unsetLogLevel(loggerName: String): Boolean = { - val nullLevel: Level = null - if (loggerName == ROOT_LOGGER) { - Configurator.setLevel(LogManager.ROOT_LOGGER_NAME, nullLevel) - true - } else { - if (loggerExists(loggerName)) { - Configurator.setLevel(loggerName, nullLevel) - true - } - else false - } - } -} - -private abstract class LoggingControllerDelegate { - def loggers: Map[String, String] - def logLevel(loggerName: String, logLevel: String): Boolean - def unsetLogLevel(loggerName: String): Boolean - def loggerExists(loggerName: String): Boolean = loggers.contains(loggerName) -} - -/** - * An MBean that allows the user to dynamically alter log4j levels at runtime. - * The companion object contains the singleton instance of this class and - * registers the MBean. The [[kafka.utils.Logging]] trait forces initialization - * of the companion object. - */ -class LoggingController extends LoggingControllerMBean { - - def getLoggers: util.List[String] = { - // we replace scala collection by java collection so mbean client is able to deserialize it without scala library. 
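Context for this deletion: the Scala kafka.utils.LoggingController removed here backed the JMX endpoint for changing log levels at runtime, and per the import added to kafka/utils/Logging.scala above, that role now belongs to org.apache.kafka.server.logger.LoggingController. A minimal JMX client sketch of the MBean surface being removed (getLoggers above, getLogLevel / setLogLevel just below); the object name kafka:type=kafka.Log4jController is an assumption here, since it is not spelled out in this hunk:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class LogLevelJmxSketch {
        public static void main(String[] args) throws Exception {
            // In-process example; a remote tool would connect through a JMXConnector instead.
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Assumed object name for the dynamic log level MBean.
            ObjectName logging = new ObjectName("kafka:type=kafka.Log4jController");

            // Attribute "Loggers" corresponds to getLoggers(): a List<String> of "logger=LEVEL" entries.
            Object loggers = server.getAttribute(logging, "Loggers");
            System.out.println(loggers);

            // Operation setLogLevel(logger, level) reports whether the level was applied.
            Object changed = server.invoke(
                logging,
                "setLogLevel",
                new Object[]{"kafka.server.KafkaApis", "DEBUG"},
                new String[]{String.class.getName(), String.class.getName()});
            System.out.println("changed: " + changed);
        }
    }
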
- new util.ArrayList[String](LoggingController.loggers.map { - case (logger, level) => s"$logger=$level" - }.toSeq.asJava) - } - - def getLogLevel(loggerName: String): String = { - LoggingController.loggers.getOrElse(loggerName, "No such logger.") - } - - def setLogLevel(loggerName: String, level: String): Boolean = LoggingController.logLevel(loggerName, level) -} - -trait LoggingControllerMBean { - def getLoggers: java.util.List[String] - def getLogLevel(logger: String): String - def setLogLevel(logger: String, level: String): Boolean -} diff --git a/core/src/main/scala/kafka/utils/Mx4jLoader.scala b/core/src/main/scala/kafka/utils/Mx4jLoader.scala index 5fbbebed47572..7e8b1dba53e6a 100644 --- a/core/src/main/scala/kafka/utils/Mx4jLoader.scala +++ b/core/src/main/scala/kafka/utils/Mx4jLoader.scala @@ -30,12 +30,14 @@ import javax.management.ObjectName * * This is a Scala port of org.apache.cassandra.utils.Mx4jTool written by Ran Tavory for CASSANDRA-1068 * */ +@deprecated object Mx4jLoader extends Logging { def maybeLoad(): Boolean = { val props = new VerifiableProperties(System.getProperties) if (!props.getBoolean("kafka_mx4jenable", default = false)) return false + warn("MX4j is deprecated and will be removed in Kafka 5.0") val address = props.getString("mx4jaddress", "0.0.0.0") val port = props.getInt("mx4jport", 8082) try { diff --git a/core/src/main/scala/kafka/utils/Pool.scala b/core/src/main/scala/kafka/utils/Pool.scala deleted file mode 100644 index 1a59b41bad51c..0000000000000 --- a/core/src/main/scala/kafka/utils/Pool.scala +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import java.util.concurrent._ - -import org.apache.kafka.common.KafkaException - -import collection.Set -import scala.jdk.CollectionConverters._ - -class Pool[K,V](valueFactory: Option[K => V] = None) extends Iterable[(K, V)] { - - private val pool: ConcurrentMap[K, V] = new ConcurrentHashMap[K, V] - - def put(k: K, v: V): V = pool.put(k, v) - - def putAll(map: java.util.Map[K, V]): Unit = pool.putAll(map) - - def putIfNotExists(k: K, v: V): V = pool.putIfAbsent(k, v) - - /** - * Gets the value associated with the given key. If there is no associated - * value, then create the value using the pool's value factory and return the - * value associated with the key. The user should declare the factory method - * as lazy if its side-effects need to be avoided. - * - * @param key The key to lookup. - * @return The final value associated with the key. - */ - def getAndMaybePut(key: K): V = { - if (valueFactory.isEmpty) - throw new KafkaException("Empty value factory in pool.") - getAndMaybePut(key, valueFactory.get(key)) - } - - /** - * Gets the value associated with the given key. 
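Context for this deletion: the kafka.utils.Pool class being removed is a thin wrapper around ConcurrentHashMap, and the getAndMaybePut(key, createValue) overload whose scaladoc continues just below is implemented directly with ConcurrentMap.computeIfAbsent. A minimal sketch of the plain JDK equivalent that callers can use instead:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class PoolReplacementSketch {
        public static void main(String[] args) {
            // Equivalent of Pool#getAndMaybePut(key, createValue): create the value on first
            // access and return whatever value ends up associated with the key.
            ConcurrentMap<String, StringBuilder> pool = new ConcurrentHashMap<>();
            StringBuilder first = pool.computeIfAbsent("topic-0", key -> new StringBuilder(key));
            StringBuilder second = pool.computeIfAbsent("topic-0", key -> new StringBuilder(key));
            // The factory ran only once, so both lookups observe the same instance.
            System.out.println(first == second); // true
        }
    }
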
If there is no associated - * value, then create the value using the provided by `createValue` and return the - * value associated with the key. - * - * @param key The key to lookup. - * @param createValue Factory function. - * @return The final value associated with the key. - */ - def getAndMaybePut(key: K, createValue: => V): V = - pool.computeIfAbsent(key, _ => createValue) - - def contains(id: K): Boolean = pool.containsKey(id) - - def get(key: K): V = pool.get(key) - - def remove(key: K): V = pool.remove(key) - - def remove(key: K, value: V): Boolean = pool.remove(key, value) - - def removeAll(keys: Iterable[K]): Unit = pool.keySet.removeAll(keys.asJavaCollection) - - def keys: Set[K] = pool.keySet.asScala - - def values: Iterable[V] = pool.values.asScala - - def clear(): Unit = { pool.clear() } - - def foreachEntry(f: (K, V) => Unit): Unit = { - pool.forEach((k, v) => f(k, v)) - } - - override def size: Int = pool.size - - override def iterator: Iterator[(K, V)] = new Iterator[(K,V)]() { - - private val iter = pool.entrySet.iterator - - def hasNext: Boolean = iter.hasNext - - def next(): (K, V) = { - val n = iter.next - (n.getKey, n.getValue) - } - - } - -} diff --git a/core/src/test/java/kafka/security/minikdc/MiniKdc.java b/core/src/test/java/kafka/security/minikdc/MiniKdc.java index 99740a133d092..b612543771d81 100644 --- a/core/src/test/java/kafka/security/minikdc/MiniKdc.java +++ b/core/src/test/java/kafka/security/minikdc/MiniKdc.java @@ -136,8 +136,8 @@ public class MiniKdc { * MiniKdc. */ public MiniKdc(Properties config, File workDir) { - Set requiredProperties = new HashSet<>(List.of(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT, - INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME)); + Set requiredProperties = Set.of(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT, + INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME); if (!config.keySet().containsAll(requiredProperties)) { throw new IllegalArgumentException("Missing required properties: " + requiredProperties); } diff --git a/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java b/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java deleted file mode 100644 index daa0aacca7de4..0000000000000 --- a/core/src/test/java/kafka/server/MetadataVersionConfigValidatorTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server; - -import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.image.MetadataDelta; -import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.image.MetadataProvenance; -import org.apache.kafka.image.loader.LogDeltaManifest; -import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.server.common.MetadataVersion; -import org.apache.kafka.server.fault.FaultHandler; - -import org.junit.jupiter.api.Test; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.willAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -public class MetadataVersionConfigValidatorTest { - - private static final LogDeltaManifest TEST_MANIFEST = LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(90) - .numBytes(88) - .build(); - public static final MetadataProvenance TEST_PROVENANCE = - new MetadataProvenance(50, 3, 8000, true); - - void testWith(MetadataVersion metadataVersion, KafkaConfig config, FaultHandler faultHandler) throws Exception { - try (MetadataVersionConfigValidator validator = new MetadataVersionConfigValidator(config, faultHandler)) { - MetadataDelta delta = new MetadataDelta.Builder() - .setImage(MetadataImage.EMPTY) - .build(); - if (metadataVersion != null) { - delta.replay(new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(metadataVersion.featureLevel())); - } - MetadataImage image = delta.apply(TEST_PROVENANCE); - - validator.onMetadataUpdate(delta, image, TEST_MANIFEST); - } - } - - @Test - void testValidatesConfigOnMetadataChange() throws Exception { - MetadataVersion metadataVersion = MetadataVersion.IBP_3_7_IV2; - KafkaConfig config = mock(KafkaConfig.class); - FaultHandler faultHandler = mock(FaultHandler.class); - - when(config.brokerId()).thenReturn(8); - - testWith(metadataVersion, config, faultHandler); - - verify(config, times(1)).validateWithMetadataVersion(eq(metadataVersion)); - verifyNoMoreInteractions(faultHandler); - } - - @SuppressWarnings("ThrowableNotThrown") - @Test - void testInvokesFaultHandlerOnException() throws Exception { - MetadataVersion metadataVersion = MetadataVersion.IBP_3_7_IV2; - Exception exception = new Exception(); - KafkaConfig config = mock(KafkaConfig.class); - FaultHandler faultHandler = mock(FaultHandler.class); - - when(faultHandler.handleFault(any(), any())).thenReturn(new RuntimeException("returned exception")); - when(config.brokerId()).thenReturn(8); - willAnswer(invocation -> { - throw exception; - }).given(config).validateWithMetadataVersion(eq(metadataVersion)); - - testWith(metadataVersion, config, faultHandler); - - verify(config, times(1)).validateWithMetadataVersion(eq(metadataVersion)); - verify(faultHandler, times(1)).handleFault( - eq("Broker configuration does not support the cluster MetadataVersion"), - eq(exception)); - } -} diff --git a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java deleted file mode 100644 index fa906a8ffb431..0000000000000 --- a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server; - -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.FeatureMetadata; -import org.apache.kafka.clients.admin.QuorumInfo; -import org.apache.kafka.clients.admin.RaftVoterEndpoint; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.test.KafkaClusterTestKit; -import org.apache.kafka.common.test.TestKitNodes; -import org.apache.kafka.server.common.KRaftVersion; -import org.apache.kafka.test.TestUtils; - -import org.junit.jupiter.api.Test; - -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class ReconfigurableQuorumIntegrationTest { - static void checkKRaftVersions(Admin admin, short finalized) throws Exception { - FeatureMetadata featureMetadata = admin.describeFeatures().featureMetadata().get(); - if (finalized > 0) { - assertTrue(featureMetadata.finalizedFeatures().containsKey(KRaftVersion.FEATURE_NAME), - "finalizedFeatures does not contain " + KRaftVersion.FEATURE_NAME + ", finalizedFeatures: " + featureMetadata.finalizedFeatures()); - assertEquals(finalized, featureMetadata.finalizedFeatures(). - get(KRaftVersion.FEATURE_NAME).minVersionLevel()); - assertEquals(finalized, featureMetadata.finalizedFeatures(). - get(KRaftVersion.FEATURE_NAME).maxVersionLevel()); - } else { - assertFalse(featureMetadata.finalizedFeatures().containsKey(KRaftVersion.FEATURE_NAME)); - } - assertEquals((short) 0, featureMetadata.supportedFeatures(). - get(KRaftVersion.FEATURE_NAME).minVersion()); - assertEquals((short) 1, featureMetadata.supportedFeatures(). - get(KRaftVersion.FEATURE_NAME).maxVersion()); - } - - @Test - public void testCreateAndDestroyNonReconfigurableCluster() throws Exception { - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(1). - build() - ).build()) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, () -> { - checkKRaftVersions(admin, KRaftVersion.KRAFT_VERSION_0.featureLevel()); - }); - } - } - } - - @Test - public void testCreateAndDestroyReconfigurableCluster() throws Exception { - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(1). 
- setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). - build() - ).build()) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, () -> { - checkKRaftVersions(admin, KRaftVersion.KRAFT_VERSION_1.featureLevel()); - }); - } - } - } - - static Map findVoterDirs(Admin admin) throws Exception { - QuorumInfo quorumInfo = admin.describeMetadataQuorum().quorumInfo().get(); - Map result = new TreeMap<>(); - quorumInfo.voters().forEach(v -> { - result.put(v.replicaId(), v.replicaDirectoryId()); - }); - return result; - } - - @Test - public void testRemoveController() throws Exception { - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(3). - setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). - build() - ).build()) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { - Map voters = findVoterDirs(admin); - assertEquals(new HashSet<>(List.of(3000, 3001, 3002)), voters.keySet()); - for (int replicaId : new int[] {3000, 3001, 3002}) { - assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); - } - }); - admin.removeRaftVoter(3000, cluster.nodes(). - controllerNodes().get(3000).metadataDirectoryId()).all().get(); - } - } - } - - @Test - public void testRemoveAndAddSameController() throws Exception { - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(4). - setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). - build()).build() - ) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { - Map voters = findVoterDirs(admin); - assertEquals(new HashSet<>(List.of(3000, 3001, 3002, 3003)), voters.keySet()); - for (int replicaId : new int[] {3000, 3001, 3002, 3003}) { - assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); - } - }); - Uuid dirId = cluster.nodes().controllerNodes().get(3000).metadataDirectoryId(); - admin.removeRaftVoter(3000, dirId).all().get(); - TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { - Map voters = findVoterDirs(admin); - assertEquals(new HashSet<>(List.of(3001, 3002, 3003)), voters.keySet()); - for (int replicaId : new int[] {3001, 3002, 3003}) { - assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); - } - }); - admin.addRaftVoter( - 3000, - dirId, - Set.of(new RaftVoterEndpoint("CONTROLLER", "example.com", 8080)) - ).all().get(); - } - } - } -} diff --git a/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java b/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java deleted file mode 100644 index 65c75d584db78..0000000000000 --- a/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
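Context for the removal just above: ReconfigurableQuorumIntegrationTest exercised dynamic KRaft quorum membership through the Admin client. Condensed from that deleted test, a minimal sketch of the same calls against a running cluster (the bootstrap address is a placeholder; node id 3000 and the CONTROLLER/example.com:8080 endpoint are the values the test used):

    import java.util.Properties;
    import java.util.Set;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.QuorumInfo;
    import org.apache.kafka.clients.admin.RaftVoterEndpoint;
    import org.apache.kafka.common.Uuid;

    public class QuorumReconfigurationSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder address
            try (Admin admin = Admin.create(props)) {
                // Current voter set: replica id -> directory id.
                QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
                quorum.voters().forEach(v ->
                    System.out.println(v.replicaId() + " -> " + v.replicaDirectoryId()));

                // Remove controller 3000 and add it back on its CONTROLLER listener,
                // reusing the directory id reported for it by the quorum description.
                Uuid dirId = quorum.voters().stream()
                    .filter(v -> v.replicaId() == 3000)
                    .findFirst().orElseThrow()
                    .replicaDirectoryId();
                admin.removeRaftVoter(3000, dirId).all().get();
                admin.addRaftVoter(3000, dirId,
                    Set.of(new RaftVoterEndpoint("CONTROLLER", "example.com", 8080))).all().get();
            }
        }
    }
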
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server.integration; -import kafka.integration.KafkaServerTestHarness; -import kafka.server.KafkaBroker; -import kafka.server.KafkaConfig; -import kafka.utils.Logging; -import kafka.utils.TestUtils; - -import org.apache.kafka.clients.CommonClientConfigs; -import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.AlterConfigOp; -import org.apache.kafka.clients.admin.ConfigEntry; -import org.apache.kafka.clients.admin.FeatureUpdate; -import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.clients.admin.TopicDescription; -import org.apache.kafka.clients.admin.UpdateFeaturesOptions; -import org.apache.kafka.clients.consumer.Consumer; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartitionInfo; -import org.apache.kafka.common.config.ConfigResource; -import org.apache.kafka.common.config.TopicConfig; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.serialization.StringSerializer; -import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; -import org.apache.kafka.server.common.MetadataVersion; -import org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.function.BiFunction; -import java.util.stream.Collectors; - -import scala.collection.JavaConverters; -import scala.collection.Seq; -import scala.collection.mutable.HashMap; - -import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness implements Logging { - private String bootstrapServer; - private String testTopicName; - private Admin adminClient; - - @Override - public MetadataVersion metadataVersion() { - return 
MetadataVersion.IBP_4_0_IV1; - } - - @Override - public Seq generateConfigs() { - List brokerConfigs = new ArrayList<>(); - brokerConfigs.addAll(scala.collection.JavaConverters.seqAsJavaList(TestUtils.createBrokerConfigs( - 5, // The tests require 4 brokers to host the partition. However, we need the 5th broker to handle the admin client requests. - true, - true, - scala.Option.empty(), - scala.Option.empty(), - scala.Option.empty(), - true, - false, - false, - false, - new HashMap<>(), - 1, - false, - 1, - (short) 4, - 0, - false - ))); - List configs = new ArrayList<>(); - for (Properties props : brokerConfigs) { - configs.add(KafkaConfig.fromProps(props)); - } - return JavaConverters.asScalaBuffer(configs).toSeq(); - } - - @BeforeEach - @Override - public void setUp(TestInfo info) { - super.setUp(info); - // create adminClient - Properties props = new Properties(); - bootstrapServer = bootstrapServers(listenerName()); - props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); - adminClient = Admin.create(props); - adminClient.updateFeatures( - Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, - new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)), - new UpdateFeaturesOptions() - ); - testTopicName = String.format("%s-%s", info.getTestMethod().get().getName(), "ELR-test"); - } - - @AfterEach - public void close() throws Exception { - if (adminClient != null) adminClient.close(); - } - - @ParameterizedTest - @ValueSource(strings = {"kraft"}) - public void testHighWatermarkShouldNotAdvanceIfUnderMinIsr(String quorum) throws ExecutionException, InterruptedException { - adminClient.createTopics( - List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get(); - TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); - - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); - Collection ops = new ArrayList<>(); - ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); - // alter configs on target cluster - adminClient.incrementalAlterConfigs(configOps).all().get(); - Producer producer = null; - Consumer consumer = null; - try { - TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName); - TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); - List initialReplicas = topicPartitionInfo.replicas(); - assertEquals(4, topicPartitionInfo.isr().size()); - assertEquals(0, topicPartitionInfo.elr().size()); - assertEquals(0, topicPartitionInfo.lastKnownElr().size()); - - Properties producerProps = new Properties(); - producerProps.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - producerProps.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); - // Use Ack=1 for the producer. 
- producerProps.put(ProducerConfig.ACKS_CONFIG, "1"); - producer = new KafkaProducer(producerProps); - - Properties consumerProps = new Properties(); - consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); - consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test"); - consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "10"); - consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - consumerProps.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - consumerProps.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - consumer = new KafkaConsumer<>(consumerProps); - consumer.subscribe(Set.of(testTopicName)); - - producer.send(new ProducerRecord<>(testTopicName, "0", "0")).get(); - waitUntilOneMessageIsConsumed(consumer); - - killBroker(initialReplicas.get(0).id()); - killBroker(initialReplicas.get(1).id()); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 2 && elrSize == 1; - }); - - // Now the partition is under min ISR. HWM should not advance. - producer.send(new ProducerRecord<>(testTopicName, "1", "1")).get(); - Thread.sleep(100); - assertEquals(0, consumer.poll(Duration.ofSeconds(1L)).count()); - - // Restore the min ISR and the previous log should be visible. - startBroker(initialReplicas.get(1).id()); - startBroker(initialReplicas.get(0).id()); - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 4 && elrSize == 0; - }); - - waitUntilOneMessageIsConsumed(consumer); - } finally { - restartDeadBrokers(false); - if (consumer != null) consumer.close(); - if (producer != null) producer.close(); - } - } - - void waitUntilOneMessageIsConsumed(Consumer consumer) { - TestUtils.waitUntilTrue( - () -> { - try { - ConsumerRecords record = consumer.poll(Duration.ofMillis(100L)); - return record.count() >= 1; - } catch (Exception e) { - return false; - } - }, - () -> "fail to consume messages", - DEFAULT_MAX_WAIT_MS, 100L - ); - } - - @ParameterizedTest - @ValueSource(strings = {"kraft"}) - public void testElrMemberCanBeElected(String quorum) throws ExecutionException, InterruptedException { - adminClient.createTopics( - List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get(); - TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); - - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); - Collection ops = new ArrayList<>(); - ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); - // alter configs on target cluster - adminClient.incrementalAlterConfigs(configOps).all().get(); - - try { - TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName); - TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); - List initialReplicas = topicPartitionInfo.replicas(); - assertEquals(4, topicPartitionInfo.isr().size()); - assertEquals(0, topicPartitionInfo.elr().size()); - assertEquals(0, topicPartitionInfo.lastKnownElr().size()); - - killBroker(initialReplicas.get(0).id()); - killBroker(initialReplicas.get(1).id()); - killBroker(initialReplicas.get(2).id()); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 1 && elrSize == 2; - }); - - killBroker(initialReplicas.get(3).id()); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 0 && elrSize == 3; - 
}); - - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - assertEquals(1, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); - int expectLastKnownLeader = initialReplicas.get(3).id(); - assertEquals(expectLastKnownLeader, topicPartitionInfo.lastKnownElr().get(0).id(), topicPartitionInfo.toString()); - - // At this point, all the replicas are failed and the last know leader is No.3 and 3 members in the ELR. - // Restart one broker of the ELR and it should be the leader. - - int expectLeader = topicPartitionInfo.elr().stream() - .filter(node -> node.id() != expectLastKnownLeader).toList().get(0).id(); - - startBroker(expectLeader); - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 1 && elrSize == 2; - }); - - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); - assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString()); - - // Start another 2 brokers and the ELR fields should be cleaned. - topicPartitionInfo.replicas().stream().filter(node -> node.id() != expectLeader).limit(2) - .forEach(node -> startBroker(node.id())); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 3 && elrSize == 0; - }); - - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); - assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString()); - } finally { - restartDeadBrokers(false); - } - } - - @ParameterizedTest - @ValueSource(strings = {"kraft"}) - public void testElrMemberShouldBeKickOutWhenUncleanShutdown(String quorum) throws ExecutionException, InterruptedException { - adminClient.createTopics( - List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get(); - TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); - - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); - Collection ops = new ArrayList<>(); - ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); - // alter configs on target cluster - adminClient.incrementalAlterConfigs(configOps).all().get(); - - try { - TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName); - TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); - List initialReplicas = topicPartitionInfo.replicas(); - assertEquals(4, topicPartitionInfo.isr().size()); - assertEquals(0, topicPartitionInfo.elr().size()); - assertEquals(0, topicPartitionInfo.lastKnownElr().size()); - - killBroker(initialReplicas.get(0).id()); - killBroker(initialReplicas.get(1).id()); - killBroker(initialReplicas.get(2).id()); - killBroker(initialReplicas.get(3).id()); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 0 && elrSize == 3; - }); - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - - int brokerToBeUncleanShutdown = topicPartitionInfo.elr().get(0).id(); - KafkaBroker broker = 
brokers().find(b -> { - return b.config().brokerId() == brokerToBeUncleanShutdown; - }).get(); - Seq dirs = broker.logManager().liveLogDirs(); - assertEquals(1, dirs.size()); - CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString()); - assertTrue(handler.exists()); - assertDoesNotThrow(() -> handler.delete()); - - // After remove the clean shutdown file, the broker should report unclean shutdown during restart. - startBroker(brokerToBeUncleanShutdown); - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 0 && elrSize == 2; - }); - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - assertNull(topicPartitionInfo.leader()); - assertEquals(1, topicPartitionInfo.lastKnownElr().size()); - } finally { - restartDeadBrokers(false); - } - } - - /* - This test is only valid for KIP-966 part 1. When the unclean recovery is implemented, it should be removed. - */ - @ParameterizedTest - @ValueSource(strings = {"kraft"}) - public void testLastKnownLeaderShouldBeElectedIfEmptyElr(String quorum) throws ExecutionException, InterruptedException { - adminClient.createTopics( - List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get(); - TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); - - ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); - Collection ops = new ArrayList<>(); - ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); - // alter configs on target cluster - adminClient.incrementalAlterConfigs(configOps).all().get(); - - try { - TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName); - TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); - List initialReplicas = topicPartitionInfo.replicas(); - assertEquals(4, topicPartitionInfo.isr().size()); - assertEquals(0, topicPartitionInfo.elr().size()); - assertEquals(0, topicPartitionInfo.lastKnownElr().size()); - - killBroker(initialReplicas.get(0).id()); - killBroker(initialReplicas.get(1).id()); - killBroker(initialReplicas.get(2).id()); - killBroker(initialReplicas.get(3).id()); - - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 0 && elrSize == 3; - }); - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - int lastKnownLeader = topicPartitionInfo.lastKnownElr().get(0).id(); - - Set initialReplicaSet = initialReplicas.stream().map(node -> node.id()).collect(Collectors.toSet()); - brokers().foreach(broker -> { - if (initialReplicaSet.contains(broker.config().brokerId())) { - Seq dirs = broker.logManager().liveLogDirs(); - assertEquals(1, dirs.size()); - CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString()); - assertDoesNotThrow(() -> handler.delete()); - } - return true; - }); - - - // After remove the clean shutdown file, the broker should report unclean shutdown during restart. 
- topicPartitionInfo.replicas().forEach(replica -> { - if (replica.id() != lastKnownLeader) startBroker(replica.id()); - }); - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize == 0 && elrSize == 1; - }); - topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - assertNull(topicPartitionInfo.leader()); - assertEquals(1, topicPartitionInfo.lastKnownElr().size()); - - // Now if the last known leader goes through unclean shutdown, it will still be elected. - startBroker(lastKnownLeader); - waitForIsrAndElr((isrSize, elrSize) -> { - return isrSize > 0 && elrSize == 0; - }); - - TestUtils.waitUntilTrue( - () -> { - try { - TopicPartitionInfo partition = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName).partitions().get(0); - if (partition.leader() == null) return false; - return partition.lastKnownElr().isEmpty() && partition.elr().isEmpty() && partition.leader().id() == lastKnownLeader; - } catch (Exception e) { - return false; - } - }, - () -> String.format("Partition metadata for %s is not correct", testTopicName), - DEFAULT_MAX_WAIT_MS, 100L - ); - } finally { - restartDeadBrokers(false); - } - } - - void waitForIsrAndElr(BiFunction isIsrAndElrSizeSatisfied) { - TestUtils.waitUntilTrue( - () -> { - try { - TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName)) - .allTopicNames().get().get(testTopicName); - TopicPartitionInfo partition = topicDescription.partitions().get(0); - return isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size()); - } catch (Exception e) { - return false; - } - }, - () -> String.format("Partition metadata for %s is not propagated", testTopicName), - DEFAULT_MAX_WAIT_MS, 100L); - } -} diff --git a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java index b700e162929e3..b5c8740639c2f 100644 --- a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java +++ b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java @@ -16,13 +16,12 @@ */ package kafka.server.logger; -import kafka.utils.LoggingController; - import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig; +import org.apache.kafka.server.logger.LoggingController; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -82,18 +81,18 @@ public void testValidateBogusLogLevelNameNotAllowed() { @Test public void testValidateSetRootLogLevelConfig() { MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). - setName(LoggingController.ROOT_LOGGER()). + setName(LoggingController.ROOT_LOGGER). setConfigOperation(OpType.SET.id()). setValue("TRACE"))); } @Test public void testValidateRemoveRootLogLevelConfigNotAllowed() { - assertEquals("Removing the log level of the " + LoggingController.ROOT_LOGGER() + + assertEquals("Removing the log level of the " + LoggingController.ROOT_LOGGER + " logger is not allowed", Assertions.assertThrows(InvalidRequestException.class, () -> MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). - setName(LoggingController.ROOT_LOGGER()). + setName(LoggingController.ROOT_LOGGER). 
setConfigOperation(OpType.DELETE.id()). setValue("")))).getMessage()); } diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index bb8b51b40e297..ffa9f8b11456a 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -17,7 +17,6 @@ package kafka.server.share; import kafka.cluster.Partition; -import kafka.server.LogReadResult; import kafka.server.QuotaFactory; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; @@ -25,10 +24,16 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.KafkaStorageException; +import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.ShareFetchResponseData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Records; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.LogReadResult; +import org.apache.kafka.server.log.remote.storage.RemoteLogManager; import org.apache.kafka.server.purgatory.DelayedOperationKey; import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; import org.apache.kafka.server.share.SharePartitionKey; @@ -43,14 +48,18 @@ import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogOffsetSnapshot; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; import org.mockito.Mockito; import java.util.ArrayList; @@ -59,20 +68,30 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.stream.Collectors; +import scala.Option; import scala.Tuple2; +import scala.collection.Seq; import scala.jdk.javaapi.CollectionConverters; +import static kafka.server.share.PendingRemoteFetches.RemoteFetch; import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; +import static kafka.server.share.SharePartitionManagerTest.REMOTE_FETCH_MAX_WAIT_MS; import static kafka.server.share.SharePartitionManagerTest.buildLogReadResult; import static kafka.server.share.SharePartitionManagerTest.mockReplicaManagerDelayedShareFetch; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import 
static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -95,6 +114,8 @@ public class DelayedShareFetchTest { private static final FetchParams FETCH_PARAMS = new FetchParams( FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); + private static final FetchDataInfo REMOTE_FETCH_INFO = new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), + MemoryRecords.EMPTY, false, Optional.empty(), Optional.of(mock(RemoteStorageFetchInfo.class))); private static final BrokerTopicStats BROKER_TOPIC_STATS = new BrokerTopicStats(); private Timer mockTimer; @@ -121,9 +142,6 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -135,13 +153,29 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + ReplicaManager replicaManager = mock(ReplicaManager.class); + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(new MockTime()); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withShareGroupMetrics(shareGroupMetrics) + .withFetchId(fetchId) + .withReplicaManager(replicaManager) .build()); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + // Since there is no partition that can be acquired, tryComplete should return false. 
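+        // Note: the acquire-lock stubs above are keyed on the specific fetchId handed to the builder; with
+        // Mockito defaults, maybeAcquireFetchLock called with any other id returns false, so only this
+        // delayed fetch attempt can take the lock. Both partitions also report canAcquireRecords() == false.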
assertFalse(delayedShareFetch.tryComplete()); assertFalse(delayedShareFetch.isCompleted()); @@ -166,9 +200,6 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -198,9 +229,19 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + Time time = mock(Time.class); when(time.hiResClockMs()).thenReturn(100L).thenReturn(110L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) @@ -209,7 +250,12 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) .withShareGroupMetrics(shareGroupMetrics) .withTime(time) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); // Since sp1 cannot be acquired, tryComplete should return false. @@ -238,9 +284,6 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -264,12 +307,27 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); BiConsumer exceptionHandler = mockExceptionHandler(); + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withExceptionHandler(exceptionHandler) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); // Since sp1 cannot be acquired, tryComplete should return false. 
@@ -292,9 +350,6 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -317,6 +372,7 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { Time time = mock(Time.class); when(time.hiResClockMs()).thenReturn(120L).thenReturn(140L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) @@ -324,7 +380,12 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) .withShareGroupMetrics(shareGroupMetrics) .withTime(time) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); // Since sp1 can be acquired, tryComplete should return true. @@ -351,9 +412,6 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -368,13 +426,19 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { Time time = mock(Time.class); when(time.hiResClockMs()).thenReturn(90L).thenReturn(140L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) .withShareGroupMetrics(shareGroupMetrics) .withTime(time) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); @@ -405,9 +469,6 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); @@ -427,6 +488,7 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { Time time = mock(Time.class); when(time.hiResClockMs()).thenReturn(10L).thenReturn(140L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) @@ -434,7 +496,12 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) .withShareGroupMetrics(shareGroupMetrics) .withTime(time) + .withFetchId(fetchId) .build()); + + 
when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); @@ -471,23 +538,27 @@ public void testToCompleteAnAlreadyCompletedFuture() { ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(new MockTime()); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) .withShareGroupMetrics(shareGroupMetrics) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); // Force completing the share fetch request for the first time should complete the future with an empty map. delayedShareFetch.forceComplete(); assertTrue(delayedShareFetch.isCompleted()); // Verifying that the first forceComplete calls acquirablePartitions method in DelayedShareFetch. - Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); + Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(sharePartitions); assertEquals(0, future.join().size()); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); @@ -497,7 +568,7 @@ public void testToCompleteAnAlreadyCompletedFuture() { delayedShareFetch.forceComplete(); assertTrue(delayedShareFetch.isCompleted()); // Verifying that the second forceComplete does not call acquirablePartitions method in DelayedShareFetch. - Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); + Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(sharePartitions); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); // Assert both metrics shall be recorded only once. @@ -520,10 +591,6 @@ public void testForceCompleteTriggersDelayedActionsQueue() { SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); - // No share partition is available for acquiring initially. 
- when(sp0.maybeAcquireFetchLock()).thenReturn(false); - when(sp1.maybeAcquireFetchLock()).thenReturn(false); - when(sp2.maybeAcquireFetchLock()).thenReturn(false); LinkedHashMap sharePartitions1 = new LinkedHashMap<>(); sharePartitions1.put(tp0, sp0); @@ -542,12 +609,32 @@ public void testForceCompleteTriggersDelayedActionsQueue() { List delayedShareFetchWatchKeys = new ArrayList<>(); topicIdPartitions1.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + Partition p2 = mock(Partition.class); + when(p2.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); + + Uuid fetchId1 = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch1 = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch1) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions1) + .withFetchId(fetchId1) .build(); + // No share partition is available for acquiring initially. + when(sp0.maybeAcquireFetchLock(fetchId1)).thenReturn(false); + when(sp1.maybeAcquireFetchLock(fetchId1)).thenReturn(false); + when(sp2.maybeAcquireFetchLock(fetchId1)).thenReturn(false); + // We add a delayed share fetch entry to the purgatory which will be waiting for completion since neither of the // partitions in the share fetch request can be acquired. delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch1, delayedShareFetchWatchKeys); @@ -570,15 +657,17 @@ public void testForceCompleteTriggersDelayedActionsQueue() { sharePartitions2.put(tp1, sp1); sharePartitions2.put(tp2, sp2); + Uuid fetchId2 = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch2 = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch2) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions2) .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) + .withFetchId(fetchId2) .build()); // sp1 can be acquired now - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId2)).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); @@ -664,8 +753,6 @@ public void testExceptionInMinBytesCalculation() { SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); @@ -691,6 +778,13 @@ public void testExceptionInMinBytesCalculation() { Time time = mock(Time.class); when(time.hiResClockMs()).thenReturn(100L).thenReturn(110L).thenReturn(170L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); + Uuid fetchId = Uuid.randomUuid(); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + DelayedShareFetch delayedShareFetch = 
spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) @@ -699,8 +793,11 @@ public void testExceptionInMinBytesCalculation() { .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) .withShareGroupMetrics(shareGroupMetrics) .withTime(time) + .withFetchId(fetchId) .build()); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + // Try complete should return false as the share partition has errored out. assertFalse(delayedShareFetch.tryComplete()); // Fetch should remain pending and should be completed on request timeout. @@ -712,10 +809,10 @@ public void testExceptionInMinBytesCalculation() { Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); - Mockito.verify(sp0, times(1)).releaseFetchLock(); + Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); // Force complete the request as it's still pending. Return false from the share partition lock acquire. - when(sp0.maybeAcquireFetchLock()).thenReturn(false); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(false); assertTrue(delayedShareFetch.forceComplete()); assertTrue(delayedShareFetch.isCompleted()); @@ -741,7 +838,6 @@ public void testTryCompleteLocksReleasedOnCompleteException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); @@ -756,12 +852,17 @@ public void testTryCompleteLocksReleasedOnCompleteException() { mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) + .withFetchId(fetchId) .build()); + + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); // Throw exception for onComplete. 
doThrow(new RuntimeException()).when(delayedShareFetch).onComplete(); @@ -769,7 +870,7 @@ public void testTryCompleteLocksReleasedOnCompleteException() { assertFalse(delayedShareFetch.tryComplete()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); - Mockito.verify(sp0, times(1)).releaseFetchLock(); + Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); } @Test @@ -778,7 +879,6 @@ public void testLocksReleasedForCompletedFetch() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); LinkedHashMap sharePartitions1 = new LinkedHashMap<>(); @@ -794,18 +894,21 @@ public void testLocksReleasedForCompletedFetch() { PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions1) .withReplicaManager(replicaManager) .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) + .withFetchId(fetchId) .build(); DelayedShareFetch spy = spy(delayedShareFetch); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); doReturn(false).when(spy).forceComplete(); assertFalse(spy.tryComplete()); - Mockito.verify(sp0, times(1)).releaseFetchLock(); + Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); } @@ -816,7 +919,6 @@ public void testLocksReleasedAcquireException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenThrow(new RuntimeException("Acquire exception")); LinkedHashMap sharePartitions = new LinkedHashMap<>(); @@ -826,13 +928,25 @@ public void testLocksReleasedAcquireException() { new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + Uuid fetchId = Uuid.randomUuid(); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + ReplicaManager replicaManager = mock(ReplicaManager.class); + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) + .withFetchId(fetchId) + .withReplicaManager(replicaManager) .build(); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.tryComplete()); - Mockito.verify(sp0, times(1)).releaseFetchLock(); + Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); } @@ -843,7 +957,6 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); @@ -858,13 +971,17 @@ public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { 
PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); when(partitionMaxBytesStrategy.maxBytes(anyInt(), any(), anyInt())).thenThrow(new IllegalArgumentException("Exception thrown")); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withExceptionHandler(mockExceptionHandler()) .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) + .withFetchId(fetchId) .build()); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertFalse(delayedShareFetch.isCompleted()); assertTrue(delayedShareFetch.tryComplete()); assertTrue(delayedShareFetch.isCompleted()); @@ -897,11 +1014,6 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab SharePartition sp3 = mock(SharePartition.class); SharePartition sp4 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); - when(sp3.maybeAcquireFetchLock()).thenReturn(true); - when(sp4.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(true); @@ -945,13 +1057,21 @@ public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirab mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp3, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp4, 1); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .withFetchId(fetchId) .build()); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp3.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp4.maybeAcquireFetchLock(fetchId)).thenReturn(true); + assertTrue(delayedShareFetch.tryComplete()); assertTrue(delayedShareFetch.isCompleted()); @@ -993,11 +1113,6 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab SharePartition sp3 = mock(SharePartition.class); SharePartition sp4 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - when(sp2.maybeAcquireFetchLock()).thenReturn(false); - when(sp3.maybeAcquireFetchLock()).thenReturn(true); - when(sp4.maybeAcquireFetchLock()).thenReturn(false); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); @@ -1032,13 +1147,21 @@ public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirab mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); + Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) 
.withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .withFetchId(fetchId) .build()); + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(false); + when(sp3.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp4.maybeAcquireFetchLock(fetchId)).thenReturn(false); + assertTrue(delayedShareFetch.tryComplete()); assertTrue(delayedShareFetch.isCompleted()); @@ -1155,6 +1278,866 @@ public void testOnCompleteExecutionOnTimeout() { assertEquals(1, delayedShareFetch.expiredRequestMeter().count()); } + @SuppressWarnings("unchecked") + @Test + public void testRemoteStorageFetchTryCompleteReturnsFalse() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + new CompletableFuture<>(), List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + when(sp1.nextFetchOffset()).thenReturn(20L); + when(sp2.nextFetchOffset()).thenReturn(30L); + + // Fetch offset matches with the cached entry for sp0 but not for sp1 and sp2. Hence, a replica manager fetch will happen for sp1 and sp2. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(10, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + // Mocking local log read result for tp1 and remote storage read result for tp2. + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp1), Set.of(tp2))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock. 
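+        // The bare mocked Future returned by asyncRead below never completes, so the remote read for tp2 stays
+        // in flight: tryComplete() is expected to return false, keep the request pending, and release only the
+        // locks of the locally served partitions tp0 and tp1.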
+ RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + when(remoteLogManager.asyncRead(any(), any())).thenReturn(mock(Future.class)); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + Partition p2 = mock(Partition.class); + when(p2.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); + + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) + .withFetchId(fetchId) + .build()); + + // All the topic partitions are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + assertFalse(delayedShareFetch.isCompleted()); + assertFalse(delayedShareFetch.tryComplete()); + assertFalse(delayedShareFetch.isCompleted()); + // Remote fetch object gets created for delayed share fetch object. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // Verify the locks are released for local log read topic partitions tp0 and tp1. + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1)); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + + @SuppressWarnings("unchecked") + @Test + public void testRemoteStorageFetchPartitionLeaderChanged() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + + SharePartition sp0 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + + // Fetch offset does not match with the cached entry for sp0, hence, a replica manager fetch will happen for sp0. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + // Mocking remote storage read result for tp0. + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock. 
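+        // In this test, however, p0 is stubbed below with isLeader() == false and the replica manager runs the
+        // share-fetch timer task immediately, so the request is expected to complete via the leadership check
+        // even though the remote read itself never finishes.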
+ RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + when(remoteLogManager.asyncRead(any(), any())).thenReturn(mock(Future.class)); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(false); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0))) + .withFetchId(fetchId) + .build()); + + // All the topic partitions are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. + doAnswer(invocationOnMock -> { + TimerTask timerTask = invocationOnMock.getArgument(0); + timerTask.run(); + return null; + }).when(replicaManager).addShareFetchTimerRequest(any()); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + assertTrue(delayedShareFetch.isCompleted()); + // Remote fetch object gets created for delayed share fetch object. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // Verify the locks are released for local log read topic partitions tp0. + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + + @SuppressWarnings("unchecked") + @Test + public void testRemoteStorageFetchTryCompleteThrowsException() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + when(sp1.nextFetchOffset()).thenReturn(20L); + when(sp2.nextFetchOffset()).thenReturn(25L); + + // Fetch offset does not match with the cached entry for sp0, sp1 and sp2. Hence, a replica manager fetch will happen for all. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + // Mocking local log read result for tp0 and remote storage read result for tp1 and tp2. 
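+        // (buildLocalAndRemoteFetchResult is a helper in this test class; as its usage suggests, the second set
+        // of partitions is returned with remote-storage fetch info so that tp1 and tp2 take the remote fetch path.)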
+ doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0), Set.of(tp1, tp2))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Exception will be thrown during the creation of remoteFetch object for tp2. + // remoteFetchTask gets created for tp1 successfully. + Future remoteFetchTask = mock(Future.class); + doAnswer(invocation -> { + when(remoteFetchTask.isCancelled()).thenReturn(true); + return false; + }).when(remoteFetchTask).cancel(false); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + when(remoteLogManager.asyncRead(any(), any())) + .thenReturn(remoteFetchTask) // for tp1 + .thenThrow(new RejectedExecutionException("Exception thrown")); // for tp2 + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + BiConsumer exceptionHandler = mockExceptionHandler(); + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withExceptionHandler(exceptionHandler) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) + .withFetchId(fetchId) + .build()); + + // All the topic partitions are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + assertFalse(delayedShareFetch.isCompleted()); + // tryComplete returns true and goes to forceComplete once the exception occurs. + assertTrue(delayedShareFetch.tryComplete()); + assertTrue(delayedShareFetch.isCompleted()); + // The future of shareFetch completes. + assertTrue(shareFetch.isCompleted()); + // The remoteFetchTask created for tp1 is cancelled successfully. + assertTrue(remoteFetchTask.isCancelled()); + assertFalse(future.isCompletedExceptionally()); + assertEquals(Set.of(tp1, tp2), future.join().keySet()); + // Exception occurred and was handled. + Mockito.verify(exceptionHandler, times(2)).accept(any(), any()); + // Verify the locks are released for all local and remote read topic partitions tp0, tp1 and tp2 because of exception occurrence. 
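+        // The release happens in two separate calls: one for the local-read set {tp0} and one for the remote
+        // set {tp1, tp2}, as verified below.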
+ Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp1, tp2)); + Mockito.verify(delayedShareFetch, times(1)).onComplete(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + + @SuppressWarnings("unchecked") + @Test + public void testRemoteStorageFetchTryCompletionDueToBrokerBecomingOffline() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + when(sp1.nextFetchOffset()).thenReturn(20L); + when(sp2.nextFetchOffset()).thenReturn(30L); + + // Fetch offset matches with the cached entry for sp0 but not for sp1 and sp2. Hence, a replica manager fetch will happen for sp1 and sp2 during tryComplete. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(10, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + // Mocking local log read result for tp1 and remote storage read result for tp2 on first replicaManager readFromLog call(from tryComplete). + // Mocking local log read result for tp0 and tp1 on second replicaManager readFromLog call(from onComplete). + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp1), Set.of(tp2)) + ).doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of()) + ).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock but the broker becomes unavailable. 
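+        // getPartitionOrException(tp2) is stubbed further down to throw KafkaStorageException, simulating the
+        // offline replica; the in-flight remote fetch task for tp2 is then expected to be cancelled and the
+        // response to contain only the locally served tp0 and tp1.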
+ Future remoteFetchTask = mock(Future.class); + doAnswer(invocation -> { + when(remoteFetchTask.isCancelled()).thenReturn(true); + return false; + }).when(remoteFetchTask).cancel(false); + + when(remoteFetchTask.cancel(false)).thenReturn(true); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + when(remoteLogManager.asyncRead(any(), any())).thenReturn(remoteFetchTask); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenThrow(mock(KafkaStorageException.class)); + + // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. + doAnswer(invocationOnMock -> { + TimerTask timerTask = invocationOnMock.getArgument(0); + timerTask.run(); + return null; + }).when(replicaManager).addShareFetchTimerRequest(any()); + + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) + .withFetchId(fetchId) + .build()); + + // All the topic partitions are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + + assertTrue(delayedShareFetch.isCompleted()); + // Pending remote fetch object gets created for delayed share fetch. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + List remoteFetches = delayedShareFetch.pendingRemoteFetches().remoteFetches(); + assertEquals(1, remoteFetches.size()); + assertTrue(remoteFetches.get(0).remoteFetchTask().isCancelled()); + // Partition locks should be released for all 3 topic partitions + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1, tp2)); + assertTrue(shareFetch.isCompleted()); + // Share fetch response contained tp0 and tp1 (local fetch) but not tp2, since it errored out. + assertEquals(Set.of(tp0, tp1), future.join().keySet()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + + @Test + public void testRemoteStorageFetchRequestCompletionOnFutureCompletionFailure() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(false); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + // Fetch offset does not match with the cached entry for sp0. Hence, a replica manager fetch will happen for sp0. 
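+        // (The remote read in this test completes immediately, but with a TimeoutException; the expectation
+        // further down is that tp0 is answered with Errors.REQUEST_TIMED_OUT.)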
+ when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + // Mocking remote storage read result for tp0. + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. + RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( + Optional.empty(), + Optional.of(new TimeoutException("Error occurred while creating remote fetch result")) // Remote fetch result is returned with an error. + ); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + doAnswer(invocationOnMock -> { + // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. + Consumer callback = invocationOnMock.getArgument(1); + callback.accept(remoteFetchResult); + return CompletableFuture.completedFuture(remoteFetchResult); + }).when(remoteLogManager).asyncRead(any(), any()); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Uuid fetchId = Uuid.randomUuid(); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1))) + .withFetchId(fetchId) + .build()); + + // sp0 is acquirable, sp1 is not acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(false); + + when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn( + createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + + assertTrue(delayedShareFetch.isCompleted()); + // Pending remote fetch object gets created for delayed share fetch. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // Verify the locks are released for tp0. 
+ Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + assertTrue(shareFetch.isCompleted()); + assertEquals(Set.of(tp0), future.join().keySet()); + assertEquals(Errors.REQUEST_TIMED_OUT.code(), future.join().get(tp0).errorCode()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + + @Test + public void testRemoteStorageFetchRequestCompletionOnFutureCompletionSuccessfully() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + + SharePartition sp0 = mock(SharePartition.class); + + + when(sp0.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + // Fetch offset does not match with the cached entry for sp0. Hence, a replica manager fetch will happen for sp0. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + // Mocking remote storage read result for tp0. + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. + RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( + Optional.of(REMOTE_FETCH_INFO), + Optional.empty() // Remote fetch result is returned successfully without error. + ); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + doAnswer(invocationOnMock -> { + // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. + Consumer callback = invocationOnMock.getArgument(1); + callback.accept(remoteFetchResult); + return CompletableFuture.completedFuture(remoteFetchResult); + }).when(remoteLogManager).asyncRead(any(), any()); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Uuid fetchId = Uuid.randomUuid(); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0))) + .withFetchId(fetchId) + .build()); + + // sp0 is acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + + assertTrue(delayedShareFetch.isCompleted()); + // Pending remote fetch object gets created for delayed share fetch. 
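+            // Because the remote read has already completed successfully, the request completes during
+            // tryComplete and tp0 is answered below with Errors.NONE.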
+ assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // Verify the locks are released for tp0. + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + assertTrue(shareFetch.isCompleted()); + assertEquals(Set.of(tp0), future.join().keySet()); + assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + + @Test + public void testRemoteStorageFetchRequestCompletionAlongWithLocalLogRead() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + when(sp1.nextFetchOffset()).thenReturn(20L); + when(sp2.nextFetchOffset()).thenReturn(30L); + + // Fetch offset does not match with the cached entry for sp0, sp1 and sp2. Hence, a replica manager fetch will happen for all of them in tryComplete. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); + partitionDataMap.put(tp2, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + // Mocking local log read result for tp0, tp1 and remote storage read result for tp2 on first replicaManager readFromLog call(from tryComplete). + // Mocking local log read result for tp0 and tp1 on second replicaManager readFromLog call(from onComplete). + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of(tp2)) + ).doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of()) + ).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. + RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( + Optional.of(REMOTE_FETCH_INFO), + Optional.empty() // Remote fetch result is returned successfully without error. 
+ ); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + doAnswer(invocationOnMock -> { + // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. + Consumer callback = invocationOnMock.getArgument(1); + callback.accept(remoteFetchResult); + return CompletableFuture.completedFuture(remoteFetchResult); + }).when(remoteLogManager).asyncRead(any(), any()); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + Partition p2 = mock(Partition.class); + when(p2.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); + + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withReplicaManager(replicaManager) + .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) + .withFetchId(fetchId) + .build()); + + // All the topic partitions are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + + assertTrue(delayedShareFetch.isCompleted()); + // Pending remote fetch object gets created for delayed share fetch. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // the future of shareFetch completes. + assertTrue(shareFetch.isCompleted()); + assertEquals(Set.of(tp0, tp1, tp2), future.join().keySet()); + // Verify the locks are released for both local log and remote storage read topic partitions tp0, tp1 and tp2. 
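+            // Here a single releasePartitionLocks call covers the local-read partitions (tp0, tp1) and the
+            // remote one (tp2), unlike the exception test above where the two groups were released separately.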
+ Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1, tp2)); + assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); + assertEquals(Errors.NONE.code(), future.join().get(tp1).errorCode()); + assertEquals(Errors.NONE.code(), future.join().get(tp2).errorCode()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + + @Test + public void testRemoteStorageFetchHappensForAllTopicPartitions() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + when(sp0.nextFetchOffset()).thenReturn(10L); + when(sp1.nextFetchOffset()).thenReturn(10L); + // Fetch offset does not match with the cached entry for sp0 and sp1. Hence, a replica manager fetch will happen for both. + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); + + LinkedHashSet remoteStorageFetchPartitions = new LinkedHashSet<>(); + remoteStorageFetchPartitions.add(tp0); + remoteStorageFetchPartitions.add(tp1); + + // Mocking remote storage read result for tp0 and tp1. + doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), remoteStorageFetchPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. + RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( + Optional.of(REMOTE_FETCH_INFO), + Optional.empty() // Remote fetch result is returned successfully without error. + ); + RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); + doAnswer(invocationOnMock -> { + // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. 
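+            // (asyncRead's second argument is the completion callback; invoking it here makes the remote read
+            // appear to finish before tryComplete returns.)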
+ Consumer callback = invocationOnMock.getArgument(1); + callback.accept(remoteFetchResult); + return CompletableFuture.completedFuture(remoteFetchResult); + }).when(remoteLogManager).asyncRead(any(), any()); + when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); + + Uuid fetchId = Uuid.randomUuid(); + + Partition p0 = mock(Partition.class); + when(p0.isLeader()).thenReturn(true); + + Partition p1 = mock(Partition.class); + when(p1.isLeader()).thenReturn(true); + + when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1))) + .withFetchId(fetchId) + .build()); + + // sp0 and sp1 are acquirable. + when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); + when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + + assertTrue(delayedShareFetch.isCompleted()); + // Pending remote fetch object gets created for delayed share fetch. + assertNotNull(delayedShareFetch.pendingRemoteFetches()); + // Verify the locks are released for both tp0 and tp1. + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1)); + assertTrue(shareFetch.isCompleted()); + // Share fetch response contains both remote storage fetch topic partitions. 
+ assertEquals(Set.of(tp0, tp1), future.join().keySet()); + assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); + assertEquals(Errors.NONE.code(), future.join().get(tp1).errorCode()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + + @Test + public void testRemoteStorageFetchCompletionPostRegisteringCallbackByPendingFetchesCompletion() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + SharePartition sp0 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp0.nextFetchOffset()).thenReturn(10L); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + PendingRemoteFetches pendingRemoteFetches = mock(PendingRemoteFetches.class); + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withReplicaManager(replicaManager) + .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .withPendingRemoteFetches(pendingRemoteFetches) + .withFetchId(fetchId) + .build()); + + LinkedHashMap partitionsAcquired = new LinkedHashMap<>(); + partitionsAcquired.put(tp0, 10L); + + // Manually update acquired partitions. + delayedShareFetch.updatePartitionsAcquired(partitionsAcquired); + + // Mock remote fetch result. + RemoteFetch remoteFetch = mock(RemoteFetch.class); + when(remoteFetch.topicIdPartition()).thenReturn(tp0); + when(remoteFetch.remoteFetchResult()).thenReturn(CompletableFuture.completedFuture( + new RemoteLogReadResult(Optional.of(REMOTE_FETCH_INFO), Optional.empty())) + ); + when(remoteFetch.logReadResult()).thenReturn(new LogReadResult( + REMOTE_FETCH_INFO, + Optional.empty(), + -1L, + -1L, + -1L, + -1L, + -1L, + OptionalLong.empty(), + OptionalInt.empty(), + Optional.empty() + )); + when(pendingRemoteFetches.remoteFetches()).thenReturn(List.of(remoteFetch)); + when(pendingRemoteFetches.isDone()).thenReturn(false); + + // Make sure that the callback is called to complete remote storage share fetch result. + doAnswer(invocationOnMock -> { + BiConsumer callback = invocationOnMock.getArgument(0); + callback.accept(mock(Void.class), null); + return null; + }).when(pendingRemoteFetches).invokeCallbackOnCompletion(any()); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + assertFalse(delayedShareFetch.isCompleted()); + delayedShareFetch.forceComplete(); + assertTrue(delayedShareFetch.isCompleted()); + // the future of shareFetch completes. + assertTrue(shareFetch.isCompleted()); + assertEquals(Set.of(tp0), future.join().keySet()); + // Verify the locks are released for tp0. 
+ Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + assertTrue(delayedShareFetch.outsidePurgatoryCallbackLock()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + + @Test + public void testRemoteStorageFetchCompletionPostRegisteringCallbackByTimerTaskCompletion() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + SharePartition sp0 = mock(SharePartition.class); + + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp0.nextFetchOffset()).thenReturn(10L); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), + future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, + BROKER_TOPIC_STATS); + + PendingRemoteFetches pendingRemoteFetches = mock(PendingRemoteFetches.class); + Uuid fetchId = Uuid.randomUuid(); + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withReplicaManager(replicaManager) + .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .withPendingRemoteFetches(pendingRemoteFetches) + .withFetchId(fetchId) + .build()); + + LinkedHashMap partitionsAcquired = new LinkedHashMap<>(); + partitionsAcquired.put(tp0, 10L); + + // Manually update acquired partitions. + delayedShareFetch.updatePartitionsAcquired(partitionsAcquired); + + // Mock remote fetch result. + RemoteFetch remoteFetch = mock(RemoteFetch.class); + when(remoteFetch.topicIdPartition()).thenReturn(tp0); + when(remoteFetch.remoteFetchResult()).thenReturn(CompletableFuture.completedFuture( + new RemoteLogReadResult(Optional.of(REMOTE_FETCH_INFO), Optional.empty())) + ); + when(remoteFetch.logReadResult()).thenReturn(new LogReadResult( + REMOTE_FETCH_INFO, + Optional.empty(), + -1L, + -1L, + -1L, + -1L, + -1L, + OptionalLong.empty(), + OptionalInt.empty(), + Optional.empty() + )); + when(pendingRemoteFetches.remoteFetches()).thenReturn(List.of(remoteFetch)); + when(pendingRemoteFetches.isDone()).thenReturn(false); + + // Make sure that the callback to complete remote storage share fetch result is not called. + doAnswer(invocationOnMock -> null).when(pendingRemoteFetches).invokeCallbackOnCompletion(any()); + + // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. + doAnswer(invocationOnMock -> { + TimerTask timerTask = invocationOnMock.getArgument(0); + timerTask.run(); + return null; + }).when(replicaManager).addShareFetchTimerRequest(any()); + + try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { + Map partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); + mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); + + assertFalse(delayedShareFetch.isCompleted()); + delayedShareFetch.forceComplete(); + assertTrue(delayedShareFetch.isCompleted()); + // the future of shareFetch completes. + assertTrue(shareFetch.isCompleted()); + assertEquals(Set.of(tp0), future.join().keySet()); + // Verify the locks are released for tp0. 
+ Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); + assertTrue(delayedShareFetch.outsidePurgatoryCallbackLock()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + } + static void mockTopicIdPartitionToReturnDataEqualToMinBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, int minBytes) { LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, minBytes); LogOffsetSnapshot endOffsetSnapshot = new LogOffsetSnapshot(1, mock(LogOffsetMetadata.class), @@ -1182,11 +2165,43 @@ private PartitionMaxBytesStrategy mockPartitionMaxBytes(Set pa return partitionMaxBytesStrategy; } + private Seq> buildLocalAndRemoteFetchResult( + Set localLogReadTopicIdPartitions, + Set remoteReadTopicIdPartitions) { + List> logReadResults = new ArrayList<>(); + localLogReadTopicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( + new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.EMPTY), + Optional.empty(), + -1L, + -1L, + -1L, + -1L, + -1L, + OptionalLong.empty(), + OptionalInt.empty(), + Optional.empty() + )))); + remoteReadTopicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( + REMOTE_FETCH_INFO, + Optional.empty(), + -1L, + -1L, + -1L, + -1L, + -1L, + OptionalLong.empty(), + OptionalInt.empty(), + Optional.empty() + )))); + return CollectionConverters.asScala(logReadResults).toSeq(); + } + @SuppressWarnings("unchecked") private static BiConsumer mockExceptionHandler() { return mock(BiConsumer.class); } + @SuppressWarnings("unchecked") static class DelayedShareFetchBuilder { private ShareFetch shareFetch = mock(ShareFetch.class); private ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -1194,7 +2209,9 @@ static class DelayedShareFetchBuilder { private LinkedHashMap sharePartitions = mock(LinkedHashMap.class); private PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); private Time time = new MockTime(); + private Optional pendingRemoteFetches = Optional.empty(); private ShareGroupMetrics shareGroupMetrics = mock(ShareGroupMetrics.class); + private Uuid fetchId = Uuid.randomUuid(); DelayedShareFetchBuilder withShareFetchData(ShareFetch shareFetch) { this.shareFetch = shareFetch; @@ -1231,6 +2248,16 @@ private DelayedShareFetchBuilder withTime(Time time) { return this; } + private DelayedShareFetchBuilder withPendingRemoteFetches(PendingRemoteFetches pendingRemoteFetches) { + this.pendingRemoteFetches = Optional.of(pendingRemoteFetches); + return this; + } + + private DelayedShareFetchBuilder withFetchId(Uuid fetchId) { + this.fetchId = fetchId; + return this; + } + public static DelayedShareFetchBuilder builder() { return new DelayedShareFetchBuilder(); } @@ -1243,7 +2270,10 @@ public DelayedShareFetch build() { sharePartitions, partitionMaxBytesStrategy, shareGroupMetrics, - time); + time, + pendingRemoteFetches, + fetchId, + REMOTE_FETCH_MAX_WAIT_MS); } } } diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index d5acaef2060b8..e3a77158dafc4 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -45,9 +45,15 @@ import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtensionContext; +import 
org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.ArgumentsProvider; +import org.junit.jupiter.params.provider.ArgumentsSource; import org.mockito.Mockito; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -56,12 +62,12 @@ import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; +import java.util.stream.Stream; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createFileRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -329,7 +335,7 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); assertEquals(0, resultData.get(tp0).partitionIndex()); - assertNull(resultData.get(tp0).records()); + assertEquals(MemoryRecords.EMPTY, resultData.get(tp0).records()); assertTrue(resultData.get(tp0).acquiredRecords().isEmpty()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); @@ -344,7 +350,7 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); assertEquals(0, resultData.get(tp0).partitionIndex()); - assertNull(resultData.get(tp0).records()); + assertEquals(MemoryRecords.EMPTY, resultData.get(tp0).records()); assertTrue(resultData.get(tp0).acquiredRecords().isEmpty()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); @@ -463,11 +469,9 @@ public void testProcessFetchResponseWithOffsetFetchException() { Mockito.verify(sp0, times(0)).updateCacheAndOffsets(any(Long.class)); } - @Test - public void testMaybeSliceFetchRecordsSingleBatch() throws IOException { - // Create 1 batch of records with 10 records. - FileRecords records = createFileRecords(Map.of(5L, 10)); - + @ParameterizedTest(name = "{0}") + @ArgumentsSource(RecordsArgumentsProvider.class) + public void testMaybeSliceFetchRecordsSingleBatch(String name, Records records) { // Acquire all offsets, should return same records. List acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(5).setLastOffset(14).setDeliveryCount((short) 1)); Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 10)); @@ -499,15 +503,9 @@ public void testMaybeSliceFetchRecordsSingleBatch() throws IOException { assertEquals(records, slicedRecords); } - @Test - public void testMaybeSliceFetchRecordsMultipleBatches() throws IOException { - // Create 3 batches of records with 3, 2 and 4 records respectively. - LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); - recordsPerOffset.put(0L, 3); - recordsPerOffset.put(3L, 2); - recordsPerOffset.put(7L, 4); // Gap of 2 offsets between batches. 
- FileRecords records = createFileRecords(recordsPerOffset); - + @ParameterizedTest(name = "{0}") + @ArgumentsSource(MultipleBatchesRecordsArgumentsProvider.class) + public void testMaybeSliceFetchRecordsMultipleBatches(String name, Records records) { // Acquire all offsets, should return same records. List acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(0).setLastOffset(10).setDeliveryCount((short) 1)); Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 11)); @@ -618,10 +616,9 @@ public void testMaybeSliceFetchRecordsMultipleBatches() throws IOException { assertEquals(records.sizeInBytes(), slicedRecords.sizeInBytes()); } - @Test - public void testMaybeSliceFetchRecordsException() throws IOException { - // Create 1 batch of records with 3 records. - FileRecords records = createFileRecords(Map.of(0L, 3)); + @ParameterizedTest(name = "{0}") + @ArgumentsSource(MultipleBatchesRecordsArgumentsProvider.class) + public void testMaybeSliceFetchRecordsException(String name, Records records) { // Send empty acquired records which should trigger an exception and same file records should // be returned. The method doesn't expect empty acquired records. Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords( @@ -629,14 +626,41 @@ public void testMaybeSliceFetchRecordsException() throws IOException { assertEquals(records, slicedRecords); } - @Test - public void testMaybeSliceFetchRecordsNonFileRecords() { - // Send memory records which should be returned as is. - try (MemoryRecordsBuilder records = memoryRecordsBuilder(2, 0)) { - List acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(0).setLastOffset(1).setDeliveryCount((short) 1)); - Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords( - records.build(), new ShareAcquiredRecords(acquiredRecords, 2)); - assertEquals(records.build(), slicedRecords); + private static class RecordsArgumentsProvider implements ArgumentsProvider { + @Override + public Stream provideArguments(ExtensionContext context) throws Exception { + return Stream.of( + Arguments.of("FileRecords", createFileRecords(Map.of(5L, 10))), + Arguments.of("MemoryRecords", createMemoryRecords(5L, 10)) + ); + } + + private MemoryRecords createMemoryRecords(long baseOffset, int numRecords) { + try (MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(numRecords, baseOffset)) { + return recordsBuilder.build(); + } + } + } + + private static class MultipleBatchesRecordsArgumentsProvider implements ArgumentsProvider { + @Override + public Stream provideArguments(ExtensionContext context) throws Exception { + LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); + recordsPerOffset.put(0L, 3); + recordsPerOffset.put(3L, 2); + recordsPerOffset.put(7L, 4); // Gap of 2 offsets between batches. 
+ return Stream.of( + Arguments.of("FileRecords", createFileRecords(recordsPerOffset)), + Arguments.of("MemoryRecords", createMemoryRecords(recordsPerOffset)) + ); + } + + private MemoryRecords createMemoryRecords(Map recordsPerOffset) { + ByteBuffer buffer = ByteBuffer.allocate(1024); + recordsPerOffset.forEach((offset, numOfRecords) -> memoryRecordsBuilder(buffer, numOfRecords, offset).close()); + buffer.flip(); + + return MemoryRecords.readableRecords(buffer); } } } diff --git a/core/src/test/java/kafka/server/share/SharePartitionCacheTest.java b/core/src/test/java/kafka/server/share/SharePartitionCacheTest.java new file mode 100644 index 0000000000000..0d33d95f65b62 --- /dev/null +++ b/core/src/test/java/kafka/server/share/SharePartitionCacheTest.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server.share; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.server.share.SharePartitionKey; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class SharePartitionCacheTest { + + private static final String GROUP_ID = "test-group"; + private static final Uuid TOPIC_ID = Uuid.randomUuid(); + private static final TopicIdPartition TOPIC_ID_PARTITION = new TopicIdPartition(TOPIC_ID, new TopicPartition("test-topic", 1)); + private static final SharePartitionKey SHARE_PARTITION_KEY = new SharePartitionKey(GROUP_ID, TOPIC_ID_PARTITION); + + private SharePartitionCache cache; + + @BeforeEach + public void setUp() { + cache = new SharePartitionCache(); + } + + @Test + public void testComputeIfAbsent() { + // Test computeIfAbsent when key doesn't exist + SharePartition sharePartition = Mockito.mock(SharePartition.class); + SharePartition newPartition = cache.computeIfAbsent(SHARE_PARTITION_KEY, key -> sharePartition); + + assertEquals(sharePartition, newPartition); + assertEquals(sharePartition, cache.get(SHARE_PARTITION_KEY)); + assertEquals(1, cache.groups().size()); + + // Test computeIfAbsent when key exists + SharePartition anotherPartition = Mockito.mock(SharePartition.class); + SharePartition existingPartition = cache.computeIfAbsent(SHARE_PARTITION_KEY, key -> anotherPartition); + assertEquals(sharePartition, existingPartition); + assertEquals(sharePartition, cache.get(SHARE_PARTITION_KEY)); + assertEquals(1, cache.groups().size()); + } + + @Test + public void testRemoveGroup() { + // Add partitions for multiple groups + String group1 = "group1"; + String group2 = "group2"; + TopicIdPartition tp1 = new 
TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic1", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic2", 2)); + TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic3", 3)); + // Group1 with 2 partitions. + SharePartitionKey key1 = new SharePartitionKey(group1, tp1); + SharePartitionKey key2 = new SharePartitionKey(group1, tp2); + // Group2 with 1 partition. + SharePartitionKey key3 = new SharePartitionKey(group2, tp3); + SharePartition sp1 = Mockito.mock(SharePartition.class); + SharePartition sp2 = Mockito.mock(SharePartition.class); + SharePartition sp3 = Mockito.mock(SharePartition.class); + + // Test computeIfAbsent adds to group map + cache.computeIfAbsent(key1, k -> sp1); + cache.computeIfAbsent(key2, k -> sp2); + cache.computeIfAbsent(key3, k -> sp3); + + // Verify partitions are in the cache. + assertEquals(3, cache.size()); + assertTrue(cache.containsKey(key1)); + assertTrue(cache.containsKey(key2)); + assertTrue(cache.containsKey(key3)); + // Verify groups are in the group map. + assertEquals(2, cache.groups().size()); + assertTrue(cache.groups().containsKey(group1)); + assertTrue(cache.groups().containsKey(group2)); + // Verify topic partitions are in the group map. + assertEquals(2, cache.groups().get(group1).size()); + assertEquals(1, cache.groups().get(group2).size()); + assertEquals(1, cache.groups().get(group1).stream().filter(tp -> tp.equals(tp1)).count()); + assertEquals(1, cache.groups().get(group1).stream().filter(tp -> tp.equals(tp2)).count()); + assertEquals(1, cache.groups().get(group2).stream().filter(tp -> tp.equals(tp3)).count()); + + // Remove one group and verify only its partitions are removed. + cache.topicIdPartitionsForGroup(group1).forEach( + topicIdPartition -> cache.remove(new SharePartitionKey(group1, topicIdPartition))); + assertEquals(1, cache.size()); + assertTrue(cache.containsKey(key3)); + assertEquals(1, cache.groups().size()); + assertTrue(cache.groups().containsKey(group2)); + } +} \ No newline at end of file diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index a69c6c83071b2..24a84bab64a9b 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -17,7 +17,6 @@ package kafka.server.share; import kafka.cluster.Partition; -import kafka.server.LogReadResult; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; import kafka.server.share.SharePartitionManager.SharePartitionListener; @@ -26,6 +25,7 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.InvalidRecordStateException; @@ -34,6 +34,7 @@ import org.apache.kafka.common.errors.KafkaStorageException; import org.apache.kafka.common.errors.LeaderNotAvailableException; import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.errors.ShareSessionLimitReachedException; import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import 
org.apache.kafka.common.message.ShareFetchResponseData; @@ -43,13 +44,15 @@ import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; -import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfigManager; +import org.apache.kafka.server.LogReadResult; +import org.apache.kafka.server.common.ShareVersion; import org.apache.kafka.server.purgatory.DelayedOperationKey; import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; import org.apache.kafka.server.share.CachedSharePartition; @@ -72,12 +75,13 @@ import org.apache.kafka.server.share.session.ShareSessionKey; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; -import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.FutureUtils; +import org.apache.kafka.server.util.MockTime; import org.apache.kafka.server.util.timer.MockTimer; import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.OffsetResultHolder; @@ -100,28 +104,25 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import scala.Option; import scala.Tuple2; import scala.collection.Seq; import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedListEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -129,8 +130,6 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.atMost; import static 
org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -154,15 +153,18 @@ public class SharePartitionManagerTest { FetchRequest.ORDINARY_CONSUMER_ID, -1, DELAYED_SHARE_FETCH_MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); private static final String TIMER_NAME_PREFIX = "share-partition-manager"; + private static final String CONNECTION_ID = "id-1"; static final int DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL = 1000; + static final long REMOTE_FETCH_MAX_WAIT_MS = 6000L; - private Time time; + private MockTime time; private ReplicaManager mockReplicaManager; private BrokerTopicStats brokerTopicStats; private SharePartitionManager sharePartitionManager; private static final List EMPTY_PART_LIST = List.of(); + private static final List EMPTY_ACQUIRED_RECORDS = List.of(); @BeforeEach public void setUp() { @@ -171,7 +173,7 @@ public void setUp() { brokerTopicStats = new BrokerTopicStats(); mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); - when(mockReplicaManager.getPartitionOrException(Mockito.any())).thenReturn(partition); + when(mockReplicaManager.getPartitionOrException((TopicPartition) any())).thenReturn(partition); } @AfterEach @@ -185,7 +187,7 @@ public void tearDown() throws Exception { @Test public void testNewContextReturnsFinalContextWithoutRequestData() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -201,24 +203,23 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), List.of(), reqMetadata2, true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID); assertEquals(FinalContext.class, context2.getClass()); } @Test public void testNewContextReturnsFinalContextWithRequestData() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); Uuid tpId0 = Uuid.randomUuid(); - Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); @@ -229,22 +230,21 @@ public void testNewContextReturnsFinalContextWithRequestData() { List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = 
sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. - // New context should be created successfully - List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, List.of(), reqMetadata2, true); + // Sending a Request with FINAL_EPOCH. This should return a FinalContext. + List reqData2 = List.of(tp0, tp1); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID); assertEquals(FinalContext.class, context2.getClass()); } @Test public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequestData() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -261,21 +261,106 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH - List reqData3 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); + List reqData2 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); assertInstanceOf(FinalContext.class, - sharePartitionManager.newContext(groupId, reqData3, List.of(), reqMetadata2, true)); + sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID)); + } + + @Test + public void testNewContextThrowsErrorWhenShareSessionNotFoundOnFinalEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); + assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext("grp", EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.FINAL_EPOCH), false, CONNECTION_ID)); + } + + @Test + public void testNewContextThrowsErrorWhenAcknowledgeDataPresentOnInitialEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + + assertThrows(InvalidRequestException.class, () -> 
sharePartitionManager.newContext("grp", List.of(tp0, tp1), EMPTY_PART_LIST, + new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH), true, CONNECTION_ID)); + } + + @Test + public void testNewContextThrowsErrorWhenShareSessionCacheIsFullOnInitialEpoch() { + // Define a cache with max size 1 + ShareSessionCache cache = new ShareSessionCache(1); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); + + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + + String groupId = "grp"; + Uuid memberId1 = Uuid.randomUuid(); + Uuid memberId2 = Uuid.randomUuid(); + + // Create a new share session with an initial share fetch request + List reqData = List.of(tp0, tp1); + + ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context1); + assertFalse(((ShareSessionContext) context1).isSubsequent()); + + // Trying to create a new share session, but since cache is already full, it should throw an exception + ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); + assertThrows(ShareSessionLimitReachedException.class, () -> sharePartitionManager.newContext("grp", reqData, EMPTY_PART_LIST, + reqMetadata2, false, "id-2")); + } + + @Test + public void testNewContextExistingSessionNewRequestWithInitialEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); + + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); + List reqData = List.of(tp0, tp1); + + ShareRequestMetadata reqMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); + + // Create a new share session with an initial share fetch request + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata, false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context1); + assertFalse(((ShareSessionContext) context1).isSubsequent()); + assertEquals(1, cache.size()); + + // Sending another request with INITIAL_EPOCH and same share session key. This should return a new ShareSessionContext + // and delete the older one. 
+ ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata, false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context2); + assertFalse(((ShareSessionContext) context1).isSubsequent()); + assertEquals(1, cache.size()); } @Test public void testNewContext() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -296,7 +381,7 @@ public void testNewContext() { List reqData2 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); @@ -315,16 +400,16 @@ public void testNewContext() { // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, "id-2")); // Test trying to create a new session with a non-existent session key Uuid memberId4 = Uuid.randomUuid(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(memberId4, 1), true)); + new ShareRequestMetadata(memberId4, 1), true, "id-3")); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -342,18 +427,18 @@ public void testNewContext() { // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. 
- ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -373,97 +458,107 @@ public void testNewContext() { } @Test - public void testShareSessionExpiration() { - ShareSessionCache cache = new ShareSessionCache(2, 1000); + public void testAcknowledgeSessionUpdateThrowsOnInitialEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) - .withTime(time) .build(); - Map topicNames = new HashMap<>(); - Uuid fooId = Uuid.randomUuid(); - topicNames.put(fooId, "foo"); - TopicIdPartition foo0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); - - // Create a new share session, session 1 - List session1req = List.of(foo0, foo1); - - String groupId = "grp"; - ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - - ShareFetchContext session1context = sharePartitionManager.newContext(groupId, session1req, EMPTY_PART_LIST, reqMetadata1, false); - assertInstanceOf(ShareSessionContext.class, session1context); - - LinkedHashMap respData1 = new LinkedHashMap<>(); - respData1.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); - respData1.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); - - ShareFetchResponse session1resp = session1context.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1); - assertEquals(Errors.NONE, session1resp.error()); - assertEquals(2, session1resp.responseData(topicNames).size()); - - ShareSessionKey session1Key = new ShareSessionKey(groupId, reqMetadata1.memberId()); - // check share session entered into cache - assertNotNull(cache.get(session1Key)); - - time.sleep(500); + assertThrows(InvalidShareSessionEpochException.class, + () -> sharePartitionManager.acknowledgeSessionUpdate("grp", + new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH))); + } - // Create a second new share session - List session2req = List.of(foo0, foo1); + @Test + public void testAcknowledgeSessionUpdateThrowsWhenShareSessionNotFound() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); - ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); + // The share session corresponding to this memberId has not been created yet. This should throw an exception. 
+ assertThrows(ShareSessionNotFoundException.class, + () -> sharePartitionManager.acknowledgeSessionUpdate("grp", + new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)))); + } - ShareFetchContext session2context = sharePartitionManager.newContext(groupId, session2req, EMPTY_PART_LIST, reqMetadata2, false); - assertInstanceOf(ShareSessionContext.class, session2context); + @Test + public void testAcknowledgeSessionUpdateThrowsInvalidShareSessionEpochException() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); - LinkedHashMap respData2 = new LinkedHashMap<>(); - respData2.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); - respData2.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - ShareFetchResponse session2resp = session2context.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); - assertEquals(Errors.NONE, session2resp.error()); - assertEquals(2, session2resp.responseData(topicNames).size()); + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); - ShareSessionKey session2Key = new ShareSessionKey(groupId, reqMetadata2.memberId()); + // Create a new share session with an initial share fetch request + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, + new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context1); + assertFalse(((ShareSessionContext) context1).isSubsequent()); - // both newly created entries are present in cache - assertNotNull(cache.get(session1Key)); - assertNotNull(cache.get(session2Key)); + // The expected epoch from the share session should be 1, but we are passing 2. This should throw an exception. 
+ assertThrows(InvalidShareSessionEpochException.class, + () -> sharePartitionManager.acknowledgeSessionUpdate("grp", + new ShareRequestMetadata(memberId, + ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH))))); + } - time.sleep(500); + @Test + public void testAcknowledgeSessionUpdateSuccessOnSubsequentEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); - // Create a subsequent share fetch context for session 1 - ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); - assertInstanceOf(ShareSessionContext.class, session1context2); + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - // total sleep time will now be large enough that share session 1 will be evicted if not correctly touched - time.sleep(501); + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); - // create one final share session to test that the least recently used entry is evicted - // the second share session should be evicted because the first share session was incrementally fetched - // more recently than the second session was created - List session3req = List.of(foo0, foo1); + // Create a new share session with an initial share fetch request + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, + new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context1); + assertFalse(((ShareSessionContext) context1).isSubsequent()); - ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); + // The expected epoch from the share session should be 1, and we are passing the same. So, execution should be successful. 
+ assertDoesNotThrow( + () -> sharePartitionManager.acknowledgeSessionUpdate("grp", + new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)))); } - ShareFetchContext session3context = sharePartitionManager.newContext(groupId, session3req, EMPTY_PART_LIST, reqMetadata3, false); + @Test + public void testAcknowledgeSessionUpdateSuccessOnFinalEpoch() { + ShareSessionCache cache = new ShareSessionCache(10); + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .build(); - LinkedHashMap respData3 = new LinkedHashMap<>(); - respData3.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); - respData3.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); + Uuid tpId0 = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - ShareFetchResponse session3resp = session3context.updateAndGenerateResponseData(groupId, reqMetadata3.memberId(), respData3); - assertEquals(Errors.NONE, session3resp.error()); - assertEquals(2, session3resp.responseData(topicNames).size()); + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); - ShareSessionKey session3Key = new ShareSessionKey(groupId, reqMetadata3.memberId()); + // Create a new share session with an initial share fetch request + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, + new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); + assertInstanceOf(ShareSessionContext.class, context1); + assertFalse(((ShareSessionContext) context1).isSubsequent()); - assertNotNull(cache.get(session1Key)); - assertNull(cache.get(session2Key), "share session 2 should have been evicted by latest share session, " + - "as share session 1 was used more recently"); - assertNotNull(cache.get(session3Key)); + // The expected epoch from the share session should be 1, but we are passing the Final Epoch (-1), which is accepted for acknowledgements. So, execution should be successful. 
+ assertDoesNotThrow( + () -> sharePartitionManager.acknowledgeSessionUpdate("grp", + new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH))); } @Test @@ -484,7 +579,7 @@ public void testSubsequentShareSession() { String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); LinkedHashMap respData1 = new LinkedHashMap<>(); @@ -500,7 +595,7 @@ public void testSubsequentShareSession() { List removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); Set expectedTopicIdPartitions2 = new HashSet<>(); @@ -522,15 +617,15 @@ public void testSubsequentShareSession() { ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(1, resp2.data().responses().size()); - assertEquals(barId, resp2.data().responses().get(0).topicId()); - assertEquals(1, resp2.data().responses().get(0).partitions().size()); - assertEquals(0, resp2.data().responses().get(0).partitions().get(0).partitionIndex()); + assertEquals(barId, resp2.data().responses().stream().findFirst().get().topicId()); + assertEquals(1, resp2.data().responses().stream().findFirst().get().partitions().size()); + assertEquals(0, resp2.data().responses().stream().findFirst().get().partitions().get(0).partitionIndex()); assertEquals(1, resp2.responseData(topicNames).size()); } @Test public void testZeroSizeShareSession() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -547,7 +642,7 @@ public void testZeroSizeShareSession() { String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); LinkedHashMap respData1 = new LinkedHashMap<>(); @@ -563,8 +658,8 @@ public void testZeroSizeShareSession() { List removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), removed2, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, removed2, + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); LinkedHashMap respData2 = new LinkedHashMap<>(); @@ -576,7 +671,7 @@ public void testZeroSizeShareSession() { @Test public void testToForgetPartitions() { String groupId = "grp"; - 
ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -590,23 +685,23 @@ public void testToForgetPartitions() { List reqData1 = List.of(foo, bar); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertPartitionsPresent((ShareSessionContext) context1, List.of(foo, bar)); mockUpdateAndGenerateResponseData(context1, groupId, reqMetadata1.memberId()); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), List.of(foo), - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(foo), + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); // So foo is removed but not the others. assertPartitionsPresent((ShareSessionContext) context2, List.of(bar)); mockUpdateAndGenerateResponseData(context2, groupId, reqMetadata1.memberId()); - ShareFetchContext context3 = sharePartitionManager.newContext(groupId, List.of(), List.of(bar), - new ShareRequestMetadata(reqMetadata1.memberId(), 2), true); - assertPartitionsPresent((ShareSessionContext) context3, List.of()); + ShareFetchContext context3 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(bar), + new ShareRequestMetadata(reqMetadata1.memberId(), 2), true, CONNECTION_ID); + assertPartitionsPresent((ShareSessionContext) context3, EMPTY_PART_LIST); } // This test simulates a share session where the topic ID changes broker side (the one handling the request) in both the metadata cache and the log @@ -614,7 +709,7 @@ public void testToForgetPartitions() { @Test public void testShareSessionUpdateTopicIdsBrokerSide() { String groupId = "grp"; - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -632,7 +727,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { List reqData1 = List.of(foo, bar); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); @@ -647,8 +742,8 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. 
- ShareFetchContext context2 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertTrue(((ShareSessionContext) context2).isSubsequent()); @@ -665,7 +760,7 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { @Test public void testGetErroneousAndValidTopicIdPartitions() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -682,7 +777,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { List reqData2 = List.of(tp0, tp1, tpNull1); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); assertErroneousAndValidTopicIdPartitions(context2.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1), List.of(tp0, tp1)); @@ -704,15 +799,15 @@ public void testGetErroneousAndValidTopicIdPartitions() { // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); // Test trying to create a new session with a non-existent session key assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(Uuid.randomUuid(), 1), true)); + new ShareRequestMetadata(Uuid.randomUuid(), 1), true, CONNECTION_ID)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -723,12 +818,12 @@ public void testGetErroneousAndValidTopicIdPartitions() { // Test setting an invalid share session epoch. 
assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session List reqData7 = List.of(tpNull2); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); // Check for throttled response ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); @@ -737,12 +832,12 @@ public void testGetErroneousAndValidTopicIdPartitions() { assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1, tpNull2), List.of(tp0, tp1)); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); - assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), List.of(), List.of()); + assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), EMPTY_PART_LIST, EMPTY_PART_LIST); // Check for throttled response ShareFetchResponse resp8 = context8.throttleResponse(100); assertEquals(Errors.NONE, resp8.error()); @@ -758,7 +853,7 @@ public void testGetErroneousAndValidTopicIdPartitions() { @Test public void testShareFetchContextResponseSize() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -783,7 +878,7 @@ public void testShareFetchContextResponseSize() { short version = ApiKeys.SHARE_FETCH.latestVersion(); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); @@ -803,17 +898,17 @@ public void testShareFetchContextResponseSize() { // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); // Test trying to create a new session with a non-existent session key Uuid memberId4 = Uuid.randomUuid(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(memberId4, 1), true)); + new 
ShareRequestMetadata(memberId4, 1), true, CONNECTION_ID)); // Continue the first share session we created. List reqData5 = List.of(tp2); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context5); assertTrue(((ShareSessionContext) context5).isSubsequent()); @@ -828,11 +923,11 @@ public void testShareFetchContextResponseSize() { // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); int respSize7 = context7.responseSize(respData2, version); ShareFetchResponse resp7 = context7.throttleResponse(100); @@ -842,8 +937,8 @@ public void testShareFetchContextResponseSize() { assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -859,7 +954,7 @@ public void testShareFetchContextResponseSize() { @Test public void testCachedTopicPartitionsWithNoTopicPartitions() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); + ShareSessionCache cache = new ShareSessionCache(10); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -870,10 +965,7 @@ public void testCachedTopicPartitionsWithNoTopicPartitions() { @Test public void testCachedTopicPartitionsForValidShareSessions() { - ShareSessionCache cache = new ShareSessionCache(10, 1000); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + ShareSessionCache cache = new ShareSessionCache(10); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); @@ -884,12 +976,30 @@ public void testCachedTopicPartitionsForValidShareSessions() { String groupId = "grp"; Uuid memberId1 = Uuid.randomUuid(); Uuid memberId2 = Uuid.randomUuid(); + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + when(sp0.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); + when(sp1.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); + 
when(sp2.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); + + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + + sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache) + .withPartitionCache(partitionCache) + .build(); + // Create a new share session with an initial share fetch request. List reqData1 = List.of(tp0, tp1); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context1); assertFalse(((ShareSessionContext) context1).isSubsequent()); @@ -903,14 +1013,14 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1); assertEquals(Errors.NONE, resp1.error()); - assertEquals(new HashSet<>(List.of(tp0, tp1)), + assertEquals(Set.of(tp0, tp1), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. List reqData2 = List.of(tp2); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context2); assertFalse(((ShareSessionContext) context2).isSubsequent()); @@ -928,7 +1038,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { // Continue the first share session we created. List reqData3 = List.of(tp2); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true); + new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context3); assertTrue(((ShareSessionContext) context3).isSubsequent()); @@ -937,13 +1047,13 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp3 = context3.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData3); assertEquals(Errors.NONE, resp3.error()); - assertEquals(new HashSet<>(List.of(tp0, tp1, tp2)), + assertEquals(Set.of(tp0, tp1, tp2), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. 
List reqData4 = List.of(tp3); ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, List.of(tp2), - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context4); assertTrue(((ShareSessionContext) context4).isSubsequent()); @@ -955,8 +1065,8 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertEquals(List.of(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, List.of(), EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); assertEquals(FinalContext.class, context5.getClass()); LinkedHashMap respData5 = new LinkedHashMap<>(); @@ -970,8 +1080,8 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . - ShareFetchContext context6 = sharePartitionManager.newContext(groupId, List.of(), List.of(tp3), - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); + ShareFetchContext context6 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(tp3), + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); assertInstanceOf(ShareSessionContext.class, context6); assertTrue(((ShareSessionContext) context6).isSubsequent()); @@ -979,7 +1089,7 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp6 = context6.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData6); assertEquals(Errors.NONE, resp6.error()); - assertEquals(List.of(), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); + assertEquals(EMPTY_PART_LIST, sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); } @Test @@ -1070,142 +1180,6 @@ public void testMultipleSequentialShareFetches() { ); } - @Test - public void testMultipleConcurrentShareFetches() throws InterruptedException { - String groupId = "grp"; - Uuid memberId1 = Uuid.randomUuid(); - Uuid fooId = Uuid.randomUuid(); - Uuid barId = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); - TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); - List topicIdPartitions = List.of(tp0, tp1, tp2, tp3); - - mockFetchOffsetForTimestamp(mockReplicaManager); - - Timer mockTimer = systemTimerReaper(); - DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( - "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); - mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); - 
mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp2, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp3, 1); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - SharePartition sp3 = mock(SharePartition.class); - - // Mock the share partitions corresponding to the topic partitions. - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); - // Mock the share partitions to get initialized instantaneously without any error. - when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp2.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp3.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - // Required mocks so that the share partitions can acquire record. - when(sp0.maybeAcquireFetchLock()).thenReturn(true); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); - when(sp3.maybeAcquireFetchLock()).thenReturn(true); - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - when(sp3.canAcquireRecords()).thenReturn(true); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp2.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp3.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - // Mocks to have fetch offset metadata match for share partitions to avoid any extra calls to replicaManager.readFromLog. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); - when(sp3.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); - - // Mock nextFetchOffset() functionality for share partitions to reflect the moving fetch of share partitions. 
- when(sp0.nextFetchOffset()).thenReturn((long) 1, (long) 15, (long) 6, (long) 30, (long) 25); - when(sp1.nextFetchOffset()).thenReturn((long) 4, (long) 1, (long) 18, (long) 5); - when(sp2.nextFetchOffset()).thenReturn((long) 10, (long) 25, (long) 26); - when(sp3.nextFetchOffset()).thenReturn((long) 20, (long) 15, (long) 23, (long) 16); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withReplicaManager(mockReplicaManager) - .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) - .withPartitionCacheMap(partitionCacheMap) - .build(); - - doAnswer(invocation -> { - assertEquals(1, sp0.nextFetchOffset()); - assertEquals(4, sp1.nextFetchOffset()); - assertEquals(10, sp2.nextFetchOffset()); - assertEquals(20, sp3.nextFetchOffset()); - return buildLogReadResult(topicIdPartitions); - }).doAnswer(invocation -> { - assertEquals(15, sp0.nextFetchOffset()); - assertEquals(1, sp1.nextFetchOffset()); - assertEquals(25, sp2.nextFetchOffset()); - assertEquals(15, sp3.nextFetchOffset()); - return buildLogReadResult(topicIdPartitions); - }).doAnswer(invocation -> { - assertEquals(6, sp0.nextFetchOffset()); - assertEquals(18, sp1.nextFetchOffset()); - assertEquals(26, sp2.nextFetchOffset()); - assertEquals(23, sp3.nextFetchOffset()); - return buildLogReadResult(topicIdPartitions); - }).doAnswer(invocation -> { - assertEquals(30, sp0.nextFetchOffset()); - assertEquals(5, sp1.nextFetchOffset()); - assertEquals(26, sp2.nextFetchOffset()); - assertEquals(16, sp3.nextFetchOffset()); - return buildLogReadResult(topicIdPartitions); - }).doAnswer(invocation -> { - assertEquals(25, sp0.nextFetchOffset()); - assertEquals(5, sp1.nextFetchOffset()); - assertEquals(26, sp2.nextFetchOffset()); - assertEquals(16, sp3.nextFetchOffset()); - return buildLogReadResult(topicIdPartitions); - }).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - int threadCount = 100; - ExecutorService executorService = Executors.newFixedThreadPool(threadCount); - - FetchParams fetchParams = new FetchParams( - FetchRequest.ORDINARY_CONSUMER_ID, -1, 200, - 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); - - try { - for (int i = 0; i != threadCount; ++i) { - executorService.submit(() -> { - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), fetchParams, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - }); - // We are blocking the main thread at an interval of 10 threads so that the currently running executorService threads can complete. 
- if (i % 10 == 0) - executorService.awaitTermination(50, TimeUnit.MILLISECONDS); - } - } finally { - if (!executorService.awaitTermination(50, TimeUnit.MILLISECONDS)) - executorService.shutdown(); - } - // We are checking the number of replicaManager readFromLog() calls - Mockito.verify(mockReplicaManager, atMost(100)).readFromLog( - any(), any(), any(ReplicaQuota.class), anyBoolean()); - Mockito.verify(mockReplicaManager, atLeast(10)).readFromLog( - any(), any(), any(ReplicaQuota.class), anyBoolean()); - } - @Test public void testReplicaManagerFetchShouldNotProceed() { String groupId = "grp"; @@ -1215,11 +1189,11 @@ public void testReplicaManagerFetchShouldNotProceed() { List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( @@ -1228,7 +1202,7 @@ public void testReplicaManagerFetchShouldNotProceed() { mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -1332,13 +1306,13 @@ public void testReleaseSessionSuccess() { partitionMap.add(new CachedSharePartition(tp3)); when(shareSession.partitionMap()).thenReturn(partitionMap); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -1354,7 +1328,7 @@ public void testReleaseSessionSuccess() { assertEquals(2, result.get(tp2).partitionIndex()); assertEquals(Errors.INVALID_RECORD_STATE.code(), result.get(tp2).errorCode()); assertEquals("Unable to release acquired records for the batch", result.get(tp2).errorMessage()); - // tp3 was not a part of partitionCacheMap. + // tp3 was not a part of partitionCache. 
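For orientation, the recurring change in these test hunks replaces the bare Map<SharePartitionKey, SharePartition> (named partitionCacheMap) with a SharePartitionCache (named partitionCache). The tests only exercise a small map-like surface on it, so a minimal sketch of a wrapper exposing exactly that surface is given below purely as a reading aid; the class name SimpleSharePartitionCache and the ConcurrentHashMap backing are illustrative assumptions, not the implementation introduced by this patch.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical stand-in, only to illustrate the surface these tests rely on;
// it is not the real SharePartitionCache class referenced in the diff.
final class SimpleSharePartitionCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();

    V put(K key, V value) { return cache.put(key, value); }        // test setup
    V get(K key) { return cache.get(key); }                        // assertions on cached entries
    V computeIfAbsent(K key, Function<K, V> mappingFunction) {     // fetch-path stubs in the tests
        return cache.computeIfAbsent(key, mappingFunction);
    }
    int size() { return cache.size(); }                            // assertEquals(n, partitionCache.size())
    boolean isEmpty() { return cache.isEmpty(); }                  // assertTrue(partitionCache.isEmpty())
}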
assertEquals(4, result.get(tp3).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp3).errorCode()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp3).errorMessage()); @@ -1475,11 +1449,11 @@ public void testAcknowledgeSinglePartition() { when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp), sp); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -1520,14 +1494,14 @@ public void testAcknowledgeMultiplePartition() throws Exception { when(sp2.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); when(sp3.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withShareGroupMetrics(shareGroupMetrics) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -1593,13 +1567,13 @@ public void testAcknowledgeIndividualOffsets() throws Exception { when(sp1.acknowledge(memberId, ack1)).thenReturn(CompletableFuture.completedFuture(null)); when(sp2.acknowledge(memberId, ack2)).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withShareGroupMetrics(shareGroupMetrics) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -1635,11 +1609,11 @@ public void testAcknowledgeIncorrectGroupId() { TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp), sp); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + 
.withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .withShareGroupMetrics(shareGroupMetrics) .build(); @@ -1679,10 +1653,10 @@ public void testAcknowledgeIncorrectMemberId() { when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(FutureUtils.failedFuture( new InvalidRequestException("Member is not the owner of batch record") )); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp), sp); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -1761,9 +1735,9 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp2).acknowledge(ArgumentMatchers.eq(memberId), any()); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, @@ -1784,9 +1758,9 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 2); // Initially you cannot acquire records for both sp1 and sp2. - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); + when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty()); when(sp2.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty()); @@ -1795,7 +1769,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -1817,7 +1791,7 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys. 
assertEquals(2, delayedShareFetchPurgatory.watched()); - doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); Map> acknowledgeTopics = new HashMap<>(); acknowledgeTopics.put(tp1, List.of( @@ -1871,10 +1845,10 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp3).acknowledge(ArgumentMatchers.eq(memberId), any()); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, @@ -1893,18 +1867,18 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Initially you cannot acquire records for both all 3 share partitions. - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); + when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); - when(sp3.maybeAcquireFetchLock()).thenReturn(true); + when(sp3.maybeAcquireFetchLock(any())).thenReturn(true); when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -1961,6 +1935,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); + SharePartition sp3 = mock(SharePartition.class); ShareSessionCache cache = mock(ShareSessionCache.class); ShareSession shareSession = mock(ShareSession.class); @@ -1975,10 +1950,12 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { when(sp2.canAcquireRecords()).thenReturn(true); return CompletableFuture.completedFuture(Optional.empty()); }).when(sp2).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); + when(sp3.releaseAcquiredRecords(ArgumentMatchers.eq(memberId))).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + 
partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, @@ -1999,16 +1976,16 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); // Initially you cannot acquire records for both sp1 and sp2. - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); + when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = spy(SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withCache(cache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) @@ -2034,7 +2011,7 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(List.of(tp1, tp3)); doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(List.of(), 0)); + when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0)); // Release acquired records on session close request for tp1 and tp3. sharePartitionManager.releaseSession(groupId, memberId); @@ -2081,10 +2058,10 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp3).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( FETCH_PARAMS, @@ -2103,18 +2080,18 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Initially you cannot acquire records for both all 3 share partitions. 
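In the hunks above and below, the stubs for maybeAcquireFetchLock gain an argument matcher because the method now takes a parameter. As a generic, self-contained illustration of that Mockito pattern (the FetchLock interface and its String parameter are made up for this sketch and are not the SharePartition signature):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MatcherStubSketch {
    // Hypothetical interface standing in for SharePartition in this sketch only.
    interface FetchLock {
        boolean maybeAcquireFetchLock(String reason);
    }

    public static void main(String[] args) {
        FetchLock lock = mock(FetchLock.class);
        // Once the production method takes an argument, the stub needs a matcher so the
        // canned return value applies to every call, mirroring
        // when(sp.maybeAcquireFetchLock(any())).thenReturn(true) in the diff.
        when(lock.maybeAcquireFetchLock(any())).thenReturn(true);
        System.out.println(lock.maybeAcquireFetchLock("share-fetch"));  // prints true
    }
}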
- when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock()).thenReturn(true); + when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); - when(sp3.maybeAcquireFetchLock()).thenReturn(true); + when(sp3.maybeAcquireFetchLock(any())).thenReturn(true); when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); sharePartitionManager = spy(SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withCache(cache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) @@ -2162,8 +2139,8 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Keep the initialization future pending, so fetch request is stuck. CompletableFuture pendingInitializationFuture = new CompletableFuture<>(); @@ -2180,7 +2157,7 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti when(time.hiResClockMs()).thenReturn(100L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTime(time) .withShareGroupMetrics(shareGroupMetrics) @@ -2227,9 +2204,9 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); // Keep the initialization future pending, so fetch request is stuck. 
CompletableFuture pendingInitializationFuture1 = new CompletableFuture<>(); @@ -2250,7 +2227,7 @@ public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception when(time.hiResClockMs()).thenReturn(100L); ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTime(time) .withShareGroupMetrics(shareGroupMetrics) @@ -2295,8 +2272,8 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Keep the 2 initialization futures pending and 1 completed with leader not available exception. CompletableFuture pendingInitializationFuture1 = new CompletableFuture<>(); @@ -2313,7 +2290,7 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { mockReplicaManagerDelayedShareFetch(mockReplicaManager, shareFetchPurgatorySpy); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -2369,8 +2346,8 @@ public void testSharePartitionInitializationExceptions() throws Exception { List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( @@ -2379,7 +2356,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -2400,7 +2377,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { assertTrue(future.join().isEmpty()); Mockito.verify(sp0, times(0)).markFenced(); // Verify that the share partition is still in the cache on LeaderNotAvailableException. - assertEquals(1, partitionCacheMap.size()); + assertEquals(1, partitionCache.size()); // Return IllegalStateException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new IllegalStateException("Illegal state"))); @@ -2412,10 +2389,10 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Illegal state"); Mockito.verify(sp0, times(1)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. 
- partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Return CoordinatorNotAvailableException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new CoordinatorNotAvailableException("Coordinator not available"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, @@ -2426,10 +2403,10 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.COORDINATOR_NOT_AVAILABLE, "Coordinator not available"); Mockito.verify(sp0, times(2)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Return InvalidRequestException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new InvalidRequestException("Invalid request"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, @@ -2440,10 +2417,10 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.INVALID_REQUEST, "Invalid request"); Mockito.verify(sp0, times(3)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Return FencedStateEpochException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new FencedStateEpochException("Fenced state epoch"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, @@ -2454,10 +2431,10 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced state epoch"); Mockito.verify(sp0, times(4)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Return NotLeaderOrFollowerException to simulate initialization failure. 
when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new NotLeaderOrFollowerException("Not leader or follower"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, @@ -2468,10 +2445,10 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Not leader or follower"); Mockito.verify(sp0, times(5)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Return RuntimeException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new RuntimeException("Runtime exception"))); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, @@ -2482,7 +2459,7 @@ public void testSharePartitionInitializationExceptions() throws Exception { () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Runtime exception"); Mockito.verify(sp0, times(6)).markFenced(); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // Should have 7 fetch recorded and 6 failures as 1 fetch was waiting on initialization and // didn't error out. validateBrokerTopicStatsMetrics( @@ -2493,19 +2470,18 @@ public void testSharePartitionInitializationExceptions() throws Exception { } @Test - @SuppressWarnings("unchecked") public void testShareFetchProcessingExceptions() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); List topicIdPartitions = List.of(tp0); - Map partitionCacheMap = (Map) mock(Map.class); + SharePartitionCache partitionCache = mock(SharePartitionCache.class); // Throw the exception for first fetch request. Return share partition for next. - when(partitionCacheMap.computeIfAbsent(any(), any())) + when(partitionCache.computeIfAbsent(any(), any())) .thenThrow(new RuntimeException("Error creating instance")); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -2532,7 +2508,7 @@ public void testSharePartitionInitializationFailure() throws Exception { List topicIdPartitions = List.of(tp0); // Send map to check no share partition is created. - Map partitionCacheMap = new HashMap<>(); + SharePartitionCache partitionCache = new SharePartitionCache(); // Validate when partition is not the leader. Partition partition = mock(Partition.class); when(partition.isLeader()).thenReturn(false); @@ -2540,12 +2516,12 @@ public void testSharePartitionInitializationFailure() throws Exception { ReplicaManager replicaManager = mock(ReplicaManager.class); // First check should throw KafkaStorageException, second check should return partition which // is not leader. 
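The testSharePartitionInitializationExceptions method in the hunks above walks one maybeInitialize() failure at a time and asserts the resulting error code, the fencing of the share partition, and its eviction from the cache. Purely as a reading aid, the expected mapping can be condensed into a plain-Java table; the class simple names and error-code names are copied from the test's assertions, while the sketch class itself is hypothetical.

import java.util.Map;

public class InitializationErrorMappingSketch {
    // Condensed from the assertions in testSharePartitionInitializationExceptions: each simulated
    // maybeInitialize() failure surfaces as the listed error code, and in every listed case the
    // share partition is fenced and removed from the cache. LeaderNotAvailableException is the
    // exception to the rule in that test: the fetch completes empty and the entry stays cached.
    static final Map<String, String> EXPECTED_ERROR = Map.of(
        "IllegalStateException", "UNKNOWN_SERVER_ERROR",
        "CoordinatorNotAvailableException", "COORDINATOR_NOT_AVAILABLE",
        "InvalidRequestException", "INVALID_REQUEST",
        "FencedStateEpochException", "FENCED_STATE_EPOCH",
        "NotLeaderOrFollowerException", "NOT_LEADER_OR_FOLLOWER",
        "RuntimeException", "UNKNOWN_SERVER_ERROR"
    );

    public static void main(String[] args) {
        EXPECTED_ERROR.forEach((exception, error) ->
            System.out.println(exception + " -> Errors." + error));
    }
}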
- when(replicaManager.getPartitionOrException(any())) + when(replicaManager.getPartitionOrException(any(TopicPartition.class))) .thenThrow(new KafkaStorageException("Exception")) .thenReturn(partition); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .build(); @@ -2558,7 +2534,7 @@ public void testSharePartitionInitializationFailure() throws Exception { DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing for delayed share fetch request not finished."); validateShareFetchFutureException(future, tp0, Errors.KAFKA_STORAGE_ERROR, "Exception"); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // Validate when partition is not leader. future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, @@ -2568,7 +2544,7 @@ public void testSharePartitionInitializationFailure() throws Exception { DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing for delayed share fetch request not finished."); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // Should have 2 fetch recorded and 2 failure. validateBrokerTopicStatsMetrics( brokerTopicStats, @@ -2593,22 +2569,22 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { Partition partition0 = mock(Partition.class); when(partition0.isLeader()).thenReturn(false); ReplicaManager replicaManager = mock(ReplicaManager.class); - when(replicaManager.getPartitionOrException(any())) + when(replicaManager.getPartitionOrException(any(TopicPartition.class))) .thenReturn(partition0); // Mock share partition for tp1, so it can succeed. SharePartition sp1 = mock(SharePartition.class); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(List.of(), 0)); + when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0)); // Fail initialization for tp2. 
SharePartition sp2 = mock(SharePartition.class); - partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); when(sp2.maybeInitialize()).thenReturn(CompletableFuture.failedFuture(new FencedStateEpochException("Fenced state epoch"))); Timer mockTimer = systemTimerReaper(); @@ -2623,7 +2599,7 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withBrokerTopicStats(brokerTopicStats) .withTimer(mockTimer) .build(); @@ -2645,7 +2621,8 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { assertEquals(Errors.FENCED_STATE_EPOCH.code(), partitionDataMap.get(tp2).errorCode()); assertEquals("Fenced state epoch", partitionDataMap.get(tp2).errorMessage()); - Mockito.verify(replicaManager, times(0)).completeDelayedShareFetchRequest(any()); + Mockito.verify(replicaManager, times(1)).completeDelayedShareFetchRequest( + new DelayedShareFetchGroupKey(groupId, tp2)); Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); // Should have 1 fetch recorded and 1 failure as single topic has multiple partition fetch @@ -2665,11 +2642,11 @@ public void testReplicaManagerFetchException() { List topicIdPartitions = List.of(tp0); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( @@ -2680,7 +2657,7 @@ public void testReplicaManagerFetchException() { doThrow(new RuntimeException("Exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -2691,7 +2668,7 @@ public void testReplicaManagerFetchException() { MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Exception"); // Verify that the share partition is still in the cache on exception. - assertEquals(1, partitionCacheMap.size()); + assertEquals(1, partitionCache.size()); // Throw NotLeaderOrFollowerException from replica manager fetch which should evict instance from the cache. 
doThrow(new NotLeaderOrFollowerException("Leader exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -2699,7 +2676,7 @@ public void testReplicaManagerFetchException() { future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Leader exception"); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // Should have 2 fetch recorded and 2 failures. validateBrokerTopicStatsMetrics( brokerTopicStats, @@ -2718,19 +2695,19 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { List topicIdPartitions = List.of(tp0, tp1); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); SharePartition sp1 = mock(SharePartition.class); // Do not make the share partition acquirable hence it shouldn't be removed from the cache, // as it won't be part of replica manager readFromLog request. - when(sp1.maybeAcquireFetchLock()).thenReturn(false); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(false); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - Map partitionCacheMap = new HashMap<>(); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + SharePartitionCache partitionCache = new SharePartitionCache(); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( @@ -2742,7 +2719,7 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { doThrow(new FencedStateEpochException("Fenced exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCacheMap(partitionCacheMap) + .withPartitionCache(partitionCache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) .withBrokerTopicStats(brokerTopicStats) @@ -2753,21 +2730,21 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced exception"); // Verify that tp1 is still in the cache on exception. - assertEquals(1, partitionCacheMap.size()); - assertEquals(sp1, partitionCacheMap.get(new SharePartitionKey(groupId, tp1))); + assertEquals(1, partitionCache.size()); + assertEquals(sp1, partitionCache.get(new SharePartitionKey(groupId, tp1))); // Make sp1 acquirable and add sp0 back in partition cache. Both share partitions should be // removed from the cache. - when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); - partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache. 
doThrow(new FencedStateEpochException("Fenced exception again")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); validateShareFetchFutureException(future, List.of(tp0, tp1), Errors.FENCED_STATE_EPOCH, "Fenced exception again"); - assertTrue(partitionCacheMap.isEmpty()); + assertTrue(partitionCache.isEmpty()); // Should have 4 fetch recorded (2 fetch and 2 topics) and 3 failures as sp1 was not acquired // in first fetch and shall have empty response. Similarly, tp0 should record 2 failures and // tp1 should record 1 failure. @@ -2789,7 +2766,7 @@ public void testListenerRegistration() { ReplicaManager mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); - when(mockReplicaManager.getPartitionOrException(Mockito.any())).thenReturn(partition); + when(mockReplicaManager.getPartitionOrException((TopicPartition) Mockito.any())).thenReturn(partition); sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) @@ -2814,33 +2791,33 @@ public void testListenerRegistration() { public void testSharePartitionListenerOnFailed() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - Map partitionCacheMap = new HashMap<>(); + SharePartitionCache partitionCache = new SharePartitionCache(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); - testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onFailed); + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache); + testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onFailed); } @Test public void testSharePartitionListenerOnDeleted() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - Map partitionCacheMap = new HashMap<>(); + SharePartitionCache partitionCache = new SharePartitionCache(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); - testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onDeleted); + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache); + testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onDeleted); } @Test public void testSharePartitionListenerOnBecomingFollower() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - Map partitionCacheMap = new HashMap<>(); + SharePartitionCache partitionCache = new SharePartitionCache(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); - testSharePartitionListener(sharePartitionKey, partitionCacheMap, 
mockReplicaManager, partitionListener::onBecomingFollower);
+        SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache);
+        testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onBecomingFollower);
     }
     @Test
@@ -2899,6 +2876,180 @@ public void testFetchMessagesRotatePartitions() {
         validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1);
     }
+    @Test
+    public void testCreateIdleShareFetchTask() throws Exception {
+        ReplicaManager replicaManager = mock(ReplicaManager.class);
+
+        MockTimer mockTimer = new MockTimer(time);
+        long maxWaitMs = 1000L;
+
+        // Set up the mock to capture and add the timer task.
+        Mockito.doAnswer(invocation -> {
+            TimerTask timerTask = invocation.getArgument(0);
+            mockTimer.add(timerTask);
+            return null;
+        }).when(replicaManager).addShareFetchTimerRequest(Mockito.any(TimerTask.class));
+
+        sharePartitionManager = SharePartitionManagerBuilder.builder()
+            .withReplicaManager(replicaManager)
+            .withTime(time)
+            .withTimer(mockTimer)
+            .build();
+
+        CompletableFuture future = sharePartitionManager.createIdleShareFetchTimerTask(maxWaitMs);
+        // The future should not be completed immediately.
+        assertFalse(future.isDone());
+
+        mockTimer.advanceClock(maxWaitMs / 2);
+        assertFalse(future.isDone());
+
+        mockTimer.advanceClock((maxWaitMs / 2) + 1);
+        // Verify the future is completed after the wait time.
+        assertTrue(future.isDone());
+        assertFalse(future.isCompletedExceptionally());
+    }
+
+    @Test
+    public void testOnShareVersionToggle() {
+        String groupId = "grp";
+        SharePartition sp0 = mock(SharePartition.class);
+        SharePartition sp1 = mock(SharePartition.class);
+        SharePartition sp2 = mock(SharePartition.class);
+        SharePartition sp3 = mock(SharePartition.class);
+
+        // Mock the share partitions corresponding to the topic partitions.
+        SharePartitionCache partitionCache = new SharePartitionCache();
+        partitionCache.put(
+            new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0))), sp0
+        );
+        partitionCache.put(
+            new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0))), sp1
+        );
+        partitionCache.put(
+            new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0))), sp2
+        );
+        partitionCache.put(
+            new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo4", 0))), sp3
+        );
+        sharePartitionManager = SharePartitionManagerBuilder.builder()
+            .withPartitionCache(partitionCache)
+            .build();
+        assertEquals(4, partitionCache.size());
+        sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false);
+        // Because we are toggling to a share version which does not support share groups, the share partition cache must be cleared.
+        assertEquals(0, partitionCache.size());
+        // Check that all share partitions have been fenced.
+        Mockito.verify(sp0).markFenced();
+        Mockito.verify(sp1).markFenced();
+        Mockito.verify(sp2).markFenced();
+        Mockito.verify(sp3).markFenced();
+    }
+
+    @Test
+    public void testOnShareVersionToggleWhenEnabledFromConfig() {
+        SharePartition sp0 = mock(SharePartition.class);
+        // Mock the share partitions corresponding to the topic partitions.
+        SharePartitionCache partitionCache = new SharePartitionCache();
+        partitionCache.put(
+            new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))), sp0
+        );
+        sharePartitionManager = SharePartitionManagerBuilder.builder()
+            .withPartitionCache(partitionCache)
+            .build();
+        assertEquals(1, partitionCache.size());
+        sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, true);
+        // Though the share version is toggled off, it is still enabled from config, hence the cache should not be cleared.
+        assertEquals(1, partitionCache.size());
+        Mockito.verify(sp0, times(0)).markFenced();
+    }
+
+    @Test
+    public void testShareGroupListener() {
+        String groupId = "grp";
+        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
+        TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1));
+        Uuid memberId1 = Uuid.randomUuid();
+        Uuid memberId2 = Uuid.randomUuid();
+
+        SharePartition sp0 = mock(SharePartition.class);
+        SharePartition sp1 = mock(SharePartition.class);
+
+        ShareSessionCache cache = new ShareSessionCache(10);
+        cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID);
+        cache.maybeCreateSession(groupId, memberId2, new ImplicitLinkedHashCollection<>(), "id-2");
+
+        SharePartitionCache partitionCache = new SharePartitionCache();
+        partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0);
+        partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp1), k -> sp1);
+
+        sharePartitionManager = SharePartitionManagerBuilder.builder()
+            .withCache(cache)
+            .withPartitionCache(partitionCache)
+            .withReplicaManager(mockReplicaManager)
+            .build();
+
+        assertEquals(2, cache.size());
+        assertEquals(2, partitionCache.size());
+
+        // Invoke listeners by simulating a connection disconnect for memberId1.
+        cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID);
+        // Session cache should remove memberId1.
+        assertEquals(1, cache.size());
+        // Partition cache should not remove the share partitions as the group is not empty.
+        assertEquals(2, partitionCache.size());
+        assertNotNull(cache.get(new ShareSessionKey(groupId, memberId2)));
+
+        // Invoke listeners by simulating a connection disconnect for memberId2.
+        cache.connectionDisconnectListener().onDisconnect("id-2");
+        // Session cache should remove memberId2.
+        assertEquals(0, cache.size());
+        // Partition cache should remove the share partitions as the group is empty.
+        assertEquals(0, partitionCache.size());
+
+        Mockito.verify(sp0, times(1)).markFenced();
+        Mockito.verify(sp1, times(1)).markFenced();
+        Mockito.verify(mockReplicaManager, times(2)).removeListener(any(), any());
+    }
+
+    @Test
+    public void testShareGroupListenerWithEmptyCache() {
+        String groupId = "grp";
+        TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
+        Uuid memberId1 = Uuid.randomUuid();
+
+        SharePartition sp0 = mock(SharePartition.class);
+
+        ShareSessionCache cache = new ShareSessionCache(10);
+        cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID);
+
+        SharePartitionCache partitionCache = spy(new SharePartitionCache());
+        partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0);
+
+        sharePartitionManager = SharePartitionManagerBuilder.builder()
+            .withCache(cache)
+            .withPartitionCache(partitionCache)
+            .withReplicaManager(mockReplicaManager)
+            .build();
+
+        assertEquals(1, cache.size());
+        assertEquals(1, partitionCache.size());
+
+        // Clean up the share session and partition cache.
+        sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false);
+        assertEquals(0, cache.size());
+        assertEquals(0, partitionCache.size());
+
+        Mockito.verify(sp0, times(1)).markFenced();
+        Mockito.verify(mockReplicaManager, times(1)).removeListener(any(), any());
+        Mockito.verify(partitionCache, times(0)).topicIdPartitionsForGroup(groupId);
+
+        // Invoke listeners by simulating a connection disconnect for the member. As the group is already empty,
+        // the onGroupEmpty method should be invoked and should complete without any exception.
+        cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID);
+        // Verify that the listener is called for the group.
+        Mockito.verify(partitionCache, times(1)).topicIdPartitionsForGroup(groupId);
+    }
+
     private Timer systemTimerReaper() {
         return new SystemTimerReaper(
             TIMER_NAME_PREFIX + "-test-reaper",
@@ -2913,7 +3064,7 @@ private void assertNoReaperThreadsPendingClose() throws InterruptedException {
     private void testSharePartitionListener(
         SharePartitionKey sharePartitionKey,
-        Map partitionCacheMap,
+        SharePartitionCache partitionCache,
         ReplicaManager mockReplicaManager,
         Consumer listenerConsumer
     ) {
@@ -2924,22 +3075,22 @@ private void testSharePartitionListener(
         SharePartition sp0 = mock(SharePartition.class);
         SharePartition sp1 = mock(SharePartition.class);
-        partitionCacheMap.put(sharePartitionKey, sp0);
-        partitionCacheMap.put(spk, sp1);
+        partitionCache.put(sharePartitionKey, sp0);
+        partitionCache.put(spk, sp1);
         // Invoke listener for first share partition.
         listenerConsumer.accept(sharePartitionKey.topicIdPartition().topicPartition());
         // Validate that the share partition is removed from the cache.
-        assertEquals(1, partitionCacheMap.size());
-        assertFalse(partitionCacheMap.containsKey(sharePartitionKey));
+        assertEquals(1, partitionCache.size());
+        assertFalse(partitionCache.containsKey(sharePartitionKey));
         verify(sp0, times(1)).markFenced();
         verify(mockReplicaManager, times(1)).removeListener(any(), any());
         // Invoke listener for non-matching share partition.
         listenerConsumer.accept(tp);
         // The non-matching share partition should not be removed as the listener is attached to a different topic partition.
-        assertEquals(1, partitionCacheMap.size());
+        assertEquals(1, partitionCache.size());
         verify(sp1, times(0)).markFenced();
         // Verify the remove listener is not called for the second share partition.
verify(mockReplicaManager, times(1)).removeListener(any(), any()); @@ -3065,20 +3216,22 @@ private void validateBrokerTopicStatsMetrics( static Seq> buildLogReadResult(List topicIdPartitions) { List> logReadResults = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( - new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.EMPTY), - Option.empty(), + new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.withRecords( + Compression.NONE, new SimpleRecord("test-key".getBytes(), "test-value".getBytes()))), + Optional.empty(), -1L, -1L, -1L, -1L, -1L, - Option.empty(), - Option.empty(), - Option.empty() + OptionalLong.empty(), + OptionalInt.empty(), + Optional.empty() )))); return CollectionConverters.asScala(logReadResults).toSeq(); } + @SuppressWarnings("unchecked") static void mockReplicaManagerDelayedShareFetch(ReplicaManager replicaManager, DelayedOperationPurgatory delayedShareFetchPurgatory) { doAnswer(invocationOnMock -> { @@ -3108,8 +3261,8 @@ static class SharePartitionManagerBuilder { private final Persister persister = new NoOpStatePersister(); private ReplicaManager replicaManager = mock(ReplicaManager.class); private Time time = new MockTime(); - private ShareSessionCache cache = new ShareSessionCache(10, 1000); - private Map partitionCacheMap = new HashMap<>(); + private ShareSessionCache cache = new ShareSessionCache(10); + private SharePartitionCache partitionCache = new SharePartitionCache(); private Timer timer = new MockTimer(); private ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); private BrokerTopicStats brokerTopicStats; @@ -3129,8 +3282,8 @@ private SharePartitionManagerBuilder withCache(ShareSessionCache cache) { return this; } - SharePartitionManagerBuilder withPartitionCacheMap(Map partitionCacheMap) { - this.partitionCacheMap = partitionCacheMap; + SharePartitionManagerBuilder withPartitionCache(SharePartitionCache partitionCache) { + this.partitionCache = partitionCache; return this; } @@ -3157,11 +3310,12 @@ public SharePartitionManager build() { return new SharePartitionManager(replicaManager, time, cache, - partitionCacheMap, + partitionCache, DEFAULT_RECORD_LOCK_DURATION_MS, timer, MAX_DELIVERY_COUNT, MAX_IN_FLIGHT_MESSAGES, + REMOTE_FETCH_MAX_WAIT_MS, persister, mock(GroupConfigManager.class), shareGroupMetrics, diff --git a/core/src/test/java/kafka/server/share/SharePartitionTest.java b/core/src/test/java/kafka/server/share/SharePartitionTest.java index 64781648774d6..25432b4ae15e7 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionTest.java @@ -17,8 +17,7 @@ package kafka.server.share; import kafka.server.ReplicaManager; -import kafka.server.share.SharePartition.InFlightState; -import kafka.server.share.SharePartition.RecordState; +import kafka.server.share.SharePartition.GapWindow; import kafka.server.share.SharePartition.SharePartitionState; import kafka.server.share.SharePartitionManager.SharePartitionListener; @@ -28,10 +27,10 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; -import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRequestException; +import 
org.apache.kafka.common.errors.LeaderNotAvailableException; import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; @@ -55,6 +54,10 @@ import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; +import org.apache.kafka.server.share.fetch.AcquisitionLockTimerTask; +import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; +import org.apache.kafka.server.share.fetch.InFlightState; +import org.apache.kafka.server.share.fetch.RecordState; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.metrics.SharePartitionMetrics; import org.apache.kafka.server.share.persister.NoOpStatePersister; @@ -67,9 +70,9 @@ import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.FutureUtils; -import org.apache.kafka.server.util.timer.SystemTimer; -import org.apache.kafka.server.util.timer.SystemTimerReaper; +import org.apache.kafka.server.util.timer.MockTimer; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.apache.kafka.test.TestUtils; @@ -107,6 +110,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +@SuppressWarnings("resource") public class SharePartitionTest { private static final String ACQUISITION_LOCK_NEVER_GOT_RELEASED = "Acquisition lock never got released."; @@ -115,9 +119,9 @@ public class SharePartitionTest { private static final TopicIdPartition TOPIC_ID_PARTITION = new TopicIdPartition(Uuid.randomUuid(), 0, "test-topic"); private static final String MEMBER_ID = "member-1"; private static final Time MOCK_TIME = new MockTime(); - private static final short MAX_IN_FLIGHT_MESSAGES = 200; + private static final short MAX_IN_FLIGHT_RECORDS = 200; private static final int ACQUISITION_LOCK_TIMEOUT_MS = 100; - private static final int DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS = 300; + private static final int DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS = 120; private static final int BATCH_SIZE = 500; private static final int DEFAULT_FETCH_OFFSET = 0; private static final int MAX_FETCH_RECORDS = Integer.MAX_VALUE; @@ -129,8 +133,7 @@ public class SharePartitionTest { @BeforeEach public void setUp() { kafka.utils.TestUtils.clearYammerMetrics(); - mockTimer = new SystemTimerReaper("share-group-lock-timeout-test-reaper", - new SystemTimer("share-group-lock-test-timeout")); + mockTimer = new MockTimer(); sharePartitionMetrics = new SharePartitionMetrics(GROUP_ID, TOPIC_ID_PARTITION.topic(), TOPIC_ID_PARTITION.partition()); } @@ -140,45 +143,6 @@ public void tearDown() throws Exception { sharePartitionMetrics.close(); } - @Test - public void testRecordStateValidateTransition() { - // Null check. - assertThrows(NullPointerException.class, () -> RecordState.AVAILABLE.validateTransition(null)); - // Same state transition check. 
- assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ACQUIRED.validateTransition(RecordState.ACQUIRED)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Acknowledged state. - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACQUIRED)); - assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Archived state. - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.AVAILABLE)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); - // Invalid state transition to any other state from Available state other than Acquired. - assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ACKNOWLEDGED)); - assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ARCHIVED)); - - // Successful transition from Available to Acquired. - assertEquals(RecordState.ACQUIRED, RecordState.AVAILABLE.validateTransition(RecordState.ACQUIRED)); - // Successful transition from Acquired to any state. - assertEquals(RecordState.AVAILABLE, RecordState.ACQUIRED.validateTransition(RecordState.AVAILABLE)); - assertEquals(RecordState.ACKNOWLEDGED, RecordState.ACQUIRED.validateTransition(RecordState.ACKNOWLEDGED)); - assertEquals(RecordState.ARCHIVED, RecordState.ACQUIRED.validateTransition(RecordState.ARCHIVED)); - } - - @Test - public void testRecordStateForId() { - assertEquals(RecordState.AVAILABLE, RecordState.forId((byte) 0)); - assertEquals(RecordState.ACQUIRED, RecordState.forId((byte) 1)); - assertEquals(RecordState.ACKNOWLEDGED, RecordState.forId((byte) 2)); - assertEquals(RecordState.ARCHIVED, RecordState.forId((byte) 4)); - // Invalid check. - assertThrows(IllegalArgumentException.class, () -> RecordState.forId((byte) 5)); - } - @Test public void testMaybeInitialize() throws InterruptedException { Persister persister = Mockito.mock(Persister.class); @@ -759,7 +723,7 @@ public void testMaybeInitializeWithErrorPartitionResponse() { result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(FencedStateEpochException.class, result); + assertFutureThrows(NotLeaderOrFollowerException.class, result); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock FENCED_LEADER_EPOCH error. @@ -789,6 +753,20 @@ public void testMaybeInitializeWithErrorPartitionResponse() { assertTrue(result.isCompletedExceptionally()); assertFutureThrows(UnknownServerException.class, result); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); + + // Mock NETWORK_EXCEPTION error. 
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NETWORK_EXCEPTION.code(), Errors.NETWORK_EXCEPTION.message(), + List.of()))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(UnknownServerException.class, result); + assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @Test @@ -936,6 +914,19 @@ public void testMaybeInitializeWithReadException() { assertThrows(RuntimeException.class, sharePartition2::maybeInitialize); } + @Test + public void testMaybeInitializeFencedSharePartition() { + SharePartition sharePartition = SharePartitionBuilder.builder().build(); + // Mark the share partition as fenced. + sharePartition.markFenced(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(LeaderNotAvailableException.class, result); + assertEquals(SharePartitionState.FENCED, sharePartition.partitionState()); + } + @Test public void testMaybeInitializeStateBatchesWithGapAtBeginning() { Persister persister = Mockito.mock(Persister.class); @@ -974,11 +965,11 @@ public void testMaybeInitializeStateBatchesWithGapAtBeginning() { assertEquals(3, sharePartition.cachedState().get(21L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(21L).offsetState()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(10, initialReadGapOffset.gapStartOffset()); - assertEquals(30, initialReadGapOffset.endOffset()); + assertEquals(10, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(30, persisterReadResultGapWindow.endOffset()); } @Test @@ -1019,11 +1010,11 @@ public void testMaybeInitializeStateBatchesWithMultipleGaps() { assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(30L).offsetState()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(10, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(10, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -1060,11 +1051,11 @@ public void testMaybeInitializeStateBatchesWithGapNotAtBeginning() { assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(30L).offsetState()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - 
assertEquals(21, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -1091,184 +1082,850 @@ public void testMaybeInitializeStateBatchesWithoutGaps() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(31, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - // Since there are no gaps present in the readState response, initialReadGapOffset should be null - assertNull(initialReadGapOffset); + // Since there are no gaps present in the readState response, persisterReadResultGapWindow should be null + assertNull(persisterReadResultGapWindow); } @Test - public void testAcquireSingleRecord() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - MemoryRecords records = memoryRecords(1); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 1); - - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(1, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(0, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); + public void testMaybeInitializeAndAcquire() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 1, - "In-flight message count should be 1."); - assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().sum()); - } + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); - @Test - public void testAcquireMultipleRecords() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - 
.withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - MemoryRecords records = memoryRecords(5, 10); + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(10, sharePartition.nextFetchOffset()); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3L, 5); + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create a single batch record that covers the entire range from 10 to 30 of initial read gap. + // The records in the batch are from 10 to 49. + MemoryRecords records = memoryRecords(40, 10); + // Set max fetch records to 1, records will be acquired till the first gap is encountered. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 1, + 10, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 5); - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertArrayEquals(expectedAcquiredRecord(10, 14, 1).toArray(), acquiredRecordsList.toArray()); assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); + assertEquals(4, sharePartition.cachedState().size()); assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(10L).offsetState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(15L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5, - "In-flight message count should be 5."); - assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum()); - } + // Send the same batch again to acquire the next set of records. 
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 10, + 15, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 13); - @Test - public void testAcquireWithMaxFetchRecords() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Less-number of records than max fetch records. - MemoryRecords records = memoryRecords(5); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 5); + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4)); - assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(31, sharePartition.nextFetchOffset()); + assertEquals(6, sharePartition.cachedState().size()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(1, sharePartition.cachedState().get(19L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(19L).offsetState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); + assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); + assertEquals(1, sharePartition.cachedState().get(23L).batchDeliveryCount()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); + assertEquals(30L, sharePartition.endOffset()); + // As all the gaps are now filled, the persisterReadResultGapWindow should be null. + assertNull(sharePartition.persisterReadResultGapWindow()); - // More-number of records than max fetch records, but from 0 offset hence previous 10 records - // should be ignored and new full batch till end should be acquired. - records = memoryRecords(25); + // Now initial read gap is filled, so the complete batch can be acquired despite max fetch records being 1. 
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 20); + MEMBER_ID, + BATCH_SIZE, + 1, + 31, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 19); - assertArrayEquals(expectedAcquiredRecord(5, 24, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); - assertEquals(24, sharePartition.cachedState().get(5L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); + assertArrayEquals(expectedAcquiredRecord(31, 49, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(50, sharePartition.nextFetchOffset()); + assertEquals(7, sharePartition.cachedState().size()); + assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); + assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); + assertEquals(1, sharePartition.cachedState().get(31L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(31L).offsetState()); + assertEquals(49L, sharePartition.endOffset()); } @Test - public void testAcquireWithMultipleBatchesAndMaxFetchRecords() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); + public void testMaybeInitializeAndAcquireWithHigherMaxFetchRecords() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - // Create 3 batches of records. 
- ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 15, 15).close(); - memoryRecordsBuilder(buffer, 15, 30).close(); + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); - buffer.flip(); + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(10, sharePartition.nextFetchOffset()); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Acquire 10 records. + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create a single batch record that covers the entire range from 10 to 30 of initial read gap. + // The records in the batch are from 10 to 49. + MemoryRecords records = memoryRecords(40, 10); + // Set max fetch records to 500, all records should be acquired. List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records, 10), - FETCH_ISOLATION_HWM), - 20); + MEMBER_ID, + BATCH_SIZE, + 500, + 10, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 37); - // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch - // should be skipped. 
- assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(30, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(50, sharePartition.nextFetchOffset()); + assertEquals(7, sharePartition.cachedState().size()); assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(29, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); + assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); + assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); + assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); + assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 20, - "In-flight message count should be 20."); - assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().sum()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); + assertEquals(49L, sharePartition.endOffset()); + // As all the gaps are now filled, the persisterReadResultGapWindow should be null. 
+ assertNull(sharePartition.persisterReadResultGapWindow()); } @Test - public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 0); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); - - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(5, sharePartition.nextFetchOffset()); + public void testMaybeInitializeAndAcquireWithFetchBatchLastOffsetWithinCachedBatch() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. - records = memoryRecords(10, 0); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); - assertArrayEquals(expectedAcquiredRecords(memoryRecords(5, 5), 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); assertEquals(10, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - } - - @Test - public void testAcquireSameBatchAgain() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 10); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create a single batch record that ends in between the cached batch and the fetch offset is + // post startOffset. + MemoryRecords records = memoryRecords(16, 12); + // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. 
+ List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500, + 10, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 13); + + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(12, 14, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(28, sharePartition.nextFetchOffset()); + assertEquals(6, sharePartition.cachedState().size()); + assertEquals(12, sharePartition.cachedState().get(12L).firstOffset()); + assertEquals(14, sharePartition.cachedState().get(12L).lastOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); + assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); + + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(12L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state()); + assertEquals(30L, sharePartition.endOffset()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + } + + @Test + public void testMaybeInitializeAndAcquireWithFetchBatchPriorStartOffset() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); 
+ + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(10, sharePartition.nextFetchOffset()); + + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create a single batch record where first offset is prior startOffset. + MemoryRecords records = memoryRecords(16, 6); + // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500, + 10, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 10); + + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(23, sharePartition.nextFetchOffset()); + assertEquals(5, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); + assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertEquals(30L, sharePartition.endOffset()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + } + + @Test + public void testMaybeInitializeAndAcquireWithMultipleBatches() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + 
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); + + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(5, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(5, sharePartition.nextFetchOffset()); + + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create multiple batch records that covers the entire range from 5 to 30 of initial read gap. + // The records in the batch are from 5 to 49. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 2, 5).close(); + memoryRecordsBuilder(buffer, 1, 8).close(); + memoryRecordsBuilder(buffer, 2, 10).close(); + memoryRecordsBuilder(buffer, 6, 13).close(); + memoryRecordsBuilder(buffer, 3, 19).close(); + memoryRecordsBuilder(buffer, 9, 22).close(); + memoryRecordsBuilder(buffer, 19, 31).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Set max fetch records to 1, records will be acquired till the first gap is encountered. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 1, + 5L, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 2); + + assertArrayEquals(expectedAcquiredRecord(5, 6, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(7, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.cachedState().size()); + assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); + assertEquals(6, sharePartition.cachedState().get(5L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(7L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + + // Remove first batch from the records as the fetch offset has moved forward to 7 offset. 
+ List batch = TestUtils.toList(records.batches()); + records = records.slice(batch.get(0).sizeInBytes(), records.sizeInBytes() - batch.get(0).sizeInBytes()); + // Send the batch again to acquire the next set of records. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 3, + 7L, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 4); + + assertArrayEquals(expectedAcquiredRecord(8, 11, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(5, sharePartition.cachedState().size()); + assertEquals(8, sharePartition.cachedState().get(8L).firstOffset()); + assertEquals(11, sharePartition.cachedState().get(8L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(8L).batchState()); + assertEquals(1, sharePartition.cachedState().get(8L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(8L).offsetState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertEquals(30L, sharePartition.endOffset()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(12L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + + // Remove the next 2 batches from the records as the fetch offset has moved forward to 12 offset. + int size = batch.get(1).sizeInBytes() + batch.get(2).sizeInBytes(); + records = records.slice(size, records.sizeInBytes() - size); + // Send the records with 8 as max fetch records to acquire new and existing cached batches. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 8, + 12, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 10); + + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(13, 14, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(26, sharePartition.nextFetchOffset()); + assertEquals(8, sharePartition.cachedState().size()); + assertEquals(13, sharePartition.cachedState().get(13L).firstOffset()); + assertEquals(14, sharePartition.cachedState().get(13L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(13L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); + assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertEquals(30L, sharePartition.endOffset()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(26L, 
sharePartition.persisterReadResultGapWindow().gapStartOffset()); + + // Remove the next 2 batches from the records as the fetch offset has moved forward to 26 offset. + // Do not remove the 5th batch as it's only partially acquired. + size = batch.get(3).sizeInBytes() + batch.get(4).sizeInBytes(); + records = records.slice(size, records.sizeInBytes() - size); + // Send the records with 10 as max fetch records to acquire the existing and till end of the + // fetched data. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 10, + 26, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 24); + + expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(26, 30, 4)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(50, sharePartition.nextFetchOffset()); + assertEquals(9, sharePartition.cachedState().size()); + assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); + assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); + assertEquals(49L, sharePartition.endOffset()); + // As all the gaps are now filled, the persisterReadResultGapWindow should be null. + assertNull(sharePartition.persisterReadResultGapWindow()); + } + + @Test + public void testMaybeInitializeAndAcquireWithMultipleBatchesAndLastOffsetWithinCachedBatch() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); + + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(5, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(5, sharePartition.nextFetchOffset()); + + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + 
assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create multiple record batches that end within the cached batch, with the fetch offset
+ // past the startOffset.
+ ByteBuffer buffer = ByteBuffer.allocate(4096);
+ memoryRecordsBuilder(buffer, 2, 7).close();
+ memoryRecordsBuilder(buffer, 2, 10).close();
+ memoryRecordsBuilder(buffer, 6, 13).close();
+ // Though offset 19 is a gap in the fetched batches, it will still be acquired.
+ memoryRecordsBuilder(buffer, 8, 20).close();
+ buffer.flip();
+ MemoryRecords records = MemoryRecords.readableRecords(buffer);
+ // Set max fetch records to 500 so that records are acquired up to the last offset of the fetched batch.
+ List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 5,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 18);
+
+ List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(7, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(28, sharePartition.nextFetchOffset());
+ assertEquals(6, sharePartition.cachedState().size());
+ assertEquals(7, sharePartition.cachedState().get(7L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(7L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithMultipleBatchesPriorStartOffset() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+
PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), + List.of( + new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), + new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); + + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(10, sharePartition.nextFetchOffset()); + + assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); + assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); + assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + + // Create multiple batch records where multiple batches base offsets are prior startOffset. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 2, 3).close(); + memoryRecordsBuilder(buffer, 1, 6).close(); + memoryRecordsBuilder(buffer, 4, 8).close(); + memoryRecordsBuilder(buffer, 10, 13).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. 
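+ // Offsets prior to the startOffset (10) are not acquired; acquisition starts at offset 10 even though the fetched batch at base offset 8 straddles it.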
+ List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500, + 10, + fetchPartitionData(records), + FETCH_ISOLATION_HWM), + 10); + + List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); + expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(23, sharePartition.nextFetchOffset()); + assertEquals(5, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); + assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); + assertEquals(30L, sharePartition.endOffset()); + assertNotNull(sharePartition.persisterReadResultGapWindow()); + assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + } + + @Test + public void testAcquireSingleRecord() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .build(); + MemoryRecords records = memoryRecords(1); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 1); + + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(1, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(0, sharePartition.cachedState().get(0L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); + + TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, + "In-flight batch count should be 1."); + TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 1, + "In-flight message count should be 1."); + assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().sum()); + } + + @Test + public void testAcquireMultipleRecords() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .build(); + MemoryRecords records = memoryRecords(5, 10); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3L, 5); + + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, 
sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+ assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
+ assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(10L).offsetState());
+
+ TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1,
+ "In-flight batch count should be 1.");
+ TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5,
+ "In-flight message count should be 5.");
+ assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum());
+ }
+
+ @Test
+ public void testAcquireWithMaxFetchRecords() {
+ SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
+ // Fewer records than max fetch records.
+ MemoryRecords records = memoryRecords(5);
+ List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 10,
+ DEFAULT_FETCH_OFFSET,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 5);
+
+ assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(5, sharePartition.nextFetchOffset());
+ assertEquals(1, sharePartition.cachedState().size());
+ assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
+ assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
+ assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
+ assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(0L).offsetState());
+
+ // More records than max fetch records, but fetched from offset 0, hence the already acquired
+ // records should be ignored and the new full batch should be acquired till its end.
+ records = memoryRecords(25);
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 10,
+ DEFAULT_FETCH_OFFSET,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 20);
+
+ assertArrayEquals(expectedAcquiredRecord(5, 24, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(25, sharePartition.nextFetchOffset());
+ assertEquals(2, sharePartition.cachedState().size());
+ assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
+ assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
+ assertEquals(24, sharePartition.cachedState().get(5L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
+ assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId());
+ assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(5L).offsetState());
+ }
+
+ @Test
+ public void testAcquireWithMultipleBatchesAndMaxFetchRecords() throws InterruptedException {
+ SharePartition sharePartition = SharePartitionBuilder.builder()
+ .withState(SharePartitionState.ACTIVE)
+ .withSharePartitionMetrics(sharePartitionMetrics)
+ .build();
+
+ // Create 3 batches of records.
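+ // Batches: 10-14 (5 records), 15-29 (15 records) and 30-44 (15 records).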
+ ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 15, 15).close(); + memoryRecordsBuilder(buffer, 15, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Acquire 10 records. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 10, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 10), + FETCH_ISOLATION_HWM), + 20); + + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch + // should be skipped. + assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(30, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); + assertEquals(29, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); + + TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, + "In-flight batch count should be 1."); + TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 20, + "In-flight message count should be 20."); + assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().sum()); + } + + @Test + public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 0); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(5, sharePartition.nextFetchOffset()); + + // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. 
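+ // Offsets 0-4 are already in flight (ACQUIRED) from the previous fetch, so only the new offsets 5-9 are acquirable.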
+ records = memoryRecords(10, 0); + acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + + assertArrayEquals(expectedAcquiredRecords(memoryRecords(5, 5), 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + } + + @Test + public void testAcquireSameBatchAgain() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 10); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); assertEquals(15, sharePartition.nextFetchOffset()); @@ -1522,6 +2179,150 @@ public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() assertTrue(sharePartition.cachedState().containsKey(12L)); } + @Test + public void testAcquireWithMaxInFlightRecordsAndTryAcquireNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightRecords(20) + .build(); + + // Acquire records, all 10 records should be acquired as within maxInflightRecords limit. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(memoryRecords(10, 0), 0), + FETCH_ISOLATION_HWM), + 10); + // Validate all 10 records will be acquired as the maxInFlightRecords is 20. + assertArrayEquals(expectedAcquiredRecord(0, 9, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(10, sharePartition.nextFetchOffset()); + + // Create 4 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 10, 15).close(); + memoryRecordsBuilder(buffer, 5, 25).close(); + memoryRecordsBuilder(buffer, 2, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + // Acquire records, should be acquired till maxInFlightRecords i.e. 20 records. As second batch + // is ending at 24 offset, hence additional 15 records will be acquired. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 0), + FETCH_ISOLATION_HWM), + 15); + + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch + // should be skipped. + assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(25, sharePartition.nextFetchOffset()); + + // Should not acquire any records as the share partition is at capacity and fetch offset is beyond + // the end offset. + fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + 25 /* Fetch Offset */, + fetchPartitionData(memoryRecords(10, 25), 10), + FETCH_ISOLATION_HWM), + 0); + + assertEquals(25, sharePartition.nextFetchOffset()); + } + + @Test + public void testAcquireWithMaxInFlightRecordsAndReleaseLastOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightRecords(20) + .build(); + + // Create 4 batches of records. 
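+ // Batches: 10-14, 15-24, 25-29 and 30-32; with a max in-flight limit of 20 records only offsets 10-29 can be acquired at once.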
+ ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 10, 15).close(); + memoryRecordsBuilder(buffer, 5, 25).close(); + memoryRecordsBuilder(buffer, 3, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Acquire records, should be acquired till maxInFlightRecords i.e. 20 records till 29 offset. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 10), + FETCH_ISOLATION_HWM), + 20); + + // Validate 3 batches are fetched and fourth batch should be skipped. Max in-flight records + // limit is reached. + assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(30, sharePartition.nextFetchOffset()); + + // Release middle batch. + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + List.of(new ShareAcknowledgementBatch(15, 19, List.of((byte) 2)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); + // Validate the nextFetchOffset is updated to 15. + assertEquals(15, sharePartition.nextFetchOffset()); + + // The complete released batch should be acquired but not the last batch, starting at offset 30, + // as the lastOffset is adjusted according to the endOffset. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + 15 /* Fetch Offset */, + fetchPartitionData(records, 10), + FETCH_ISOLATION_HWM), + 5); + + // Validate 1 batch is fetched, with 5 records till end of batch, last available batch should + // not be acquired + assertArrayEquals(expectedAcquiredRecords(15, 19, 2).toArray(), acquiredRecordsList.toArray()); + assertEquals(30, sharePartition.nextFetchOffset()); + + // Release last offset of the acquired batch. Only 1 record should be released and later acquired. + ackResult = sharePartition.acknowledge( + MEMBER_ID, + List.of(new ShareAcknowledgementBatch(29, 29, List.of((byte) 2)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); + // Validate the nextFetchOffset is updated to 29. + assertEquals(29, sharePartition.nextFetchOffset()); + + // Only the last record of the acquired batch should be acquired again. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + 29 /* Fetch Offset */, + fetchPartitionData(records, 10), + FETCH_ISOLATION_HWM), + 1); + + // Validate 1 record is acquired. 
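+ // The delivery count for offset 29 is now 2, as this is its second delivery.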
+ assertArrayEquals(expectedAcquiredRecord(29, 29, 2).toArray(), acquiredRecordsList.toArray()); + assertEquals(30, sharePartition.nextFetchOffset()); + } + @Test public void testNextFetchOffsetInitialState() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); @@ -1538,7 +2339,7 @@ public void testNextFetchOffsetWithCachedStateAcquired() { @Test public void testNextFetchOffsetWithFindAndCachedStateEmpty() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.findNextFetchOffset(true); + sharePartition.updateFindNextFetchOffset(true); assertTrue(sharePartition.findNextFetchOffset()); assertEquals(0, sharePartition.nextFetchOffset()); assertFalse(sharePartition.findNextFetchOffset()); @@ -1547,7 +2348,7 @@ public void testNextFetchOffsetWithFindAndCachedStateEmpty() { @Test public void testNextFetchOffsetWithFindAndCachedState() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.findNextFetchOffset(true); + sharePartition.updateFindNextFetchOffset(true); assertTrue(sharePartition.findNextFetchOffset()); fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); @@ -1558,29 +2359,29 @@ public void testNextFetchOffsetWithFindAndCachedState() { @Test public void testCanAcquireRecordsWithEmptyCache() { - SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightMessages(1).build(); + SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightRecords(1).build(); assertTrue(sharePartition.canAcquireRecords()); } @Test public void testCanAcquireRecordsWithCachedDataAndLimitNotReached() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(6) + .withMaxInflightRecords(6) .withState(SharePartitionState.ACTIVE) .build(); fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); - // Limit not reached as only 6 in-flight messages is the limit. + // Limit not reached as only 6 in-flight records is the limit. assertTrue(sharePartition.canAcquireRecords()); } @Test public void testCanAcquireRecordsWithCachedDataAndLimitReached() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(1) + .withMaxInflightRecords(1) .withState(SharePartitionState.ACTIVE) .build(); fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); - // Limit reached as only one in-flight message is the limit. + // Limit reached as only one in-flight record is the limit. assertFalse(sharePartition.canAcquireRecords()); } @@ -1605,12 +2406,14 @@ public void testMaybeAcquireAndReleaseFetchLock() { .withSharePartitionMetrics(sharePartitionMetrics) .build(); + Uuid fetchId = Uuid.randomUuid(); + sharePartition.maybeInitialize(); - assertTrue(sharePartition.maybeAcquireFetchLock()); + assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); // Lock cannot be acquired again, as already acquired. - assertFalse(sharePartition.maybeAcquireFetchLock()); + assertFalse(sharePartition.maybeAcquireFetchLock(fetchId)); // Release the lock. - sharePartition.releaseFetchLock(); + sharePartition.releaseFetchLock(fetchId); assertEquals(1, sharePartitionMetrics.fetchLockTimeMs().count()); assertEquals(10, sharePartitionMetrics.fetchLockTimeMs().sum()); @@ -1619,9 +2422,9 @@ public void testMaybeAcquireAndReleaseFetchLock() { assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean()); // Lock can be acquired again. 
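+ // The same fetchId can re-acquire the lock once it has been released.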
- assertTrue(sharePartition.maybeAcquireFetchLock()); + assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); // Release lock to update metrics and verify. - sharePartition.releaseFetchLock(); + sharePartition.releaseFetchLock(fetchId); assertEquals(2, sharePartitionMetrics.fetchLockTimeMs().count()); assertEquals(40, sharePartitionMetrics.fetchLockTimeMs().sum()); @@ -1649,14 +2452,15 @@ public void testRecordFetchLockRatioMetric() { .thenReturn(80L) // for time when lock is released .thenReturn(160L); // to update lock idle duration while acquiring lock again. - assertTrue(sharePartition.maybeAcquireFetchLock()); - sharePartition.releaseFetchLock(); + Uuid fetchId = Uuid.randomUuid(); + assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); + sharePartition.releaseFetchLock(fetchId); // Acquired time is 70 but last lock acquisition time was still 0, as it's the first request // when last acquisition time was recorded. The last acquisition time should be updated to 80. assertEquals(2, sharePartitionMetrics.fetchLockRatio().count()); assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean()); - assertTrue(sharePartition.maybeAcquireFetchLock()); + assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); // Update metric again with 0 as acquire time and 80 as idle duration ms. sharePartition.recordFetchLockRatioMetric(0); assertEquals(3, sharePartitionMetrics.fetchLockRatio().count()); @@ -1672,7 +2476,11 @@ public void testRecordFetchLockRatioMetric() { @Test public void testAcknowledgeSingleRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withReplicaManager(replicaManager) + .withState(SharePartitionState.ACTIVE) + .build(); MemoryRecords records1 = memoryRecords(1, 0); MemoryRecords records2 = memoryRecords(1, 1); @@ -1695,11 +2503,18 @@ public void testAcknowledgeSingleRecordBatch() { assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(1L).batchState()); assertEquals(1, sharePartition.cachedState().get(1L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(1L).offsetState()); + // Should not invoke completeDelayedShareFetchRequest as the first offset is not acknowledged yet. + Mockito.verify(replicaManager, Mockito.times(0)) + .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION)); } @Test public void testAcknowledgeMultipleRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withReplicaManager(replicaManager) + .withState(SharePartitionState.ACTIVE) + .build(); MemoryRecords records = memoryRecords(10, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10); @@ -1713,6 +2528,9 @@ public void testAcknowledgeMultipleRecordBatch() { assertEquals(15, sharePartition.nextFetchOffset()); assertEquals(0, sharePartition.cachedState().size()); + // Should invoke completeDelayedShareFetchRequest as the start offset is moved. 
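+ // In contrast to testAcknowledgeSingleRecordBatch above, the acknowledgement here moves the start offset, so exactly one completion of the delayed share fetch is expected.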
+ Mockito.verify(replicaManager, Mockito.times(1)) + .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION)); } @Test @@ -2216,12 +3034,12 @@ public void testAcquireGapAtBeginningAndRecordsFetchedFromGap() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(16, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(16, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(16, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2255,12 +3073,12 @@ public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightBatches() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(41, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(21, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2308,12 +3126,12 @@ public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightAvailableBa assertEquals(3, sharePartition.stateEpoch()); assertEquals(26, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(26, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(26, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2352,12 +3170,12 @@ public void testAcquireWhenCachedStateContainsGapsAndRecordsFetchedFromNonGapOff assertEquals(3, sharePartition.stateEpoch()); assertEquals(26, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(26, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be 
updated + assertEquals(26, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2409,12 +3227,12 @@ public void testAcquireGapAtBeginningAndFetchedRecordsOverlapMultipleInFlightBat assertEquals(3, sharePartition.stateEpoch()); assertEquals(86, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(86, initialReadGapOffset.gapStartOffset()); - assertEquals(90, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(86, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(90, persisterReadResultGapWindow.endOffset()); } @Test @@ -2453,12 +3271,12 @@ public void testAcquireGapAtBeginningAndFetchedRecordsEndJustBeforeGap() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(31, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(31, initialReadGapOffset.gapStartOffset()); - assertEquals(70, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(31, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(70, persisterReadResultGapWindow.endOffset()); } @Test @@ -2504,12 +3322,12 @@ public void testAcquireGapAtBeginningAndFetchedRecordsIncludeGapOffsetsAtEnd() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(76, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - // After records are acquired, the initialReadGapOffset should be updated - assertEquals(76, initialReadGapOffset.gapStartOffset()); - assertEquals(90, initialReadGapOffset.endOffset()); + // After records are acquired, the persisterReadResultGapWindow should be updated + assertEquals(76, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(90, persisterReadResultGapWindow.endOffset()); } @@ -2557,11 +3375,11 @@ public void testAcquireWhenRecordsFetchedFromGapAndMaxFetchRecordsIsExceeded() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(27, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(27, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(27, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2606,11 +3424,11 @@ 
public void testAcquireMaxFetchRecordsExceededAfterAcquiringGaps() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(21, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(21, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2655,11 +3473,11 @@ public void testAcquireMaxFetchRecordsExceededBeforeAcquiringGaps() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(21, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(21, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -2707,8 +3525,8 @@ public void testAcquireWhenRecordsFetchedFromGapAndPartitionContainsNaturalGaps( assertEquals(3, sharePartition.stateEpoch()); assertEquals(51, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNull(persisterReadResultGapWindow); } @Test @@ -2751,8 +3569,8 @@ public void testAcquireCachedStateInitialGapMatchesWithActualPartitionGap() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(61, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNull(persisterReadResultGapWindow); } @Test @@ -2797,8 +3615,8 @@ public void testAcquireCachedStateInitialGapOverlapsWithActualPartitionGap() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(61, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNull(persisterReadResultGapWindow); } @Test @@ -2846,8 +3664,8 @@ public void testAcquireCachedStateGapInBetweenOverlapsWithActualPartitionGap() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(61, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNull(persisterReadResultGapWindow); } @Test @@ -2887,11 +3705,11 @@ public void testAcquireWhenRecordsFetchedAfterGapsAreFetched() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(41, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset 
initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(31, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(31, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); // Fetching from the nextFetchOffset so that endOffset moves ahead records = memoryRecords(15, 41); @@ -2907,9 +3725,9 @@ public void testAcquireWhenRecordsFetchedAfterGapsAreFetched() { assertEquals(3, sharePartition.stateEpoch()); assertEquals(56, sharePartition.nextFetchOffset()); - // Since the endOffset is now moved ahead, the initialReadGapOffset should be empty - initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNull(initialReadGapOffset); + // Since the endOffset is now moved ahead, the persisterReadResultGapWindow should be empty + persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNull(persisterReadResultGapWindow); } @Test @@ -2925,6 +3743,7 @@ public void testAcquisitionLockForAcquiringSingleRecord() throws InterruptedExce assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.nextFetchOffset() == 0 && sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && @@ -2951,6 +3770,7 @@ public void testAcquisitionLockForAcquiringMultipleRecords() throws InterruptedE assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 10 @@ -2985,6 +3805,7 @@ public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch assertEquals(2, sharePartition.timer().size()); // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for all the acquired records. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 0 && @@ -3012,6 +3833,7 @@ public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedEx assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 10 && @@ -3128,6 +3950,7 @@ public void testAcquisitionLockOnAcknowledgingMultipleRecordBatchWithGapOffsets( // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for batch with starting offset 1. // Since, other records have been acknowledged. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 1 && @@ -3155,6 +3978,7 @@ public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws Interrupted assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. 
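+ // Advancing the mock timer by the acquisition lock timeout fires the expiry task, so the wait below does not depend on wall-clock time.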
+ mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 10 && @@ -3179,6 +4003,7 @@ public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws Interrupted assertEquals(3, sharePartition.timer().size()); // Allowing acquisition lock to expire for the acquired subset batch. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap = new HashMap<>(); @@ -3259,6 +4084,7 @@ public void testAcquisitionLockOnAcknowledgingMultipleSubsetRecordBatchWithGapOf assertEquals(3, sharePartition.timer().size()); // Allowing acquisition lock to expire for the offsets that have not been acknowledged yet. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap1 = new HashMap<>(); @@ -3321,6 +4147,7 @@ public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws Inter assertEquals(2, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 0 && @@ -3338,6 +4165,7 @@ public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws Inter assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire to archive the records that reach max delivery count. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 0 && @@ -3363,6 +4191,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedE assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 0 && @@ -3386,6 +4215,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedE assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask()); // Allowing acquisition lock to expire to archive the records that reach max delivery count. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap = new HashMap<>(); @@ -3436,6 +4266,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 0 && @@ -3450,6 +4281,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire to archive the records that reach max delivery count. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && // After the second failed attempt to acknowledge the record batch successfully, the record batch is archived. 
@@ -3473,6 +4305,7 @@ public void testAcknowledgeAfterAcquisitionLockTimeout() throws InterruptedExcep assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && @@ -3531,6 +4364,7 @@ public void testAcquisitionLockAfterDifferentAcknowledges() throws InterruptedEx assertEquals(1, sharePartition.timer().size()); // Allowing acquisition lock to expire will only affect the offsets that have not been acknowledged yet. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { // Check cached state. @@ -3576,6 +4410,7 @@ public void testAcquisitionLockOnBatchWithWriteShareGroupStateFailure() throws I assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && @@ -3616,6 +4451,7 @@ public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap = new HashMap<>(); @@ -3652,7 +4488,8 @@ public void testReleaseSingleRecordBatch() { assertEquals(0, sharePartition.nextFetchOffset()); assertEquals(1, sharePartition.cachedState().size()); assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + // Release delivery count. + assertEquals(0, sharePartition.cachedState().get(0L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(0L).offsetState()); } @@ -3669,7 +4506,7 @@ public void testReleaseMultipleRecordBatch() { assertEquals(5, sharePartition.nextFetchOffset()); assertEquals(1, sharePartition.cachedState().size()); assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertEquals(0, sharePartition.cachedState().get(5L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(5L).offsetState()); } @@ -3731,7 +4568,7 @@ public void testReleaseAcknowledgedMultipleSubsetRecordBatch() { assertEquals(5, sharePartition.nextFetchOffset()); // Check cached state. 
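+ // Released offsets are now expected with a delivery count of 0, since releasing resets the count for re-delivery.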
Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); @@ -3745,8 +4582,8 @@ public void testReleaseAcknowledgedMultipleSubsetRecordBatch() { expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @@ -3792,8 +4629,8 @@ public void testReleaseAcquiredRecordsWithAnotherMember() { expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); // Release acquired records for "member-2". 
@@ -3815,8 +4652,8 @@ public void testReleaseAcquiredRecordsWithAnotherMember() { expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @@ -3862,8 +4699,8 @@ public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); // Ack subset of records by "member-2". @@ -3879,7 +4716,7 @@ public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { // Check cached state. 
expectedOffsetStateMap.clear(); expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); expectedOffsetStateMap.clear(); expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); @@ -3891,8 +4728,8 @@ public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @@ -3926,14 +4763,14 @@ public void testReleaseAcquiredRecordsAfterDifferentAcknowledges() { Map expectedOffsetStateMap = new HashMap<>(); expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); } @Test - public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquiredRecords() { + public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecords() { SharePartition sharePartition = SharePartitionBuilder.builder() .withMaxDeliveryCount(2) .withState(SharePartitionState.ACTIVE) @@ -3955,12 +4792,12 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquire assertEquals(0, sharePartition.nextFetchOffset()); assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquiredRecordsSubset() { + public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecordsSubset() { SharePartition sharePartition = SharePartitionBuilder.builder() .withMaxDeliveryCount(2) .withState(SharePartitionState.ACTIVE) @@ -3999,21 +4836,21 @@ public void 
testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquire assertNotNull(sharePartition.cachedState().get(10L).offsetState()); assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(15L).batchState()); assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState()); assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); assertNull(sharePartition.cachedState().get(20L).offsetState()); Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); @@ -4050,9 +4887,10 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { fetchAcquiredRecords(sharePartition, records2, 2); fetchAcquiredRecords(sharePartition, records3, 5); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); + sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( + new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)), + new ShareAcknowledgementBatch(20, 24, List.of((byte) 2)) + ))); assertEquals(25, sharePartition.nextFetchOffset()); assertEquals(0, sharePartition.cachedState().size()); @@ -4172,7 +5010,7 @@ public void testAcquisitionLockOnReleasingMultipleRecordBatch() { assertEquals(5, sharePartition.nextFetchOffset()); assertEquals(1, sharePartition.cachedState().size()); assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - 
assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertEquals(0, sharePartition.cachedState().get(5L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(5L).offsetState()); // Acquisition lock timer task would be cancelled by the release acquired records operation. assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); @@ -4182,7 +5020,6 @@ public void testAcquisitionLockOnReleasingMultipleRecordBatch() { @Test public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchWithGapOffsets() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); MemoryRecords records1 = memoryRecords(2, 5); @@ -4215,7 +5052,7 @@ public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchW assertEquals(5, sharePartition.nextFetchOffset()); // Check cached state. Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); @@ -4229,8 +5066,8 @@ public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchW expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); // Acquisition lock timer task would be cancelled by the release acquired records operation. @@ -4294,7 +5131,7 @@ public void testLsoMovementForArchivingBatches() { assertEquals(20, sharePartition.startOffset()); assertEquals(36, sharePartition.endOffset()); - // For cached state corresponding to entry 2, the batch state will be ACKNOWLEDGED, hence it will be cleared as part of acknowledgment. + // For cached state corresponding to entry 2, the batch state will be ACKNOWLEDGED, hence it will be cleared as part of acknowledgement. assertEquals(6, sharePartition.cachedState().size()); assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); @@ -4322,6 +5159,122 @@ public void testLsoMovementForArchivingBatches() { assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask()); } + @Test + public void testLsoMovementForArchivingAllAvailableBatches() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50. 
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10); + + // After the acknowledgements, the state of share partition will be: + // 1. 11 -> 20: AVAILABLE + // 2. 21 -> 30: ACQUIRED + // 3. 31 -> 40: AVAILABLE + // 4. 41 -> 50: ACQUIRED + sharePartition.acknowledge(MEMBER_ID, List.of( + new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)), + new ShareAcknowledgementBatch(31, 40, List.of((byte) 2)) + )); + + // Move the LSO to 41. When the LSO moves ahead, all batches that are AVAILABLE before the new LSO will be ARCHIVED. + // Thus, the state of the share partition will be: + // 1. 11 -> 20: ARCHIVED + // 2. 21 -> 30: ACQUIRED + // 3. 31 -> 40: ARCHIVED + // 4. 41 -> 50: ACQUIRED + // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal + // state when the corresponding acquisition lock timer task expires. + sharePartition.updateCacheAndOffsets(41); + + assertEquals(51, sharePartition.nextFetchOffset()); + assertEquals(41, sharePartition.startOffset()); + assertEquals(50, sharePartition.endOffset()); + + assertEquals(4, sharePartition.cachedState().size()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState()); + + // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these + // records will remain in the ACQUIRED state. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2)))); + + // The batch is still in ACQUIRED state. + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); + + // Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be + // ARCHIVED. + sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run(); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState()); + } + + @Test + public void testLsoMovementForArchivingAllAvailableOffsets() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50. + fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10); + fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10); + + // After the acknowledgements, the share partition state will be: + // 1. 11 -> 20: AVAILABLE + // 2. 21 -> 30: ACQUIRED + // 3. 31 -> 40: AVAILABLE + // 4. 41 -> 50: ACQUIRED + sharePartition.acknowledge(MEMBER_ID, List.of( + new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)), + new ShareAcknowledgementBatch(31, 40, List.of((byte) 2)) + )); + + // Move the LSO to 36. When the LSO moves ahead, all records that are AVAILABLE before the new LSO will be ARCHIVED. + // Thus, the state of the share partition will be: + // 1. 11 -> 20: ARCHIVED + // 2. 21 -> 30: ACQUIRED + // 3. 
31 -> 35: ARCHIVED + // 4. 36 -> 40: AVAILABLE + // 5. 41 -> 50: ACQUIRED + // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal + // state when the corresponding acquisition lock timer task expires. + sharePartition.updateCacheAndOffsets(36); + + assertEquals(36, sharePartition.nextFetchOffset()); + assertEquals(36, sharePartition.startOffset()); + assertEquals(50, sharePartition.endOffset()); + + assertEquals(4, sharePartition.cachedState().size()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(31L).state()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(32L).state()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(33L).state()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(34L).state()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(35L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(36L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(37L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(38L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(39L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(40L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState()); + + // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these + // records will remain in the ACQUIRED state. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2)))); + + // The batch is still in ACQUIRED state. + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); + + // Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be + // ARCHIVED.
+ sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run(); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState()); + } + @Test public void testLsoMovementForArchivingOffsets() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); @@ -4600,7 +5553,7 @@ public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() { } @Test - public void testLsoMovementAheadOfEndOffsetPostAcknowledgment() { + public void testLsoMovementAheadOfEndOffsetPostAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); @@ -4716,7 +5669,7 @@ public void testLsoMovementWithGapsInCachedStateMapAndAcknowledgedBatch() { } @Test - public void testLsoMovementPostGapsInAcknowledgments() { + public void testLsoMovementPostGapsInAcknowledgements() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); MemoryRecords records1 = memoryRecords(2, 5); @@ -4816,7 +5769,7 @@ public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(20L).offsetState()); @@ -4830,8 +5783,8 @@ public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { expectedOffsetStateMap.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(35L).offsetState()); } @@ -4887,11 +5840,57 @@ public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToMiddleOfBa Map expectedOffsetStateMap = new HashMap<>(); expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + 
expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + } + + @Test + public void testReleaseAcquiredRecordsDecreaseDeliveryCount() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); + fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(12, 13, List.of((byte) 1)))); + + // LSO is at 11. + sharePartition.updateCacheAndOffsets(11); + + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(11, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); + + // Before release, the delivery count was incremented. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + + // Release acquired records. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + + // Check delivery count. + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + // After release, the delivery count was decremented. + expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @@ -4929,6 +5928,7 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovement() throws assertEquals(7, sharePartition.cachedState().size()); // Allowing acquisition lock to expire. 
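+ // The mock timer only fires expiration tasks when its clock is advanced, so the clock is advanced by the maximum wait time to let the acquisition lock timeout elapse before the condition below is evaluated.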
+ mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap1 = new HashMap<>(); @@ -4987,6 +5987,7 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToStartOf assertEquals(2, sharePartition.cachedState().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) && sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED && @@ -5015,6 +6016,7 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToMiddleO assertEquals(2, sharePartition.cachedState().size()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> { Map expectedOffsetStateMap = new HashMap<>(); @@ -5040,10 +6042,9 @@ public void testScheduleAcquisitionLockTimeoutValueFromGroupConfig() { Mockito.when(groupConfig.shareRecordLockDurationMs()).thenReturn(expectedDurationMs); SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withGroupConfigManager(groupConfigManager).build(); - SharePartition.AcquisitionLockTimerTask timerTask = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + AcquisitionLockTimerTask timerTask = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); Mockito.verify(groupConfig).shareRecordLockDurationMs(); @@ -5063,16 +6064,15 @@ public void testScheduleAcquisitionLockTimeoutValueUpdatesSuccessfully() { .thenReturn(expectedDurationMs2); SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withGroupConfigManager(groupConfigManager).build(); - SharePartition.AcquisitionLockTimerTask timerTask1 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + AcquisitionLockTimerTask timerTask1 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); Mockito.verify(groupConfig).shareRecordLockDurationMs(); assertEquals(expectedDurationMs1, timerTask1.delayMs); - SharePartition.AcquisitionLockTimerTask timerTask2 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + AcquisitionLockTimerTask timerTask2 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); Mockito.verify(groupConfigManager, Mockito.times(4)).groupConfig(GROUP_ID); Mockito.verify(groupConfig, Mockito.times(2)).shareRecordLockDurationMs(); @@ -5209,6 +6209,7 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws In assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask()); // Allowing acquisition lock to expire. + mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.nextFetchOffset() == 7 && sharePartition.cachedState().isEmpty() && sharePartition.startOffset() == 7 && sharePartition.endOffset() == 7, @@ -5258,6 +6259,7 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOff assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); // Allowing acquisition lock to expire. 
+ mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); TestUtils.waitForCondition( () -> sharePartition.nextFetchOffset() == 3 && sharePartition.cachedState().isEmpty() && sharePartition.startOffset() == 3 && sharePartition.endOffset() == 3, @@ -5480,7 +6482,7 @@ public void testWriteShareGroupStateFailure() { result = sharePartition.writeShareGroupState(anyList()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(FencedStateEpochException.class, result); + assertFutureThrows(NotLeaderOrFollowerException.class, result); // Mock Write state RPC to return error response, FENCED_LEADER_EPOCH. Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( @@ -5516,7 +6518,7 @@ public void testWriteShareGroupStateWithNoOpStatePersister() { } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementTypeAccept() { + public void testMaybeUpdateCachedStateWhenAcknowledgeTypeAccept() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); @@ -5536,7 +6538,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementTypeAccept() { } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementTypeReject() { + public void testMaybeUpdateCachedStateWhenAcknowledgeTypeReject() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); @@ -5556,7 +6558,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementTypeReject() { } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementTypeRelease() { + public void testMaybeUpdateCachedStateWhenAcknowledgeTypeRelease() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); @@ -5579,7 +6581,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementTypeRelease() { @Test public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchSubset() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(20) + .withMaxInflightRecords(20) .withState(SharePartitionState.ACTIVE) .build(); @@ -5605,7 +6607,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchS @Test public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntireBatch() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(20) + .withMaxInflightRecords(20) .withState(SharePartitionState.ACTIVE) .build(); fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 15); @@ -5630,7 +6632,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntire @Test public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(20) + .withMaxInflightRecords(20) .withState(SharePartitionState.ACTIVE) .build(); @@ -5659,7 +6661,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() { @Test public void testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(20) + .withMaxInflightRecords(20) .withState(SharePartitionState.ACTIVE) .build(); @@ -5681,7 +6683,7 @@ public void 
testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged @Test public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(100) + .withMaxInflightRecords(100) .withState(SharePartitionState.ACTIVE) .build(); @@ -5720,7 +6722,7 @@ public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() fetchAcquiredRecords(sharePartition, memoryRecords(100, 80), 100); assertFalse(sharePartition.canAcquireRecords()); - // Final Acknowledgment, all records are acknowledged here. + // Final Acknowledgement, all records are acknowledged here. sharePartition.acknowledge(MEMBER_ID, List.of( new ShareAcknowledgementBatch(50, 179, List.of((byte) 3)))); @@ -5767,7 +6769,7 @@ public void testMaybeUpdateCachedStateGapAfterLastOffsetAcknowledged() { fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); assertTrue(sharePartition.canAcquireRecords()); - // Sending acknowledgment for the first batch from 11 to 20 + // Sending acknowledgement for the first batch from 11 to 20 sharePartition.acknowledge(MEMBER_ID, List.of( new ShareAcknowledgementBatch(11, 20, List.of((byte) 1)))); @@ -5780,11 +6782,11 @@ public void testMaybeUpdateCachedStateGapAfterLastOffsetAcknowledged() { assertEquals(40, sharePartition.endOffset()); assertEquals(21, sharePartition.nextFetchOffset()); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); - assertEquals(21, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); } @Test @@ -6157,7 +7159,7 @@ public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() { @Test public void testNextFetchOffsetWithMultipleConsumers() { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightMessages(100) + .withMaxInflightRecords(100) .withState(SharePartitionState.ACTIVE) .build(); MemoryRecords records1 = memoryRecords(3, 0); @@ -6354,18 +7356,115 @@ public void testFindLastOffsetAcknowledgedWhenGapAtBeginning() { sharePartition.maybeInitialize(); - SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset(); - assertNotNull(initialReadGapOffset); + GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + assertNotNull(persisterReadResultGapWindow); + + // Since there is a gap in the beginning, the persisterReadResultGapWindow window is same as the cachedState + assertEquals(11, persisterReadResultGapWindow.gapStartOffset()); + assertEquals(40, persisterReadResultGapWindow.endOffset()); + + long lastOffsetAcknowledged = sharePartition.findLastOffsetAcknowledged(); + + // Since the persisterReadResultGapWindow window begins at startOffset, we cannot count any of the offsets as acknowledged. 
+ // Thus, lastOffsetAcknowledged should be -1 + assertEquals(-1, lastOffsetAcknowledged); + } + + @Test + public void testCacheUpdateWhenBatchHasOngoingTransition() { + Persister persister = Mockito.mock(Persister.class); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + // Acquire a single batch. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, + fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM + ), 10 + ); + + // Validate that there is no ongoing transition. + assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + // Return a future which will be completed later, so the batch state has ongoing transition. + CompletableFuture future = new CompletableFuture<>(); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + // Acknowledge batch to create ongoing transition. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.ACCEPT.id)))); + + // Assert the start offset has not moved and batch has ongoing transition. + assertEquals(21L, sharePartition.startOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + + // Validate that offset can't be moved because batch has ongoing transition. + assertFalse(sharePartition.canMoveStartOffset()); + assertEquals(-1, sharePartition.findLastOffsetAcknowledged()); + + // Complete the future so acknowledge API can be completed, which updates the cache. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future.complete(writeShareGroupStateResult); + + // Validate the cache has been updated. + assertEquals(31L, sharePartition.startOffset()); + assertTrue(sharePartition.cachedState().isEmpty()); + } + + @Test + public void testCacheUpdateWhenOffsetStateHasOngoingTransition() { + Persister persister = Mockito.mock(Persister.class); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + // Acquire a single batch. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, + fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM + ), 10 + ); - // Since there is a gap in the beginning, the initialReadGapOffset window is same as the cachedState - assertEquals(11, initialReadGapOffset.gapStartOffset()); - assertEquals(40, initialReadGapOffset.endOffset()); + // Validate that there is no ongoing transition. + assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + assertNull(sharePartition.cachedState().get(21L).offsetState()); + // Return a future which will be completed later, so the batch state has ongoing transition. + CompletableFuture future = new CompletableFuture<>(); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + // Acknowledge offsets to create ongoing transition. 
+ sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 23, List.of(AcknowledgeType.ACCEPT.id)))); - long lastOffsetAcknowledged = sharePartition.findLastOffsetAcknowledged(); + // Assert the start offset has not moved and offset state is now maintained. Offset state should + // have ongoing transition. + assertEquals(21L, sharePartition.startOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertNotNull(sharePartition.cachedState().get(21L).offsetState()); + assertTrue(sharePartition.cachedState().get(21L).offsetState().get(21L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(21L).offsetState().get(22L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(21L).offsetState().get(23L).hasOngoingStateTransition()); + // Only 21, 22 and 23 offsets should have ongoing state transition as the acknowledge request + // contains 21-23 offsets. + assertFalse(sharePartition.cachedState().get(21L).offsetState().get(24L).hasOngoingStateTransition()); + + // Validate that offset can't be moved because batch has ongoing transition. + assertFalse(sharePartition.canMoveStartOffset()); + assertEquals(-1, sharePartition.findLastOffsetAcknowledged()); + + // Complete the future so acknowledge API can be completed, which updates the cache. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future.complete(writeShareGroupStateResult); - // Since the initialReadGapOffset window begins at startOffset, we cannot count any of the offsets as acknowledged. - // Thus, lastOffsetAcknowledged should be -1 - assertEquals(-1, lastOffsetAcknowledged); + // Validate the cache has been updated. + assertEquals(24L, sharePartition.startOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertNotNull(sharePartition.cachedState().get(21L)); } /** @@ -6668,7 +7767,7 @@ public void testAcquireWhenBatchesRemovedForFetchOffsetForSameCachedBatch() { }); }); } - + private String assertionFailedMessage(SharePartition sharePartition, Map> offsets) { StringBuilder errorMessage = new StringBuilder(ACQUISITION_LOCK_NEVER_GOT_RELEASED + String.format( " timer size: %d, next fetch offset: %d\n", @@ -6893,6 +7992,7 @@ public void testAcquireWithReadCommittedIsolationLevel() { assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(69L).acquisitionLockTimeoutTask()); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void testContainsAbortMarker() { SharePartition sharePartition = SharePartitionBuilder.builder() @@ -7068,6 +8168,536 @@ public void testFetchAbortedTransactionRecordBatchesForAbortedAndCommittedTransa assertEquals(1, actual.get(3).producerId()); } + @Test + public void testFetchLockReleasedByDifferentId() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .build(); + Uuid fetchId1 = Uuid.randomUuid(); + Uuid fetchId2 = Uuid.randomUuid(); + + // Initially, fetch lock is not acquired. + assertNull(sharePartition.fetchLock()); + // fetchId1 acquires the fetch lock. + assertTrue(sharePartition.maybeAcquireFetchLock(fetchId1)); + // If we release fetch lock by fetchId2, it will work. 
Currently, we have kept the release of fetch lock as non-strict + // such that even if the caller's id for releasing fetch lock does not match the id that holds the lock, we will + // still release it. This has been done to avoid the scenarios where we hold the fetch lock for a share partition + // forever due to faulty code. In the future, we plan to make the locks handling strict, then this test case needs to be updated. + sharePartition.releaseFetchLock(fetchId2); + assertNull(sharePartition.fetchLock()); // Fetch lock has been released. + } + + @Test + public void testAcquireWhenBatchHasOngoingTransition() { + Persister persister = Mockito.mock(Persister.class); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + // Acquire a single batch with member-1. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, + fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM + ), 10 + ); + + // Validate that there is no ongoing transition. + assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + // Return a future which will be completed later, so the batch state has ongoing transition. + CompletableFuture future = new CompletableFuture<>(); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + // Acknowledge batch to create ongoing transition. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.RELEASE.id)))); + + // Assert the start offset has not moved and batch has ongoing transition. + assertEquals(21L, sharePartition.startOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId()); + + // Acquire the same batch with member-2. This function call will return with 0 records since there is an ongoing + // transition for this batch. + fetchAcquiredRecords( + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 21, + fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM + ), 0 + ); + + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId()); + + // Complete the future so acknowledge API can be completed, which updates the cache. Now the records can be acquired. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future.complete(writeShareGroupStateResult); + + // Acquire the same batch with member-2. 10 records will be acquired. 
+ fetchAcquiredRecords( + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 21, + fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM + ), 10 + ); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); + assertEquals("member-2", sharePartition.cachedState().get(21L).batchMemberId()); + } + + @Test + public void testNextFetchOffsetWhenBatchHasOngoingTransition() { + Persister persister = Mockito.mock(Persister.class); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + + // Acquire a single batch 0-9 with member-1. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0, + fetchPartitionData(memoryRecords(10, 0)), FETCH_ISOLATION_HWM + ), 10 + ); + + // Acquire a single batch 10-19 with member-1. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 10, + fetchPartitionData(memoryRecords(10, 10)), FETCH_ISOLATION_HWM + ), 10 + ); + + // Validate that there is no ongoing transition. + assertEquals(2, sharePartition.cachedState().size()); + assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + + // Return futures which will be completed later, so the batch state has ongoing transition. + CompletableFuture future1 = new CompletableFuture<>(); + CompletableFuture future2 = new CompletableFuture<>(); + + // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for + // offsets 0-9 and 10-19 respectively. + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + + // Acknowledge batch to create ongoing transition. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 9, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 19, List.of(AcknowledgeType.RELEASE.id)))); + + // Complete future2 so second acknowledge API can be completed, which updates the cache. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future2.complete(writeShareGroupStateResult); + + // Offsets 0-9 will have ongoing state transition since future1 is not complete yet. + // Offsets 10-19 won't have ongoing state transition since future2 has been completed. + assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition()); + + // nextFetchOffset should return 10 and not 0 because batch 0-9 is undergoing state transition. 
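+ // Batch 0-9 was released, but its write-state call (future1) is still pending, so those records cannot be re-acquired yet and the next fetch offset skips to the first batch without an ongoing transition.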
+ assertEquals(10, sharePartition.nextFetchOffset()); + } + + @Test + public void testNextFetchOffsetWhenOffsetsHaveOngoingTransition() { + Persister persister = Mockito.mock(Persister.class); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + + // Acquire a single batch 0-50 with member-1. + fetchAcquiredRecords( + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0, + fetchPartitionData(memoryRecords(50, 0)), FETCH_ISOLATION_HWM + ), 50 + ); + + // Validate that there is no ongoing transition. + assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); + + // Return futures which will be completed later, so the batch state has ongoing transition. + CompletableFuture future1 = new CompletableFuture<>(); + CompletableFuture future2 = new CompletableFuture<>(); + + // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for + // offsets 5-9 and 20-24 respectively. + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + + // Acknowledge batch to create ongoing transition. + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id)))); + + // Complete future2 so second acknowledge API can be completed, which updates the cache. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future2.complete(writeShareGroupStateResult); + + // Offsets 5-9 will have ongoing state transition since future1 is not complete yet. + // Offsets 20-24 won't have ongoing state transition since future2 has been completed. + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(5L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(6L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(7L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(8L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(9L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(20L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(21L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(22L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(23L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(24L).hasOngoingStateTransition()); + + // nextFetchOffset should return 20 and not 5 because offsets 5-9 is undergoing state transition. 
+ assertEquals(20, sharePartition.nextFetchOffset()); + } + + @Test + public void testAcquisitionLockTimeoutWithConcurrentAcknowledgement() throws InterruptedException { + Persister persister = Mockito.mock(Persister.class); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withPersister(persister) + .build(); + + // Create 2 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 0).close(); + memoryRecordsBuilder(buffer, 15, 5).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Acquire 10 records. + fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 5, /* Batch size of 5 so cache can have 2 entries */ + 10, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 0), + FETCH_ISOLATION_HWM), + 20); + + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(2, sharePartition.timer().size()); + + // Return 2 futures which will be completed later. + CompletableFuture future1 = new CompletableFuture<>(); + CompletableFuture future2 = new CompletableFuture<>(); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + + // Store the corresponding batch timer tasks. + TimerTask timerTask1 = sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask(); + TimerTask timerTask2 = sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask(); + + // Acknowledge 1 offset in first batch as Accept to create offset tracking, accept complete + second batch. And mark offset 0 as release so cached state does not move ahead. + sharePartition.acknowledge(MEMBER_ID, List.of( + new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RELEASE.id)), + new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id)), + new ShareAcknowledgementBatch(5, 19, List.of(AcknowledgeType.ACCEPT.id)))); + + // Assert the start offset has not moved. + assertEquals(0L, sharePartition.startOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + // Verify ongoing transition states. + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(0L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(0L).offsetState().get(1L).hasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(0L).offsetState().get(2L).hasOngoingStateTransition()); + assertTrue(sharePartition.cachedState().get(5L).batchHasOngoingStateTransition()); + + // Validate first timer task is already cancelled. + assertTrue(timerTask1.isCancelled()); + assertFalse(timerTask2.isCancelled()); + + // Fetch offset state timer tasks.
+ TimerTask timerTaskOffsetState1 = sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask(); + TimerTask timerTaskOffsetState2 = sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask(); + TimerTask timerTaskOffsetState3 = sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask(); + + // Complete futures. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + future1.complete(writeShareGroupStateResult); + future2.complete(writeShareGroupStateResult); + + // Verify timer tasks are now cancelled, except unacknowledged offsets. + assertEquals(2, sharePartition.cachedState().size()); + assertTrue(timerTask2.isCancelled()); + assertTrue(timerTaskOffsetState1.isCancelled()); + assertTrue(timerTaskOffsetState2.isCancelled()); + assertFalse(timerTaskOffsetState3.isCancelled()); + + // Verify the state prior executing the timer tasks. + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + + // Running expired timer tasks should not mark offsets available, except for offset 2. + timerTask1.run(); + // State should remain same. + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + + timerTask2.run(); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + + timerTaskOffsetState2.run(); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + + // Should update the state to available as the timer task is not yet expired. + timerTaskOffsetState3.run(); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + } + + @Test + public void testLsoMovementWithWriteStateRPCFailuresInAcknowledgement() { + Persister persister = Mockito.mock(Persister.class); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withPersister(persister) + .build(); + + fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); + fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); + + // Validate that there is no ongoing transition. + assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition()); + assertFalse(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition()); + + // Return futures which will be completed later, so the batch state has ongoing transition. + CompletableFuture future1 = new CompletableFuture<>(); + CompletableFuture future2 = new CompletableFuture<>(); + + // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for + // offsets 2-6 and 7-11 respectively. + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + + // Acknowledge batch to create ongoing transition. 
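+ // The write-state futures for these two release acknowledgements stay pending, so both batches remain in an ongoing transition while the LSO is moved below.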
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id))));
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.RELEASE.id))));
+
+        // Validate that there is now an ongoing transition for both batches.
+        assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
+        assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
+
+        // Move LSO to 7, so some records/offsets can be marked archived for the first batch.
+        sharePartition.updateCacheAndOffsets(7L);
+
+        // Start offset will be moved.
+        assertEquals(12L, sharePartition.nextFetchOffset());
+        assertEquals(7L, sharePartition.startOffset());
+        assertEquals(11, sharePartition.endOffset());
+        assertEquals(2, sharePartition.cachedState().size());
+        assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
+        assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
+        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
+        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(7L).batchState());
+
+        // Complete future1 with an error result so the acknowledgement for offsets 2-6 completes with a failure.
+        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
+        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
+            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+                PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
+        future1.complete(writeShareGroupStateResult);
+
+        // The failed completion of future1 should not impact the cached state since those records have already
+        // been archived.
+        assertEquals(12, sharePartition.nextFetchOffset());
+        assertEquals(7, sharePartition.startOffset());
+        assertEquals(11, sharePartition.endOffset());
+        assertEquals(2, sharePartition.cachedState().size());
+        assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition());
+        assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition());
+        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
+        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(7L).batchState());
+
+        future2.complete(writeShareGroupStateResult);
+        assertEquals(12L, sharePartition.nextFetchOffset());
+        assertEquals(7, sharePartition.startOffset());
+        assertEquals(11, sharePartition.endOffset());
+        assertEquals(2, sharePartition.cachedState().size());
+        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
+    }
+
+    @Test
+    public void testAcquisitionLockTimeoutWithWriteStateRPCFailure() throws InterruptedException {
+        Persister persister = Mockito.mock(Persister.class);
+        SharePartition sharePartition = SharePartitionBuilder.builder()
+            .withState(SharePartitionState.ACTIVE)
+            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
+            .withPersister(persister)
+            .build();
+
+        fetchAcquiredRecords(
+            sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0,
+                fetchPartitionData(memoryRecords(2, 0)), FETCH_ISOLATION_HWM
+            ), 2
+        );
+
+        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
+        assertEquals(1, sharePartition.timer().size());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
+
+        // Return a future which will be completed later, so the batch state has an ongoing transition.
+        CompletableFuture future = new CompletableFuture<>();
+        Mockito.when(persister.writeState(Mockito.any())).thenReturn(future);
+
+        // Acknowledge batch to create ongoing transition.
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 1, List.of(AcknowledgeType.ACCEPT.id))));
+        // Assert the start offset has not moved and the batch has an ongoing transition.
+        assertEquals(0L, sharePartition.startOffset());
+        assertEquals(1, sharePartition.cachedState().size());
+        assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition());
+        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).batchState());
+        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
+        // Timer task has not expired yet.
+        assertFalse(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired());
+
+        // Allow the acquisition lock to expire. This will not cause any change because the record is not in ACQUIRED state.
+        // It will, however, remove the timer task's entry from the timer.
+        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
+        TestUtils.waitForCondition(
+            () -> sharePartition.cachedState().get(0L).batchState() == RecordState.ACKNOWLEDGED &&
+                sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 &&
+                sharePartition.timer().size() == 0,
+            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
+            () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of())));
+
+        // Acquisition lock timeout task has already run and is not null.
+        assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
+        // Timer task should have expired now.
+        assertTrue(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired());
+
+        // Complete the future with an error result so the acknowledgement for offsets 0-1 completes with a failure.
+        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
+        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
+            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+                PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
+        future.complete(writeShareGroupStateResult);
+
+        // Even though the write state RPC has failed and the corresponding acquisition lock timeout task has expired,
+        // the record should not be stuck in ACQUIRED state with no acquisition lock timeout task.
+        assertEquals(1, sharePartition.cachedState().size());
+        assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState());
+        assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
+        assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask());
+    }
+
+    @Test
+    public void testRecordArchivedWithWriteStateRPCFailure() throws InterruptedException {
+        Persister persister = Mockito.mock(Persister.class);
+        SharePartition sharePartition = SharePartitionBuilder.builder()
+            .withState(SharePartitionState.ACTIVE)
+            .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS)
+            .withMaxDeliveryCount(2)
+            .withPersister(persister)
+            .build();
+
+        fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5);
+        fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5);
+
+        // Futures which will be completed later, so the batch states have an ongoing transition.
+        CompletableFuture future1 = new CompletableFuture<>();
+        CompletableFuture future2 = new CompletableFuture<>();
+        Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
+
+        // Acknowledge batches.
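+        // Note: offset 3 is acknowledged individually so the 2-6 batch moves to per-offset tracking, while
+        // the 7-11 batch is acknowledged, and later validated, at the batch level.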
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id))));
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id))));
+
+        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
+        assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
+        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
+        assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
+
+        WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class);
+        Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of(
+            new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+                PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message())))));
+
+        future1.complete(writeShareGroupStateResult);
+        assertEquals(12, sharePartition.nextFetchOffset());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
+        assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
+        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
+        assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
+
+        future2.complete(writeShareGroupStateResult);
+        assertEquals(12L, sharePartition.nextFetchOffset());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
+        assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
+        assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount());
+
+        // Allow the acquisition lock to expire. This also ensures that the acquisition lock timeout task
+        // runs successfully after the write state RPC failure.
+        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
+        TestUtils.waitForCondition(
+            () -> sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.AVAILABLE &&
+                sharePartition.cachedState().get(7L).batchState() == RecordState.AVAILABLE &&
+                sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount() == 1 &&
+                sharePartition.cachedState().get(7L).batchDeliveryCount() == 1 &&
+                sharePartition.timer().size() == 0,
+            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
+            () -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of())));
+        // Acquisition lock timeout task has already run and the next fetch offset is moved back to 2.
+        assertEquals(2, sharePartition.nextFetchOffset());
+        // Send the same batches again.
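+        // Note: re-acquiring bumps the delivery count to 2, matching this test's maxDeliveryCount(2), so on
+        // the next expiry the un-acknowledged offsets are archived rather than made available, and the
+        // acknowledged ones are archived once the failed write state RPC completes.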
+        fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5);
+        fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5);
+
+        future1 = new CompletableFuture<>();
+        future2 = new CompletableFuture<>();
+        Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2);
+
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id))));
+        sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id))));
+
+        mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS);
+        // Verify the timer tasks have run and the state is archived for the offsets which are not acknowledged.
+        // For the acknowledged offsets, the acquisition lock timeout task should only have expired and
+        // the state should not be archived.
+        TestUtils.waitForCondition(
+            () -> sharePartition.cachedState().get(2L).offsetState().get(2L).state() == RecordState.ARCHIVED &&
+                sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.ACKNOWLEDGED &&
+                sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask().hasExpired() &&
+                sharePartition.cachedState().get(7L).batchState() == RecordState.ACKNOWLEDGED &&
+                sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask().hasExpired(),
+            DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS,
+            () -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of())));
+
+        future1.complete(writeShareGroupStateResult);
+        // Now the state should be archived for the offsets despite the write state RPC failure, as the
+        // delivery count has reached the max delivery count and the acquisition lock timeout task
+        // has already expired for the acknowledged offsets.
+        assertEquals(12, sharePartition.nextFetchOffset());
+        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).offsetState().get(3L).state());
+        assertEquals(2, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount());
+        assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState());
+        assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount());
+
+        future2.complete(writeShareGroupStateResult);
+        assertEquals(12L, sharePartition.nextFetchOffset());
+        assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(7L).batchState());
+        assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount());
+    }
+
     /**
      * This function produces transactional data of a given no. of records followed by a transactional marker (COMMIT/ABORT).
*/ @@ -7208,7 +8838,7 @@ private static class SharePartitionBuilder { private int defaultAcquisitionLockTimeoutMs = 30000; private int maxDeliveryCount = MAX_DELIVERY_COUNT; - private int maxInflightMessages = MAX_IN_FLIGHT_MESSAGES; + private int maxInflightRecords = MAX_IN_FLIGHT_RECORDS; private Persister persister = new NoOpStatePersister(); private ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); @@ -7217,8 +8847,8 @@ private static class SharePartitionBuilder { private Time time = MOCK_TIME; private SharePartitionMetrics sharePartitionMetrics = Mockito.mock(SharePartitionMetrics.class); - private SharePartitionBuilder withMaxInflightMessages(int maxInflightMessages) { - this.maxInflightMessages = maxInflightMessages; + private SharePartitionBuilder withMaxInflightRecords(int maxInflightRecords) { + this.maxInflightRecords = maxInflightRecords; return this; } @@ -7267,7 +8897,7 @@ public static SharePartitionBuilder builder() { } public SharePartition build() { - return new SharePartition(GROUP_ID, TOPIC_ID_PARTITION, 0, maxInflightMessages, maxDeliveryCount, + return new SharePartition(GROUP_ID, TOPIC_ID_PARTITION, 0, maxInflightRecords, maxDeliveryCount, defaultAcquisitionLockTimeoutMs, mockTimer, time, persister, replicaManager, groupConfigManager, state, Mockito.mock(SharePartitionListener.class), sharePartitionMetrics); } diff --git a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala index 4f66dd9e311ae..2ac15a29e20bb 100644 --- a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala +++ b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala @@ -19,24 +19,14 @@ package kafka.admin import kafka.api.IntegrationTestHarness import kafka.server.KafkaConfig import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} -import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.common.config.{ConfigException, ConfigResource, TopicConfig} -import org.apache.kafka.common.errors.{InvalidConfigurationException, UnknownTopicOrPartitionException} -import org.apache.kafka.common.utils.MockTime +import org.apache.kafka.common.config.{ConfigException, TopicConfig} import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.server.log.remote.storage.{NoOpRemoteLogMetadataManager, NoOpRemoteStorageManager, RemoteLogManagerConfig, RemoteLogSegmentId, RemoteLogSegmentMetadata, RemoteLogSegmentState} +import org.apache.kafka.server.log.remote.storage._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.function.Executable -import org.junit.jupiter.api.{BeforeEach, Tag, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{CsvSource, ValueSource} +import org.junit.jupiter.api.{BeforeEach, Tag, Test, TestInfo} -import java.util -import java.util.concurrent.atomic.AtomicInteger -import java.util.{Collections, Optional, Properties} +import java.util.Properties import scala.collection.Seq -import scala.concurrent.ExecutionException import scala.util.Random @Tag("integration") @@ -62,439 +52,12 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { @BeforeEach override def setUp(info: TestInfo): Unit = { - if (info.getTestMethod.get().getName.endsWith("SystemRemoteStorageIsDisabled")) { - sysRemoteStorageEnabled = false - } - if (info.getTestMethod.get().getName.equals("testTopicDeletion")) { - 
storageManagerClassName = classOf[MyRemoteStorageManager].getName - metadataManagerClassName = classOf[MyRemoteLogMetadataManager].getName - } super.setUp(info) testTopicName = s"${info.getTestMethod.get().getName}-${Random.alphanumeric.take(10).mkString}" } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithValidRetentionTime(quorum: String): Unit = { - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithValidRetentionSize(quorum: String): Unit = { - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "512") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "256") - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithInheritedLocalRetentionTime(quorum: String): Unit = { - // inherited local retention ms is 1000 - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1001") - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithInheritedLocalRetentionSize(quorum: String): Unit = { - // inherited local retention bytes is 1024 - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1025") - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithInvalidRetentionTime(quorum: String): Unit = { - // inherited local retention ms is 1000 - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") - assertThrowsException(classOf[InvalidConfigurationException], () => - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig)) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateRemoteTopicWithInvalidRetentionSize(quorum: String): Unit = { - // inherited local retention bytes is 1024 - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "512") - 
assertThrowsException(classOf[InvalidConfigurationException], () => - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig)) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateCompactedRemoteStorage(quorum: String): Unit = { - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact") - assertThrowsException(classOf[InvalidConfigurationException], () => - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig)) - } - - // `remote.log.delete.on.disable` and `remote.log.copy.disable` only works in KRaft mode. - @ParameterizedTest - @CsvSource(Array("kraft,true,true", "kraft,true,false", "kraft,false,true", "kraft,false,false")) - def testCreateRemoteTopicWithCopyDisabledAndDeleteOnDisable(quorum: String, copyDisabled: Boolean, deleteOnDisable: Boolean): Unit = { - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, copyDisabled.toString) - topicConfig.put(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, deleteOnDisable.toString) - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - verifyRemoteLogTopicConfigs(topicConfig) - } - - // `remote.log.delete.on.disable` only works in KRaft mode. - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateTopicRetentionMsValidationWithRemoteCopyDisabled(quorum: String): Unit = { - val testTopicName2 = testTopicName + "2" - val testTopicName3 = testTopicName + "3" - val errorMsgMs = "When `remote.log.copy.disable` is set to true, the `local.retention.ms` and `retention.ms` " + - "must be set to the identical value because there will be no more logs copied to the remote storage." - - // 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.ms and retention.ms value, - // it should fail to create the topic - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") - - val admin = createAdminClient() - val err = assertThrowsException(classOf[InvalidConfigurationException], - () => TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, - numReplicationFactor, topicConfig = topicConfig)) - assertEquals(errorMsgMs, err.getMessage) - - // 2. change the local.retention.ms value to the same value as retention.ms should successfully create the topic - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 3. 
change the local.retention.ms value to "-2" should also successfully create the topic - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") - TestUtils.createTopicWithAdmin(admin, testTopicName2, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.ms and retention.ms value, - // it should successfully creates the topic. - topicConfig.clear() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") - TestUtils.createTopicWithAdmin(admin, testTopicName3, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - )) - val err2 = assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get()) - assertEquals(errorMsgMs, err2.getMessage) - - // 6. alter the config to `remote.log.copy.disable=true` and local.retention.ms == retention.ms, it should work without error - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000"), - AlterConfigOp.OpType.SET), - )) - - admin.incrementalAlterConfigs(configs).all().get() - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateTopicRetentionBytesValidationWithRemoteCopyDisabled(quorum: String): Unit = { - val testTopicName2 = testTopicName + "2" - val testTopicName3 = testTopicName + "3" - val errorMsgBytes = "When `remote.log.copy.disable` is set to true, the `local.retention.bytes` and `retention.bytes` " + - "must be set to the identical value because there will be no more logs copied to the remote storage." - - // 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.bytes and retention.bytes value, - // it should fail to create the topic - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100") - topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") - - val admin = createAdminClient() - val err = assertThrowsException(classOf[InvalidConfigurationException], - () => TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, - numReplicationFactor, topicConfig = topicConfig)) - assertEquals(errorMsgBytes, err.getMessage) - - // 2. 
change the local.retention.bytes value to the same value as retention.bytes should successfully create the topic - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 3. change the local.retention.bytes value to "-2" should also successfully create the topic - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") - TestUtils.createTopicWithAdmin(admin, testTopicName2, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.bytes and retention.bytes value, - // it should successfully creates the topic. - topicConfig.clear() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100") - topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") - TestUtils.createTopicWithAdmin(admin, testTopicName3, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - )) - val err2 = assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get()) - assertEquals(errorMsgBytes, err2.getMessage) - - // 6. 
alter the config to `remote.log.copy.disable=true` and local.retention.bytes == retention.bytes, it should work without error - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000"), - AlterConfigOp.OpType.SET), - )) - admin.incrementalAlterConfigs(configs).all().get() - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testEnableRemoteLogOnExistingTopicTest(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties() - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - Collections.singleton( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET)) - ) - admin.incrementalAlterConfigs(configs).all().get() - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testEnableRemoteLogWhenSystemRemoteStorageIsDisabled(quorum: String): Unit = { - val admin = createAdminClient() - - val topicConfigWithRemoteStorage = new Properties() - topicConfigWithRemoteStorage.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - val message = assertThrowsException(classOf[InvalidConfigurationException], - () => TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, - numReplicationFactor, topicConfig = topicConfigWithRemoteStorage)) - assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) - - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor) - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - Collections.singleton( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET)) - ) - val errorMessage = assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get()) - assertTrue(errorMessage.getMessage.contains("Tiered Storage functionality is disabled in the broker")) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithValidRetentionTimeTest(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"), - AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100"), - AlterConfigOp.OpType.SET) - )) - 
admin.incrementalAlterConfigs(configs).all().get() - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithValidRetentionSizeTest(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "200"), - AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100"), - AlterConfigOp.OpType.SET) - )) - admin.incrementalAlterConfigs(configs).all().get() - verifyRemoteLogTopicConfigs(topicConfig) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithInheritedLocalRetentionTime(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // inherited local retention ms is 1000 - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"), - AlterConfigOp.OpType.SET), - )) - assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get()) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithInheritedLocalRetentionSize(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - // inherited local retention bytes is 1024 - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "512"), - AlterConfigOp.OpType.SET), - )) - assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get(), "Invalid local retention size") - } - - // The remote storage config validation on controller level only works in KRaft - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithDisablingRemoteStorage(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties - topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new 
ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), - AlterConfigOp.OpType.SET), - )) - assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get(), "It is invalid to disable remote storage without deleting remote data. " + - "If you want to keep the remote data and turn to read only, please set `remote.storage.enable=true,remote.log.copy.disable=true`. " + - "If you want to disable remote storage and delete all remote data, please set `remote.storage.enable=false,remote.log.delete.on.disable=true`.") - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateTopicConfigWithDisablingRemoteStorageWithDeleteOnDisable(quorum: String): Unit = { - val admin = createAdminClient() - val topicConfig = new Properties - topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), - AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET) - )) - admin.incrementalAlterConfigs(configs).all().get() - - val newProps = new Properties() - configs.get(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName)).forEach { op => - newProps.setProperty(op.configEntry().name(), op.configEntry().value()) - } - - verifyRemoteLogTopicConfigs(newProps) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTopicDeletion(quorum: String): Unit = { - MyRemoteStorageManager.deleteSegmentEventCounter.set(0) - val numPartitions = 2 - val topicConfig = new Properties() - topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") - topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") - TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, brokerCount, - topicConfig = topicConfig) - TestUtils.deleteTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers) - assertThrowsException(classOf[UnknownTopicOrPartitionException], - () => TestUtils.describeTopic(createAdminClient(), testTopicName), "Topic should be deleted") - TestUtils.waitUntilTrue(() => - numPartitions * MyRemoteLogMetadataManager.segmentCountPerPartition == MyRemoteStorageManager.deleteSegmentEventCounter.get(), - "Remote log segments should be deleted only once by the leader") - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterWideDisablementOfTieredStorageWithEnabledTieredTopic(quorum: String): Unit = { + @Test + def testClusterWideDisablementOfTieredStorageWithEnabledTieredTopic(): Unit = { val topicConfig = new Properties() topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") @@ -510,9 +73,8 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { faultHandler.setIgnore(true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def 
testClusterWithoutTieredStorageStartsSuccessfullyIfTopicWithTieringDisabled(quorum: String): Unit = { + @Test + def testClusterWithoutTieredStorageStartsSuccessfullyIfTopicWithTieringDisabled(): Unit = { val topicConfig = new Properties() topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, false.toString) @@ -525,63 +87,6 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { recreateBrokers(startup = true) } - private def assertThrowsException(exceptionType: Class[_ <: Throwable], - executable: Executable, - message: String = ""): Throwable = { - assertThrows(exceptionType, () => { - try { - executable.execute() - } catch { - case e: ExecutionException => throw e.getCause - } - }, message) - } - - private def verifyRemoteLogTopicConfigs(topicConfig: Properties): Unit = { - TestUtils.waitUntilTrue(() => { - val logBuffer = brokers.flatMap(_.logManager.getLog(new TopicPartition(testTopicName, 0))) - var result = logBuffer.nonEmpty - if (result) { - if (topicConfig.containsKey(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG).toBoolean == - logBuffer.head.config.remoteStorageEnable() - } - if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG).toLong == - logBuffer.head.config.localRetentionBytes() - } - if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG).toLong == - logBuffer.head.config.localRetentionMs() - } - if (topicConfig.containsKey(TopicConfig.RETENTION_MS_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.RETENTION_MS_CONFIG).toLong == - logBuffer.head.config.retentionMs - } - if (topicConfig.containsKey(TopicConfig.RETENTION_BYTES_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.RETENTION_BYTES_CONFIG).toLong == - logBuffer.head.config.retentionSize - } - if (topicConfig.contains(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG).toBoolean == - logBuffer.head.config.remoteLogCopyDisable() - } - if (topicConfig.contains(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG)) { - result = result && - topicConfig.getProperty(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG).toBoolean == - logBuffer.head.config.remoteLogDeleteOnDisable() - } - } - result - }, s"Failed to update topic config $topicConfig") - } - private def overrideProps(): Properties = { val props = new Properties() props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, sysRemoteStorageEnabled.toString) @@ -593,40 +98,4 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { props.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP, "1024") props } -} - -object MyRemoteStorageManager { - val deleteSegmentEventCounter = new AtomicInteger(0) -} - -class MyRemoteStorageManager extends NoOpRemoteStorageManager { - import MyRemoteStorageManager._ - - override def deleteLogSegmentData(remoteLogSegmentMetadata: RemoteLogSegmentMetadata): Unit = { - deleteSegmentEventCounter.incrementAndGet() - } -} - -class MyRemoteLogMetadataManager extends NoOpRemoteLogMetadataManager { - - import MyRemoteLogMetadataManager._ - val time = new MockTime() - - override def listRemoteLogSegments(topicIdPartition: TopicIdPartition): 
util.Iterator[RemoteLogSegmentMetadata] = { - val segmentMetadataList = new util.ArrayList[RemoteLogSegmentMetadata]() - for (idx <- 0 until segmentCountPerPartition) { - val timestamp = time.milliseconds() - val startOffset = idx * recordsPerSegment - val endOffset = startOffset + recordsPerSegment - 1 - val segmentLeaderEpochs: util.Map[Integer, java.lang.Long] = Collections.singletonMap(0, 0L) - segmentMetadataList.add(new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()), startOffset, endOffset, timestamp, 0, timestamp, segmentSize, Optional.empty(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, segmentLeaderEpochs)) - } - segmentMetadataList.iterator() - } -} - -object MyRemoteLogMetadataManager { - val segmentCountPerPartition = 10 - val recordsPerSegment = 100 - val segmentSize = 1024 -} +} \ No newline at end of file diff --git a/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala index dc836352787b2..fafce17382c27 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala @@ -71,6 +71,7 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { val brokerId: Integer = 0 val topic = "topic" + val sourceTopic = "source-topic" val topicPattern = "topic.*" val transactionalId = "transactional.id" val producerId = 83392L @@ -80,11 +81,16 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { val tp = new TopicPartition(topic, part) val logDir = "logDir" val group = "my-group" + val shareGroup = "share-group" + val streamsGroup = "streams-group" val protocolType = "consumer" val protocolName = "consumer-range" val clusterResource = new ResourcePattern(CLUSTER, Resource.CLUSTER_NAME, LITERAL) val topicResource = new ResourcePattern(TOPIC, topic, LITERAL) + val sourceTopicResource = new ResourcePattern(TOPIC, sourceTopic, LITERAL) val groupResource = new ResourcePattern(GROUP, group, LITERAL) + val shareGroupResource = new ResourcePattern(GROUP, shareGroup, LITERAL) + val streamsGroupResource = new ResourcePattern(GROUP, streamsGroup, LITERAL) val transactionalIdResource = new ResourcePattern(TRANSACTIONAL_ID, transactionalId, LITERAL) producerConfig.setProperty(ProducerConfig.ACKS_CONFIG, "1") @@ -109,6 +115,7 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1") properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") + properties.put(GroupCoordinatorConfig.CONSUMER_GROUP_REGEX_REFRESH_INTERVAL_MS_CONFIG, "10000") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, "1") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1") diff --git a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala index 517614d84a11f..6a60621308bc2 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala @@ -94,12 +94,14 @@ abstract class AbstractConsumerTest extends BaseRequestTest { def awaitNonEmptyRecords[K, V](consumer: Consumer[K, V], partition: TopicPartition, pollTimeoutMs: Long = 100): 
ConsumerRecords[K, V] = { + var result: ConsumerRecords[K, V] = null + TestUtils.pollRecordsUntilTrue(consumer, (polledRecords: ConsumerRecords[K, V]) => { - if (polledRecords.records(partition).asScala.nonEmpty) - return polledRecords - false + val hasRecords = !polledRecords.records(partition).isEmpty + if (hasRecords) result = polledRecords + hasRecords }, s"Consumer did not consume any messages for partition $partition before timeout.", JTestUtils.DEFAULT_MAX_WAIT_MS, pollTimeoutMs) - throw new IllegalStateException("Should have timed out before reaching here") + result } /** diff --git a/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala deleted file mode 100644 index 3c73bf13f8bf3..0000000000000 --- a/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package kafka.api - -import java.util -import java.util.{Collections, Properties} -import kafka.integration.KafkaServerTestHarness -import kafka.server.KafkaConfig -import kafka.utils.{Logging, TestUtils} -import org.apache.kafka.clients.admin.AlterConfigOp.OpType -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsOptions, ConfigEntry} -import org.apache.kafka.common.config.{ConfigResource, SslConfigs, TopicConfig} -import org.apache.kafka.common.errors.{InvalidConfigurationException, InvalidRequestException, PolicyViolationException} -import org.apache.kafka.common.utils.Utils -import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.server.config.{ServerConfigs, ServerLogConfigs} -import org.apache.kafka.server.policy.AlterConfigPolicy -import org.apache.kafka.storage.internals.log.LogConfig -import org.apache.kafka.test.TestUtils.assertFutureThrows -import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertTrue} -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -import scala.collection.mutable -import scala.jdk.CollectionConverters._ - -/** - * Tests AdminClient calls when the broker is configured with policies like AlterConfigPolicy, CreateTopicPolicy, etc. 
- */ -@Timeout(120) -class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with Logging { - - import AdminClientWithPoliciesIntegrationTest._ - - var client: Admin = _ - val brokerCount = 3 - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - super.setUp(testInfo) - TestUtils.waitUntilBrokerMetadataIsPropagated(brokers) - } - - @AfterEach - override def tearDown(): Unit = { - if (client != null) - Utils.closeQuietly(client, "AdminClient") - super.tearDown() - } - - def createConfig: util.Map[String, Object] = - Map[String, Object](AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers()).asJava - - override def generateConfigs: collection.Seq[KafkaConfig] = { - val configs = TestUtils.createBrokerConfigs(brokerCount) - configs.foreach(overrideNodeConfigs) - configs.map(KafkaConfig.fromProps) - } - - override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { - val props = new Properties() - overrideNodeConfigs(props) - Seq(props) - } - - private def overrideNodeConfigs(props: Properties): Unit = { - props.put(ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, classOf[Policy]) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testValidAlterConfigs(quorum: String): Unit = { - client = Admin.create(createConfig) - // Create topics - val topic1 = "describe-alter-configs-topic-1" - val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) - val topicConfig1 = new Properties - val maxMessageBytes = "500000" - val retentionMs = "60000000" - topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageBytes) - topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, retentionMs) - createTopic(topic1, 1, 1, topicConfig1) - - val topic2 = "describe-alter-configs-topic-2" - val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) - createTopic(topic2) - - PlaintextAdminIntegrationTest.checkValidAlterConfigs(client, this, topicResource1, topicResource2, maxMessageBytes, retentionMs) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testInvalidAlterConfigs(quorum: String): Unit = { - client = Admin.create(createConfig) - PlaintextAdminIntegrationTest.checkInvalidAlterConfigs(this, client) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testInvalidAlterConfigsDueToPolicy(quorum: String): Unit = { - client = Admin.create(createConfig) - - // Create topics - val topic1 = "invalid-alter-configs-due-to-policy-topic-1" - val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) - createTopic(topic1) - - val topic2 = "invalid-alter-configs-due-to-policy-topic-2" - val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) - createTopic(topic2) - - val topic3 = "invalid-alter-configs-due-to-policy-topic-3" - val topicResource3 = new ConfigResource(ConfigResource.Type.TOPIC, topic3) - createTopic(topic3) - - // Set a mutable broker config - val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, brokers.head.config.brokerId.toString) - var alterResult = client.incrementalAlterConfigs(Collections.singletonMap(brokerResource, - util.Arrays.asList(new AlterConfigOp(new ConfigEntry(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "50000"), OpType.SET)))) - alterResult.all.get - assertEquals(Set(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG), validationsForResource(brokerResource).head.configs().keySet().asScala) - validations.clear() - - val alterConfigs = new util.HashMap[ConfigResource, 
util.Collection[AlterConfigOp]]() - alterConfigs.put(topicResource1, util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), - new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2"), OpType.SET) - )) - - alterConfigs.put(topicResource2, util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.8"), OpType.SET), - )) - - alterConfigs.put(topicResource3, util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "-1"), OpType.SET), - )) - - alterConfigs.put(brokerResource, util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "12313"), OpType.SET), - )) - - // Alter configs: second is valid, the others are invalid - alterResult = client.incrementalAlterConfigs(alterConfigs) - - assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) - assertFutureThrows(classOf[PolicyViolationException], alterResult.values.get(topicResource1)) - alterResult.values.get(topicResource2).get - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource3)) - assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) - assertTrue(validationsForResource(brokerResource).isEmpty, - "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") - validations.clear() - - // Verify that the second resource was updated and the others were not - ensureConsistentKRaftMetadata() - var describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, topicResource3, brokerResource).asJava) - var configs = describeResult.all.get - assertEquals(4, configs.size) - - assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - assertEquals(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT.toString, configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value) - - assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - - assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) - - // Alter configs with validateOnly = true: only second is valid - alterConfigs.put(topicResource2, util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.7"), OpType.SET), - )) - - alterResult = client.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) - - assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) - assertFutureThrows(classOf[PolicyViolationException], alterResult.values.get(topicResource1)) - alterResult.values.get(topicResource2).get - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource3)) - assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) - assertTrue(validationsForResource(brokerResource).isEmpty, - "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") - validations.clear() - - // Verify that no resources are updated since validate_only = true - ensureConsistentKRaftMetadata() - describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, 
topicResource3, brokerResource).asJava) - configs = describeResult.all.get - assertEquals(4, configs.size) - - assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - assertEquals(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT.toString, configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value) - - assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - - assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) - - // Do an incremental alter config on the broker, ensure we don't see the broker config we set earlier in the policy - alterResult = client.incrementalAlterConfigs(Map( - brokerResource -> - Seq(new AlterConfigOp( - new ConfigEntry(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, "9999"), OpType.SET) - ).asJavaCollection - ).asJava) - alterResult.all.get - assertEquals(Set(SocketServerConfigs.MAX_CONNECTIONS_CONFIG), validationsForResource(brokerResource).head.configs().keySet().asScala) - } - -} - -object AdminClientWithPoliciesIntegrationTest { - - val validations = new mutable.ListBuffer[AlterConfigPolicy.RequestMetadata]() - - def validationsForResource(resource: ConfigResource): Seq[AlterConfigPolicy.RequestMetadata] = { - validations.filter { req => req.resource().equals(resource) }.toSeq - } - - class Policy extends AlterConfigPolicy { - - var configs: Map[String, _] = _ - var closed = false - - def configure(configs: util.Map[String, _]): Unit = { - validations.clear() - this.configs = configs.asScala.toMap - } - - def validate(requestMetadata: AlterConfigPolicy.RequestMetadata): Unit = { - validations.append(requestMetadata) - require(!closed, "Policy should not be closed") - require(configs.nonEmpty, "configure should have been called with non empty configs") - require(!requestMetadata.configs.isEmpty, "request configs should not be empty") - require(requestMetadata.resource.name.nonEmpty, "resource name should not be empty") - if (requestMetadata.configs.containsKey(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG)) - throw new PolicyViolationException("Min in sync replicas cannot be updated") - } - - def close(): Unit = closed = true - - } -} diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala index 7fccbdc9e28c9..bfcc0bb0d4fca 100644 --- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala @@ -17,11 +17,12 @@ import java.time.Duration import java.util import java.util.concurrent.{ExecutionException, Semaphore} import java.util.regex.Pattern -import java.util.{Collections, Optional, Properties} +import java.util.{Comparator, Optional, Properties, UUID} import kafka.utils.{TestInfoUtils, TestUtils} import kafka.utils.TestUtils.waitUntilTrue import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ListGroupsOptions, NewTopic} import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.consumer.internals.{StreamsRebalanceData, StreamsRebalanceListener} import org.apache.kafka.clients.producer._ import org.apache.kafka.common.acl.AclOperation._ import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} @@ -37,26 +38,23 @@ import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProt import 
org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetForLeaderPartition, OffsetForLeaderTopic, OffsetForLeaderTopicCollection} -import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AlterPartitionReassignmentsRequestData, AlterReplicaLogDirsRequestData, ConsumerGroupDescribeRequestData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, CreateAclsRequestData, CreatePartitionsRequestData, CreateTopicsRequestData, DeleteAclsRequestData, DeleteGroupsRequestData, DeleteRecordsRequestData, DeleteTopicsRequestData, DescribeClusterRequestData, DescribeConfigsRequestData, DescribeGroupsRequestData, DescribeLogDirsRequestData, DescribeProducersRequestData, DescribeTransactionsRequestData, FetchResponseData, FindCoordinatorRequestData, HeartbeatRequestData, IncrementalAlterConfigsRequestData, JoinGroupRequestData, ListPartitionReassignmentsRequestData, ListTransactionsRequestData, MetadataRequestData, OffsetCommitRequestData, ProduceRequestData, SyncGroupRequestData, WriteTxnMarkersRequestData} +import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AlterPartitionReassignmentsRequestData, AlterReplicaLogDirsRequestData, AlterShareGroupOffsetsRequestData, ConsumerGroupDescribeRequestData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, CreateAclsRequestData, CreatePartitionsRequestData, CreateTopicsRequestData, DeleteAclsRequestData, DeleteGroupsRequestData, DeleteRecordsRequestData, DeleteShareGroupOffsetsRequestData, DeleteShareGroupStateRequestData, DeleteTopicsRequestData, DescribeClusterRequestData, DescribeConfigsRequestData, DescribeGroupsRequestData, DescribeLogDirsRequestData, DescribeProducersRequestData, DescribeShareGroupOffsetsRequestData, DescribeTransactionsRequestData, FetchResponseData, FindCoordinatorRequestData, HeartbeatRequestData, IncrementalAlterConfigsRequestData, InitializeShareGroupStateRequestData, JoinGroupRequestData, ListPartitionReassignmentsRequestData, ListTransactionsRequestData, MetadataRequestData, OffsetCommitRequestData, OffsetFetchRequestData, OffsetFetchResponseData, ProduceRequestData, ReadShareGroupStateRequestData, ReadShareGroupStateSummaryRequestData, ShareAcknowledgeRequestData, ShareFetchRequestData, ShareGroupDescribeRequestData, ShareGroupHeartbeatRequestData, StreamsGroupDescribeRequestData, StreamsGroupHeartbeatRequestData, StreamsGroupHeartbeatResponseData, SyncGroupRequestData, WriteShareGroupStateRequestData, WriteTxnMarkersRequestData} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} -import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData import org.apache.kafka.common.requests._ import org.apache.kafka.common.resource.PatternType.{LITERAL, PREFIXED} import org.apache.kafka.common.resource.ResourceType._ import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourcePatternFilter, ResourceType} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.Utils -import org.apache.kafka.common.{ElectionType, IsolationLevel, KafkaException, TopicPartition, Uuid, requests} +import org.apache.kafka.common.{ElectionType, IsolationLevel, KafkaException, TopicIdPartition, 
TopicPartition, Uuid, requests} import org.apache.kafka.test.{TestUtils => JTestUtils} import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{CsvSource, MethodSource, ValueSource} - -import java.util.Collections.singletonList import org.apache.kafka.common.message.MetadataRequestData.MetadataRequestTopic import org.apache.kafka.common.message.WriteTxnMarkersRequestData.{WritableTxnMarker, WritableTxnMarkerTopic} import org.apache.kafka.coordinator.group.GroupConfig @@ -73,6 +71,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val groupDeleteAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW))) val groupDescribeConfigsAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE_CONFIGS, ALLOW))) val groupAlterConfigsAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) + val shareGroupReadAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW))) + val shareGroupDescribeAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) + val shareGroupDeleteAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW))) + val shareGroupDescribeConfigsAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE_CONFIGS, ALLOW))) + val shareGroupAlterConfigsAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) + val streamsGroupReadAcl = Map(streamsGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW))) + val streamsGroupDescribeAcl = Map(streamsGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) val clusterAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CLUSTER_ACTION, ALLOW))) val clusterCreateAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW))) val clusterAlterAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER, ALLOW))) @@ -89,15 +94,19 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val topicAlterConfigsAcl = Map(topicResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) val transactionIdWriteAcl = Map(transactionalIdResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW))) val transactionalIdDescribeAcl = Map(transactionalIdResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) + val sourceTopicDescribeAcl = Map(sourceTopicResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) val numRecords = 1 val requestKeyToError = (topicNames: Map[Uuid, String], version: Short) => Map[ApiKeys, Nothing => Errors]( ApiKeys.METADATA -> ((resp: requests.MetadataResponse) => resp.errors.asScala.find(_._1 == topic).getOrElse(("test", Errors.NONE))._2), ApiKeys.PRODUCE -> ((resp: requests.ProduceResponse) => { + val topicId = 
topicNames.find { case (_, topicName) => topicName == topic} + .map { case (topicId, _) => topicId } + .getOrElse(Uuid.ZERO_UUID) Errors.forCode( resp.data - .responses.find(topic) + .responses.find("", topicId) // version is always >= 13 no need to use topic name .partitionResponses.asScala.find(_.index == part).get .errorCode ) @@ -115,7 +124,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }), ApiKeys.OFFSET_COMMIT -> ((resp: requests.OffsetCommitResponse) => Errors.forCode( resp.data.topics().get(0).partitions().get(0).errorCode)), - ApiKeys.OFFSET_FETCH -> ((resp: requests.OffsetFetchResponse) => resp.groupLevelError(group)), + ApiKeys.OFFSET_FETCH -> ((resp: requests.OffsetFetchResponse) => Errors.forCode(resp.group(group).errorCode())), ApiKeys.FIND_COORDINATOR -> ((resp: FindCoordinatorResponse) => { Errors.forCode(resp.data.coordinators.asScala.find(g => group == g.key).head.errorCode) }), @@ -196,7 +205,31 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }), ApiKeys.CONSUMER_GROUP_HEARTBEAT -> ((resp: ConsumerGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), ApiKeys.CONSUMER_GROUP_DESCRIBE -> ((resp: ConsumerGroupDescribeResponse) => - Errors.forCode(resp.data.groups.asScala.find(g => group == g.groupId).head.errorCode)) + Errors.forCode(resp.data.groups.asScala.find(g => group == g.groupId).head.errorCode)), + ApiKeys.SHARE_GROUP_HEARTBEAT -> ((resp: ShareGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), + ApiKeys.SHARE_GROUP_DESCRIBE -> ((resp: ShareGroupDescribeResponse) => + Errors.forCode(resp.data.groups.asScala.find(g => shareGroup == g.groupId).head.errorCode)), + ApiKeys.SHARE_FETCH -> ((resp: ShareFetchResponse) => Errors.forCode(resp.data.errorCode)), + ApiKeys.SHARE_ACKNOWLEDGE -> ((resp: ShareAcknowledgeResponse) => Errors.forCode(resp.data.errorCode)), + ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> ((resp: InitializeShareGroupStateResponse) => Errors.forCode( + resp.data.results.get(0).partitions.get(0).errorCode)), + ApiKeys.READ_SHARE_GROUP_STATE -> ((resp: ReadShareGroupStateResponse) => Errors.forCode( + resp.data.results.get(0).partitions.get(0).errorCode)), + ApiKeys.WRITE_SHARE_GROUP_STATE -> ((resp: WriteShareGroupStateResponse) => Errors.forCode( + resp.data.results.get(0).partitions.get(0).errorCode)), + ApiKeys.DELETE_SHARE_GROUP_STATE -> ((resp: DeleteShareGroupStateResponse) => Errors.forCode( + resp.data.results.get(0).partitions.get(0).errorCode)), + ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> ((resp: ReadShareGroupStateSummaryResponse) => Errors.forCode( + resp.data.results.get(0).partitions.get(0).errorCode)), + ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> ((resp: DescribeShareGroupOffsetsResponse) => Errors.forCode( + resp.data.groups.asScala.find(g => shareGroup == g.groupId).head.errorCode)), + ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> ((resp: DeleteShareGroupOffsetsResponse) => Errors.forCode( + resp.data.errorCode)), + ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> ((resp: AlterShareGroupOffsetsResponse) => Errors.forCode( + resp.data.errorCode)), + ApiKeys.STREAMS_GROUP_HEARTBEAT -> ((resp: StreamsGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), + ApiKeys.STREAMS_GROUP_DESCRIBE -> ((resp: StreamsGroupDescribeResponse) => + Errors.forCode(resp.data.groups.asScala.find(g => streamsGroup == g.groupId).head.errorCode)) ) def findErrorForTopicId(id: Uuid, response: AbstractResponse): Errors = { @@ -252,25 +285,42 @@ class AuthorizerIntegrationTest extends 
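The PRODUCE entry just above has to translate the topic name into a topic id before it can locate its partition response, since v13+ produce responses are keyed by id rather than name. The lookup, pulled out as a hypothetical helper (name and signature are illustrative, not part of the patch):

    import org.apache.kafka.common.Uuid

    // Resolve a topic's Uuid from the (id -> name) map threaded through requestKeyToError,
    // falling back to ZERO_UUID when the topic is unknown.
    def topicIdFor(topicNames: Map[Uuid, String], name: String): Uuid =
      topicNames.collectFirst { case (id, n) if n == name => id }.getOrElse(Uuid.ZERO_UUID)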
AbstractAuthorizerIntegrationTest { ApiKeys.DESCRIBE_PRODUCERS -> topicReadAcl, ApiKeys.DESCRIBE_TRANSACTIONS -> transactionalIdDescribeAcl, ApiKeys.CONSUMER_GROUP_HEARTBEAT -> groupReadAcl, - ApiKeys.CONSUMER_GROUP_DESCRIBE -> groupDescribeAcl + ApiKeys.CONSUMER_GROUP_DESCRIBE -> groupDescribeAcl, + ApiKeys.SHARE_GROUP_HEARTBEAT -> (shareGroupReadAcl ++ topicDescribeAcl), + ApiKeys.SHARE_GROUP_DESCRIBE -> (shareGroupDescribeAcl ++ topicDescribeAcl), + ApiKeys.SHARE_FETCH -> (shareGroupReadAcl ++ topicReadAcl), + ApiKeys.SHARE_ACKNOWLEDGE -> (shareGroupReadAcl ++ topicReadAcl), + ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> clusterAcl, + ApiKeys.READ_SHARE_GROUP_STATE -> clusterAcl, + ApiKeys.WRITE_SHARE_GROUP_STATE -> clusterAcl, + ApiKeys.DELETE_SHARE_GROUP_STATE -> clusterAcl, + ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> clusterAcl, + ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> (shareGroupDescribeAcl ++ topicDescribeAcl), + ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> (shareGroupDeleteAcl ++ topicReadAcl), + ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> (shareGroupReadAcl ++ topicReadAcl), + ApiKeys.STREAMS_GROUP_HEARTBEAT -> (streamsGroupReadAcl ++ topicDescribeAcl), + ApiKeys.STREAMS_GROUP_DESCRIBE -> (streamsGroupDescribeAcl ++ topicDescribeAcl), ) private def createMetadataRequest(allowAutoTopicCreation: Boolean) = { - new requests.MetadataRequest.Builder(List(topic).asJava, allowAutoTopicCreation).build() + new requests.MetadataRequest.Builder(java.util.List.of(topic), allowAutoTopicCreation).build() } - private def createProduceRequest = + private def createProduceRequest(name: String, id: Uuid, version: Short) = { requests.ProduceRequest.builder(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) - .iterator)) - .setAcks(1.toShort) - .setTimeoutMs(5000)) - .build() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection( + util.List.of(new ProduceRequestData.TopicProduceData() + .setName(name) + .setTopicId(id) + .setPartitionData(util.List.of( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) + .iterator)) + .setAcks(1.toShort) + .setTimeoutMs(5000)) + .build(version) + } private def createFetchRequest = { val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] @@ -286,6 +336,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { requests.FetchRequest.Builder.forConsumer(version, 100, Int.MaxValue, partitionMap).build() } + private def createFetchRequestWithEmptyTopicNameAndZeroTopicId(version: Short) = { + val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] + partitionMap.put(new TopicPartition("", part), + new requests.FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 100, Optional.of(27))) + requests.FetchRequest.Builder.forConsumer(version, 100, Int.MaxValue, partitionMap).build() + } + private def createFetchFollowerRequest = { val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] partitionMap.put(tp, new requests.FetchRequest.PartitionData(getTopicIds().getOrElse(tp.topic, 
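As a usage note for the reworked createProduceRequest above: the version parameter decides whether the topic is addressed by name or by id, and that is how the version-boundary tests later in this file drive it. The two call shapes, mirroring those tests (they rely on the test class's own topic and getTopicIds helpers):

    // v <= 12: topic addressed by name, topic id left as ZERO_UUID.
    val nameBased = createProduceRequest(topic, Uuid.ZERO_UUID, 12.toShort)
    // v >= 13: topic addressed by id only, name left empty.
    val idBased = createProduceRequest("", getTopicIds()(topic), ApiKeys.PRODUCE.latestVersion())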
Uuid.ZERO_UUID), @@ -296,12 +353,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def createListOffsetsRequest = { requests.ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(List(new ListOffsetsTopic() + .setTargetTimes(java.util.List.of(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(List(new ListOffsetsPartition() + .setPartitions(java.util.List.of(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(0L) - .setCurrentLeaderEpoch(27)).asJava)).asJava + .setCurrentLeaderEpoch(27)))) ). build() } @@ -310,35 +367,73 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val epochs = new OffsetForLeaderTopicCollection() epochs.add(new OffsetForLeaderTopic() .setTopic(tp.topic) - .setPartitions(List(new OffsetForLeaderPartition() + .setPartitions(java.util.List.of(new OffsetForLeaderPartition() .setPartition(tp.partition) .setLeaderEpoch(7) - .setCurrentLeaderEpoch(27)).asJava)) + .setCurrentLeaderEpoch(27)))) OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build() } private def createOffsetFetchRequest: OffsetFetchRequest = { - new requests.OffsetFetchRequest.Builder(group, false, List(tp).asJava, false).build() + OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(false) + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group) + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(tp.topic) + .setPartitionIndexes(util.List.of[Integer](tp.partition)) + )) + )), + false + ).build() } private def createOffsetFetchRequestAllPartitions: OffsetFetchRequest = { - new requests.OffsetFetchRequest.Builder(group, false, null, false).build() + OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(false) + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(group) + .setTopics(null) + )), + false + ).build() } private def createOffsetFetchRequest(groupToPartitionMap: util.Map[String, util.List[TopicPartition]]): OffsetFetchRequest = { - new requests.OffsetFetchRequest.Builder(groupToPartitionMap, false, false).build() + OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setGroups(groupToPartitionMap.asScala.map { case (groupId, partitions) => + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setTopics( + if (partitions == null) + null + else + partitions.asScala.groupBy(_.topic).map { case (topic, partitions) => + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(topic) + .setPartitionIndexes(partitions.map(_.partition).map(Int.box).asJava) + }.toList.asJava) + }.toList.asJava), + false + ).build() } private def createFindCoordinatorRequest = { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) - .setCoordinatorKeys(Collections.singletonList(group))).build() + .setCoordinatorKeys(util.List.of(group))).build() } private def createJoinGroupRequest = { val protocolSet = new JoinGroupRequestProtocolCollection( - Collections.singletonList(new JoinGroupRequestData.JoinGroupRequestProtocol() + util.List.of(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocolName) .setMetadata("test".getBytes()) ).iterator()) @@ -363,24 +458,24 @@ class AuthorizerIntegrationTest extends 
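A usage sketch for the map-based createOffsetFetchRequest overload above (group and topic names are placeholders): each map entry becomes one OffsetFetchRequestGroup, and a null partition list keeps the old "fetch all committed offsets for this group" semantics.

    val byGroup = new util.HashMap[String, util.List[TopicPartition]]()
    byGroup.put("group-a", util.List.of(new TopicPartition("topic1", 0)))
    byGroup.put("group-b", null) // null = all committed offsets for group-b
    val multiGroupFetch = createOffsetFetchRequest(byGroup)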
AbstractAuthorizerIntegrationTest { .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setProtocolType(protocolType) .setProtocolName(protocolName) - .setAssignments(Collections.emptyList()) + .setAssignments(util.List.of) ).build() } private def createDescribeGroupsRequest = { - new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(List(group).asJava)).build() + new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(java.util.List.of(group))).build() } private def createOffsetCommitRequest = { - new requests.OffsetCommitRequest.Builder( + requests.OffsetCommitRequest.Builder.forTopicNames( new OffsetCommitRequestData() .setGroupId(group) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setGenerationIdOrMemberEpoch(1) - .setTopics(Collections.singletonList( + .setTopics(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName(topic) - .setPartitions(Collections.singletonList( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(part) .setCommittedOffset(0) @@ -410,19 +505,19 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID)).build() private def leaveGroupRequest = new LeaveGroupRequest.Builder( - group, Collections.singletonList( + group, util.List.of( new MemberIdentity() .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) )).build() private def deleteGroupsRequest = new DeleteGroupsRequest.Builder( new DeleteGroupsRequestData() - .setGroupsNames(Collections.singletonList(group)) + .setGroupsNames(util.List.of(group)) ).build() private def createTopicsRequest: CreateTopicsRequest = { new CreateTopicsRequest.Builder(new CreateTopicsRequestData().setTopics( - new CreatableTopicCollection(Collections.singleton(new CreatableTopic(). + new CreatableTopicCollection(util.Set.of(new CreatableTopic(). setName(topic).setNumPartitions(1). 
setReplicationFactor(1.toShort)).iterator))).build() } @@ -430,14 +525,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def deleteTopicsRequest: DeleteTopicsRequest = { new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopicNames(Collections.singletonList(topic)) + .setTopicNames(util.List.of(topic)) .setTimeoutMs(5000)).build() } private def deleteTopicsWithIdsRequest(topicId: Uuid): DeleteTopicsRequest = { new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopics(Collections.singletonList( + .setTopics(util.List.of( new DeleteTopicsRequestData.DeleteTopicState() .setTopicId(topicId))) .setTimeoutMs(5000)).build() @@ -446,21 +541,21 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def deleteRecordsRequest = new DeleteRecordsRequest.Builder( new DeleteRecordsRequestData() .setTimeoutMs(5000) - .setTopics(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsTopic() + .setTopics(util.List.of(new DeleteRecordsRequestData.DeleteRecordsTopic() .setName(tp.topic) - .setPartitions(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsPartition() + .setPartitions(util.List.of(new DeleteRecordsRequestData.DeleteRecordsPartition() .setPartitionIndex(tp.partition) .setOffset(0L)))))).build() private def describeConfigsRequest = - new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(Collections.singletonList( + new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(util.List.of( new DescribeConfigsRequestData.DescribeConfigsResource().setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic)))).build() private def alterConfigsRequest = new AlterConfigsRequest.Builder( - Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), - new AlterConfigsRequest.Config(Collections.singleton( + util.Map.of(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), + new AlterConfigsRequest.Config(util.Set.of( new AlterConfigsRequest.ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "1000000") ))), true).build() @@ -490,7 +585,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } private def describeGroupConfigsRequest = { - new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(Collections.singletonList( + new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(util.List.of( new DescribeConfigsRequestData.DescribeConfigsResource().setResourceType(ConfigResource.Type.GROUP.id) .setResourceName(group)))).build() } @@ -498,7 +593,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def describeAclsRequest = new DescribeAclsRequest.Builder(AclBindingFilter.ANY).build() private def createAclsRequest: CreateAclsRequest = new CreateAclsRequest.Builder( - new CreateAclsRequestData().setCreations(Collections.singletonList( + new CreateAclsRequestData().setCreations(util.List.of( new CreateAclsRequestData.AclCreation() .setResourceType(ResourceType.TOPIC.code) .setResourceName("mytopic") @@ -510,7 +605,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ).build() private def deleteAclsRequest: DeleteAclsRequest = new DeleteAclsRequest.Builder( - new DeleteAclsRequestData().setFilters(Collections.singletonList( + new DeleteAclsRequestData().setFilters(util.List.of( new DeleteAclsRequestData.DeleteAclsFilter() 
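Much of the mechanical churn in these hunks swaps Collections.singletonList, util.Arrays.asList and List(...).asJava for the Java 9+ immutable factories. A tiny standalone illustration of why the swap is behaviour-preserving for this test data (the string value is a placeholder):

    import java.util

    val before: util.List[String] = util.Collections.singletonList("topic")
    val after:  util.List[String] = util.List.of("topic")
    assert(before == after) // java.util collection equality is element-based, so assertions keep passing

    // The same holds for the util.Set.of and util.Map.of call sites in this diff; the of(...)
    // factories are immutable and reject null elements, which these call sites never pass.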
.setResourceTypeFilter(ResourceType.TOPIC.code) .setResourceNameFilter(null) @@ -526,16 +621,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setPath(logDir) dir.topics.add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic() .setName(tp.topic) - .setPartitions(Collections.singletonList(tp.partition))) + .setPartitions(util.List.of(tp.partition))) val data = new AlterReplicaLogDirsRequestData() data.dirs.add(dir) new AlterReplicaLogDirsRequest.Builder(data).build() } - private def describeLogDirsRequest = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(new DescribeLogDirsRequestData.DescribableLogDirTopicCollection(Collections.singleton( - new DescribeLogDirsRequestData.DescribableLogDirTopic().setTopic(tp.topic).setPartitions(Collections.singletonList(tp.partition))).iterator()))).build() + private def describeLogDirsRequest = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(new DescribeLogDirsRequestData.DescribableLogDirTopicCollection(util.Set.of( + new DescribeLogDirsRequestData.DescribableLogDirTopic().setTopic(tp.topic).setPartitions(util.List.of(tp.partition))).iterator()))).build() - private def addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient(transactionalId, 1, 1, Collections.singletonList(tp)).build() + private def addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient(transactionalId, 1, 1, util.List.of(tp)).build() private def addOffsetsToTxnRequest = new AddOffsetsToTxnRequest.Builder( new AddOffsetsToTxnRequestData() @@ -547,56 +642,56 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def electLeadersRequest = new ElectLeadersRequest.Builder( ElectionType.PREFERRED, - Collections.singleton(tp), + util.Set.of(tp), 10000 ).build() private def describeProducersRequest: DescribeProducersRequest = new DescribeProducersRequest.Builder( new DescribeProducersRequestData() - .setTopics(List( + .setTopics(java.util.List.of( new DescribeProducersRequestData.TopicRequest() .setName(tp.topic) - .setPartitionIndexes(List(Int.box(tp.partition)).asJava) - ).asJava) + .setPartitionIndexes(java.util.List.of(Int.box(tp.partition))) + )) ).build() private def describeTransactionsRequest: DescribeTransactionsRequest = new DescribeTransactionsRequest.Builder( - new DescribeTransactionsRequestData().setTransactionalIds(List(transactionalId).asJava) + new DescribeTransactionsRequestData().setTransactionalIds(java.util.List.of(transactionalId)) ).build() private def alterPartitionReassignmentsRequest = new AlterPartitionReassignmentsRequest.Builder( new AlterPartitionReassignmentsRequestData().setTopics( - List(new AlterPartitionReassignmentsRequestData.ReassignableTopic() + java.util.List.of(new AlterPartitionReassignmentsRequestData.ReassignableTopic() .setName(topic) .setPartitions( - List(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition)).asJava - )).asJava + java.util.List.of(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition)) + )) ) ).build() private def listPartitionReassignmentsRequest = new ListPartitionReassignmentsRequest.Builder( new ListPartitionReassignmentsRequestData().setTopics( - List(new ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics() + java.util.List.of(new ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics() .setName(topic) .setPartitionIndexes( - 
List(Integer.valueOf(tp.partition)).asJava - )).asJava + java.util.List.of(Integer.valueOf(tp.partition)) + )) ) ).build() private def writeTxnMarkersRequest: WriteTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( new WriteTxnMarkersRequestData() .setMarkers( - List(new WritableTxnMarker() + java.util.List.of(new WritableTxnMarker() .setProducerId(producerId) .setProducerEpoch(1) .setTransactionResult(false) - .setTopics(List(new WritableTxnMarkerTopic() + .setTopics(java.util.List.of(new WritableTxnMarkerTopic() .setName(tp.topic()) - .setPartitionIndexes(List(Integer.valueOf(tp.partition())).asJava) - ).asJava) + .setPartitionIndexes(java.util.List.of(Integer.valueOf(tp.partition()))) + )) .setCoordinatorEpoch(1) - ).asJava + ) ) ).build() @@ -604,13 +699,187 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { new ConsumerGroupHeartbeatRequestData() .setGroupId(group) .setMemberEpoch(0) - .setSubscribedTopicNames(List(topic).asJava)).build() + .setSubscribedTopicNames(java.util.List.of(topic))).build() private def consumerGroupDescribeRequest = new ConsumerGroupDescribeRequest.Builder( new ConsumerGroupDescribeRequestData() - .setGroupIds(List(group).asJava) + .setGroupIds(java.util.List.of(group)) .setIncludeAuthorizedOperations(false)).build() + private def shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId(shareGroup) + .setMemberEpoch(0) + .setSubscribedTopicNames(List(topic).asJava)).build(ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion) + + + private def shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( + new ShareGroupDescribeRequestData() + .setGroupIds(List(shareGroup).asJava) + .setIncludeAuthorizedOperations(false)).build(ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion) + + + private def createShareFetchRequest = { + val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) + val send: Seq[TopicIdPartition] = Seq( + new TopicIdPartition(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID), new TopicPartition(topic, part))) + val ackMap = new util.HashMap[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + requests.ShareFetchRequest.Builder.forConsumer(shareGroup, metadata, 100, 0, Int.MaxValue, 500, 500, + send.asJava, Seq.empty.asJava, ackMap).build() + } + + private def shareAcknowledgeRequest = { + val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() + .setGroupId(shareGroup) + .setMemberId(Uuid.randomUuid().toString) + .setShareSessionEpoch(1) + .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitionIndex(part) + .setAcknowledgementBatches(List( + new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(1) + .setAcknowledgeTypes(util.List.of(1.toByte)) + ).asJava) + ).iterator)) + ).iterator)) + + new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) + } + + private def initializeShareGroupStateRequest = new InitializeShareGroupStateRequest.Builder( + new InitializeShareGroupStateRequestData() + .setGroupId(shareGroup) + .setTopics(List(new 
InitializeShareGroupStateRequestData.InitializeStateData() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(List(new InitializeShareGroupStateRequestData.PartitionData() + .setPartition(part) + ).asJava) + ).asJava)).build() + + private def readShareGroupStateRequest = new ReadShareGroupStateRequest.Builder( + new ReadShareGroupStateRequestData() + .setGroupId(shareGroup) + .setTopics(List(new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(List(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(part) + .setLeaderEpoch(0) + ).asJava) + ).asJava)).build() + + private def writeShareGroupStateRequest = new WriteShareGroupStateRequest.Builder( + new WriteShareGroupStateRequestData() + .setGroupId(shareGroup) + .setTopics(List(new WriteShareGroupStateRequestData.WriteStateData() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(List(new WriteShareGroupStateRequestData.PartitionData() + .setPartition(part) + ).asJava) + ).asJava)).build() + + private def deleteShareGroupStateRequest = new DeleteShareGroupStateRequest.Builder( + new DeleteShareGroupStateRequestData() + .setGroupId(shareGroup) + .setTopics(List(new DeleteShareGroupStateRequestData.DeleteStateData() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(List(new DeleteShareGroupStateRequestData.PartitionData() + .setPartition(part) + ).asJava) + ).asJava)).build() + + private def readShareGroupStateSummaryRequest = new ReadShareGroupStateSummaryRequest.Builder( + new ReadShareGroupStateSummaryRequestData() + .setGroupId(shareGroup) + .setTopics(List(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions(List(new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(part) + .setLeaderEpoch(0) + ).asJava) + ).asJava)).build(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY.latestVersion) + + private def describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequest.Builder( + new DescribeShareGroupOffsetsRequestData() + .setGroups(List(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup() + .setGroupId(shareGroup) + .setTopics(List(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestTopic() + .setTopicName(topic) + .setPartitions(List(Integer.valueOf(part) + ).asJava) + ).asJava) + ).asJava)).build(ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS.latestVersion) + + private def deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequest.Builder( + new DeleteShareGroupOffsetsRequestData() + .setGroupId(shareGroup) + .setTopics(List(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic() + .setTopicName(topic) + ).asJava)).build(ApiKeys.DELETE_SHARE_GROUP_OFFSETS.latestVersion) + + private def alterShareGroupOffsetsRequest = { + val data = new AlterShareGroupOffsetsRequestData + val topicCollection = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection() + topicCollection.add(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() + .setTopicName(topic) + .setPartitions(List(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() + .setPartitionIndex(part) + .setStartOffset(0) + ).asJava)) + data.setGroupId(shareGroup).setTopics(topicCollection) + new 
AlterShareGroupOffsetsRequest.Builder(data).build(ApiKeys.ALTER_SHARE_GROUP_OFFSETS.latestVersion) + } + + private def streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequest.Builder( + new StreamsGroupHeartbeatRequestData() + .setGroupId(streamsGroup) + .setMemberId("member-id") + .setMemberEpoch(0) + .setRebalanceTimeoutMs(1000) + .setActiveTasks(List.empty.asJava) + .setStandbyTasks(List.empty.asJava) + .setWarmupTasks(List.empty.asJava) + .setTopology(new StreamsGroupHeartbeatRequestData.Topology().setSubtopologies( + List(new StreamsGroupHeartbeatRequestData.Subtopology() + .setSourceTopics(List(topic).asJava) + ).asJava + ))).build(ApiKeys.STREAMS_GROUP_HEARTBEAT.latestVersion) + + private def streamsGroupHeartbeatRequest( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ) = new StreamsGroupHeartbeatRequest.Builder( + new StreamsGroupHeartbeatRequestData() + .setGroupId(streamsGroup) + .setMemberId("member-id") + .setMemberEpoch(0) + .setRebalanceTimeoutMs(1000) + .setActiveTasks(List.empty.asJava) + .setStandbyTasks(List.empty.asJava) + .setWarmupTasks(List.empty.asJava) + .setTopology(new StreamsGroupHeartbeatRequestData.Topology().setSubtopologies( + List(new StreamsGroupHeartbeatRequestData.Subtopology() + .setSourceTopics( + (if (topicAsSourceTopic) List(sourceTopic, topic) else List(sourceTopic)).asJava) + .setRepartitionSinkTopics( + (if (topicAsRepartitionSinkTopic) List(topic) else List.empty).asJava) + .setRepartitionSourceTopics( + (if (topicAsRepartitionSourceTopic) List(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(topic).setPartitions(3)) else List.empty).asJava) + .setStateChangelogTopics( + (if (topicAsStateChangelogTopics) List(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(topic)) else List.empty).asJava) + ).asJava + ))).build(ApiKeys.STREAMS_GROUP_HEARTBEAT.latestVersion) + + private def streamsGroupDescribeRequest = new StreamsGroupDescribeRequest.Builder( + new StreamsGroupDescribeRequestData() + .setGroupIds(List(streamsGroup).asJava) + .setIncludeAuthorizedOperations(false)).build(ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion) + private def sendRequests(requestKeyToRequest: mutable.Map[ApiKeys, AbstractRequest], topicExists: Boolean = true, topicNames: Map[Uuid, String] = getTopicNames()) = { for ((key, request) <- requestKeyToRequest) { @@ -625,6 +894,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // In KRaft mode, trying to delete a topic that doesn't exist but that you do have // describe permission for will give UNKNOWN_TOPIC_OR_PARTITION. 
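For context on the four-flag streamsGroupHeartbeatRequest overload above: the flags control which role the test topic plays in the declared Streams topology, letting the authorization tests probe each topic position separately. Two illustrative call shapes (flag combinations chosen for illustration only):

    // Topic appears only as a source topic of the subtopology.
    val topicAsSource = streamsGroupHeartbeatRequest(
      topicAsSourceTopic = true,
      topicAsRepartitionSinkTopic = false,
      topicAsRepartitionSourceTopic = false,
      topicAsStateChangelogTopics = false)

    // Topic appears only as a state changelog topic.
    val topicAsChangelog = streamsGroupHeartbeatRequest(
      topicAsSourceTopic = false,
      topicAsRepartitionSinkTopic = false,
      topicAsRepartitionSourceTopic = false,
      topicAsStateChangelogTopics = true)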
true + } else if (resourceToAcls.size > 1) { + false } else { describeAcls == acls } @@ -639,15 +910,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizationWithTopicExisting(quorum: String): Unit = { + @Test + def testAuthorizationWithTopicExisting(): Unit = { //First create the topic so we have a valid topic ID - sendRequests(mutable.Map(ApiKeys.CREATE_TOPICS -> createTopicsRequest)) + createTopicWithBrokerPrincipal(topic) + val topicId = getTopicIds()(topic) + assertNotNull(topicId) val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( ApiKeys.METADATA -> createMetadataRequest(allowAutoTopicCreation = true), - ApiKeys.PRODUCE -> createProduceRequest, + ApiKeys.PRODUCE -> createProduceRequest("", topicId, ApiKeys.PRODUCE.latestVersion()), ApiKeys.FETCH -> createFetchRequest, ApiKeys.LIST_OFFSETS -> createListOffsetsRequest, ApiKeys.OFFSET_FETCH -> createOffsetFetchRequest, @@ -679,24 +951,38 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ApiKeys.WRITE_TXN_MARKERS -> writeTxnMarkersRequest, ApiKeys.CONSUMER_GROUP_HEARTBEAT -> consumerGroupHeartbeatRequest, ApiKeys.CONSUMER_GROUP_DESCRIBE -> consumerGroupDescribeRequest, + ApiKeys.SHARE_GROUP_HEARTBEAT -> shareGroupHeartbeatRequest, + ApiKeys.SHARE_GROUP_DESCRIBE -> shareGroupDescribeRequest, + ApiKeys.SHARE_FETCH -> createShareFetchRequest, + ApiKeys.SHARE_ACKNOWLEDGE -> shareAcknowledgeRequest, + ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> initializeShareGroupStateRequest, + ApiKeys.READ_SHARE_GROUP_STATE -> readShareGroupStateRequest, + ApiKeys.WRITE_SHARE_GROUP_STATE -> writeShareGroupStateRequest, + ApiKeys.DELETE_SHARE_GROUP_STATE -> deleteShareGroupStateRequest, + ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> readShareGroupStateSummaryRequest, + ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> describeShareGroupOffsetsRequest, + ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> deleteShareGroupOffsetsRequest, + ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> alterShareGroupOffsetsRequest, + ApiKeys.STREAMS_GROUP_HEARTBEAT -> streamsGroupHeartbeatRequest, + ApiKeys.STREAMS_GROUP_DESCRIBE -> streamsGroupDescribeRequest, + // Delete the topic last ApiKeys.DELETE_TOPICS -> deleteTopicsRequest ) - sendRequests(requestKeyToRequest, true) + sendRequests(requestKeyToRequest) } /* * even if the topic doesn't exist, request APIs should not leak the topic name */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizationWithTopicNotExisting(quorum: String): Unit = { + @Test + def testAuthorizationWithTopicNotExisting(): Unit = { val id = Uuid.randomUuid() val topicNames = Map(id -> "topic") val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( ApiKeys.METADATA -> createMetadataRequest(allowAutoTopicCreation = false), - ApiKeys.PRODUCE -> createProduceRequest, + ApiKeys.PRODUCE -> createProduceRequest("", id, ApiKeys.PRODUCE.latestVersion()), ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, ApiKeys.FETCH.latestVersion()), ApiKeys.LIST_OFFSETS -> createListOffsetsRequest, ApiKeys.OFFSET_COMMIT -> createOffsetCommitRequest, @@ -708,14 +994,87 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ApiKeys.CREATE_PARTITIONS -> createPartitionsRequest, ApiKeys.DELETE_GROUPS -> deleteGroupsRequest, ApiKeys.OFFSET_FOR_LEADER_EPOCH -> offsetsForLeaderEpochRequest, - ApiKeys.ELECT_LEADERS -> electLeadersRequest + ApiKeys.ELECT_LEADERS -> 
electLeadersRequest, + ApiKeys.SHARE_FETCH -> createShareFetchRequest, + ApiKeys.SHARE_ACKNOWLEDGE -> shareAcknowledgeRequest, + ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> describeShareGroupOffsetsRequest, + ApiKeys.STREAMS_GROUP_HEARTBEAT -> streamsGroupHeartbeatRequest, + ApiKeys.STREAMS_GROUP_DESCRIBE -> streamsGroupDescribeRequest ) - sendRequests(requestKeyToRequest, false, topicNames) + sendRequests(requestKeyToRequest, topicExists = false, topicNames) + } + + /** + * Test that the produce request fails with TOPIC_AUTHORIZATION_FAILED if the client doesn't have permission + * and topic name is used in the request. Even if the topic doesn't exist, we return TOPIC_AUTHORIZATION_FAILED to + * prevent leaking the topic name. + * This case covers produce request version from oldest to 12. + * The newer version is covered by testAuthorizationWithTopicNotExisting and testAuthorizationWithTopicExisting. + */ + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testAuthorizationProduceVersionFromOldestTo12(withTopicExisting: Boolean): Unit = { + if (withTopicExisting) { + createTopicWithBrokerPrincipal(topic) + } + + for (version <- ApiKeys.PRODUCE.oldestVersion to 12) { + val request = createProduceRequest(topic, Uuid.ZERO_UUID, version.toShort) + val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + val errorCode = response.asInstanceOf[ProduceResponse] + .data() + .responses() + .find(topic, Uuid.ZERO_UUID) + .partitionResponses.asScala.find(_.index == part).get + .errorCode + + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for produce request version $version") + } + } + + /** + * Test that the produce request fails with UNKNOWN_TOPIC_ID if topic id is zero when request version >= 13. + * The produce request only supports topic id above version 13. + */ + @Test + def testZeroTopicIdForProduceVersionFrom13ToNewest(): Unit = { + for (version <- 13 to ApiKeys.PRODUCE.latestVersion()) { + val request = createProduceRequest("", Uuid.ZERO_UUID, version.toShort) + val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + val errorCode = response.asInstanceOf[ProduceResponse] + .data() + .responses() + .find("", Uuid.ZERO_UUID) + .partitionResponses.asScala.find(_.index == part).get + .errorCode + + assertEquals(Errors.UNKNOWN_TOPIC_ID.code(), errorCode, s"unexpected error for produce request version $version") + } + } + + /** + * Test that the produce request fails with TOPIC_AUTHORIZATION_FAILED if topic name is empty when request version <= 12. + * The produce request only supports topic name below version 12. 
+ */ + @Test + def testEmptyTopicNameForProduceVersionFromOldestTo12(): Unit = { + for (version <- ApiKeys.PRODUCE.oldestVersion() to 12) { + val request = createProduceRequest("", Uuid.ZERO_UUID, version.toShort) + val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + val errorCode = response.asInstanceOf[ProduceResponse] + .data() + .responses() + .find("", Uuid.ZERO_UUID) + .partitionResponses.asScala.find(_.index == part).get + .errorCode + + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for produce request version $version") + } } @ParameterizedTest - @CsvSource(value = Array("false", "true")) + @ValueSource(booleans = Array(true, false)) def testTopicIdAuthorization(withTopicExisting: Boolean): Unit = { val topicId = if (withTopicExisting) { createTopicWithBrokerPrincipal(topic) @@ -764,24 +1123,77 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - /* - * even if the topic doesn't exist, request APIs should not leak the topic name + /** + * Test that the fetch request fails with TOPIC_AUTHORIZATION_FAILED if the client doesn't have permission + * and topic name is used in the request. Even if the topic doesn't exist, we return TOPIC_AUTHORIZATION_FAILED to + * prevent leaking the topic name. + * This case covers fetch request version from oldest to 12. + * The newer version is covered by testAuthorizationWithTopicNotExisting and testAuthorizationWithTopicExisting. */ @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizationFetchV12WithTopicNotExisting(quorum: String): Unit = { + @ValueSource(booleans = Array(true, false)) + def testAuthorizationFetchVersionFromOldestTo12(withTopicExisting: Boolean): Unit = { + if (withTopicExisting) { + createTopicWithBrokerPrincipal(topic) + } + val id = Uuid.ZERO_UUID - val topicNames = Map(id -> "topic") - val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( - ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, 12), - ) + val topicNames = Map(id -> topic) + for (version <- ApiKeys.FETCH.oldestVersion to 12) { + val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( + ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, version.toShort), + ) - sendRequests(requestKeyToRequest, false, topicNames) + sendRequests(requestKeyToRequest, withTopicExisting, topicNames) + } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateTopicAuthorizationWithClusterCreate(quorum: String): Unit = { + /** + * Test that the fetch request fails with UNKNOWN_TOPIC_ID if topic id is zero when request version >= 13. + * The fetch request only supports topic id above version 13. + */ + @Test + def testZeroTopicIdForFetchVersionFrom13ToNewest(): Unit = { + for (version <- 13 to ApiKeys.FETCH.latestVersion) { + val request = createFetchRequestWithEmptyTopicNameAndZeroTopicId(version.toShort) + val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + + val errorCode = response.asInstanceOf[FetchResponse] + .data() + .responses() + .get(0) + .partitions() + .get(0) + .errorCode + + assertEquals(Errors.UNKNOWN_TOPIC_ID.code(), errorCode, s"unexpected error for fetch request version $version") + } + } + + /** + * Test that the fetch request fails with TOPIC_AUTHORIZATION_FAILED if topic name is empty when request version <= 12. + * The fetch request only supports topic name below version 12. 
+ */ + @Test + def testEmptyTopicNameForFetchVersionFromOldestTo12(): Unit = { + for (version <- ApiKeys.FETCH.oldestVersion to 12) { + val request = createFetchRequestWithEmptyTopicNameAndZeroTopicId(version.toShort) + val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + + val errorCode = response.asInstanceOf[FetchResponse] + .data() + .responses() + .get(0) + .partitions() + .get(0) + .errorCode + + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for fetch request version $version") + } + } + + @Test + def testCreateTopicAuthorizationWithClusterCreate(): Unit = { removeAllClientAcls() val resources = Set[ResourceType](TOPIC) @@ -792,9 +1204,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(createTopicsRequest, resources, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchFollowerRequest(quorum: String): Unit = { + @Test + def testFetchFollowerRequest(): Unit = { createTopicWithBrokerPrincipal(topic) val request = createFetchFollowerRequest @@ -840,9 +1251,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { succeededPartitionDatas.foreach(partitionData => assertEquals(MemoryRecords.EMPTY, partitionData.records)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterConfigsRequestRequiresClusterPermissionForBrokerLogger(quorum: String): Unit = { + @Test + def testIncrementalAlterConfigsRequestRequiresClusterPermissionForBrokerLogger(): Unit = { createTopicWithBrokerPrincipal(topic) val data = new IncrementalAlterConfigsRequestData @@ -864,9 +1274,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resources, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testOffsetsForLeaderEpochClusterPermission(quorum: String): Unit = { + @Test + def testOffsetsForLeaderEpochClusterPermission(): Unit = { createTopicWithBrokerPrincipal(topic) val request = offsetsForLeaderEpochRequest @@ -883,50 +1292,44 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resources, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceWithNoTopicAccess(quorum: String): Unit = { + @Test + def testProduceWithNoTopicAccess(): Unit = { createTopicWithBrokerPrincipal(topic) val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceWithTopicDescribe(quorum: String): Unit = { + @Test + def testProduceWithTopicDescribe(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceWithTopicRead(quorum: String): Unit = { + @Test + def testProduceWithTopicRead(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val producer = createProducer() 
assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceWithTopicWrite(quorum: String): Unit = { + @Test + def testProduceWithTopicWrite(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) val producer = createProducer() sendRecords(producer, numRecords, tp) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreatePermissionOnTopicToWriteToNonExistentTopic(quorum: String): Unit = { + @Test + def testCreatePermissionOnTopicToWriteToNonExistentTopic(): Unit = { testCreatePermissionNeededToWriteToNonExistentTopic(TOPIC) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreatePermissionOnClusterToWriteToNonExistentTopic(quorum: String): Unit = { + @Test + def testCreatePermissionOnClusterToWriteToNonExistentTopic(): Unit = { testCreatePermissionNeededToWriteToNonExistentTopic(CLUSTER) } @@ -935,7 +1338,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), newTopicResource) val producer = createProducer() val e = assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) - assertEquals(Collections.singleton(tp.topic), e.unauthorizedTopics()) + assertEquals(util.Set.of(tp.topic), e.unauthorizedTopics()) val resource = if (resType == ResourceType.TOPIC) newTopicResource else clusterResource addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), resource) @@ -954,7 +1357,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { removeAllClientAcls() val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) } @@ -973,7 +1376,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // note this still depends on group access because we haven't set offsets explicitly, which means // they will first be fetched from the consumer coordinator (which requires group access) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[GroupAuthorizationException], () => consumeRecords(consumer)) assertEquals(group, e.groupId()) } @@ -993,8 +1396,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // in this case, we do an explicit seek, so there should be no need to query the coordinator at all // remove the group.id config to avoid coordinator created val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(List(tp).asJava) - consumer.seekToBeginning(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.seekToBeginning(java.util.List.of(tp)) consumeRecords(consumer) } @@ -1010,10 +1413,10 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - 
assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) + assertEquals(util.Set.of(topic), e.unauthorizedTopics()) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1030,9 +1433,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) + assertEquals(util.Set.of(topic), e.unauthorizedTopics()) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1049,9 +1452,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) + assertEquals(util.Set.of(topic), e.unauthorizedTopics()) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1068,7 +1471,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) consumeRecords(consumer) } @@ -1114,7 +1517,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val consumer = createConsumer() consumer.subscribe(Pattern.compile(topicPattern)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) + assertEquals(util.Set.of(topic), e.unauthorizedTopics()) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1176,7 +1579,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // ensure that internal topics are not included if no permission consumer.subscribe(Pattern.compile(".*")) consumeRecords(consumer) - assertEquals(Set(topic).asJava, consumer.subscription) + assertEquals(java.util.Set.of(topic), consumer.subscription) // now authorize the user for the internal topic and verify that we can subscribe addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), new ResourcePattern(TOPIC, @@ -1211,7 +1614,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumeRecords(consumer) consumeRecords(consumer) }) - assertEquals(Collections.singleton(GROUP_METADATA_TOPIC_NAME), e.unauthorizedTopics()) + assertEquals(util.Set.of(GROUP_METADATA_TOPIC_NAME), e.unauthorizedTopics()) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1255,10 +1658,10 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), newTopicResource) addAndVerifyAcls(groupReadAcl(groupResource), groupResource) val consumer = createConsumer() - 
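The produce and consume authorization tests in this stretch reduce to one minimum-privilege recipe, condensed here using only the test's own helpers (nothing new is introduced):

    // Producing requires WRITE on the topic; DESCRIBE or READ alone is rejected above.
    addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource)
    sendRecords(createProducer(), numRecords, tp)

    // Consuming with a group requires READ on both the group and the topic.
    addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource)
    addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource)
    val consumer = createConsumer()
    consumer.assign(java.util.List.of(tp))
    consumeRecords(consumer)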
consumer.assign(List(topicPartition).asJava) + consumer.assign(java.util.List.of(topicPartition)) val unauthorizedTopics = assertThrows(classOf[TopicAuthorizationException], () => (0 until 10).foreach(_ => consumer.poll(Duration.ofMillis(50L)))).unauthorizedTopics - assertEquals(Collections.singleton(newTopic), unauthorizedTopics) + assertEquals(util.Set.of(newTopic), unauthorizedTopics) val resource = if (resType == TOPIC) newTopicResource else clusterResource addAndVerifyAcls(acls, resource) @@ -1274,17 +1677,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }, "Partition metadata not propagated.") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreatePermissionMetadataRequestAutoCreate(quorum: String): Unit = { + @Test + def testCreatePermissionMetadataRequestAutoCreate(): Unit = { val readAcls = topicReadAcl(topicResource) addAndVerifyAcls(readAcls, topicResource) brokers.foreach(b => assertEquals(Optional.empty, b.metadataCache.getLeaderAndIsr(topic, 0))) - val metadataRequest = new MetadataRequest.Builder(List(topic).asJava, true).build() + val metadataRequest = new MetadataRequest.Builder(java.util.List.of(topic), true).build() val metadataResponse = connectAndReceive[MetadataResponse](metadataRequest) - assertEquals(Set().asJava, metadataResponse.topicsByError(Errors.NONE)) + assertEquals(java.util.Set.of(), metadataResponse.topicsByError(Errors.NONE)) val createAcls = topicCreateAcl(topicResource) addAndVerifyAcls(createAcls, topicResource) @@ -1292,7 +1694,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // retry as topic being created can have MetadataResponse with Errors.LEADER_NOT_AVAILABLE TestUtils.retry(JTestUtils.DEFAULT_MAX_WAIT_MS) { val metadataResponse = connectAndReceive[MetadataResponse](metadataRequest) - assertEquals(Set(topic).asJava, metadataResponse.topicsByError(Errors.NONE)) + assertEquals(java.util.Set.of(topic), metadataResponse.topicsByError(Errors.NONE)) } } @@ -1300,7 +1702,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testCommitWithNoAccess(groupProtocol: String): Unit = { val consumer = createConsumer() - assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) + assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1308,7 +1710,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { def testCommitWithNoTopicAccess(groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1319,7 +1721,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), 
topicResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1330,7 +1732,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1338,7 +1740,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { def testCommitWithNoGroupAccess(groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) + assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1348,14 +1750,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("getTestGroupProtocolParametersAll")) def testOffsetFetchWithNoAccess(groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertThrows(classOf[TopicAuthorizationException], () => consumer.position(tp)) } @@ -1365,7 +1767,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertThrows(classOf[GroupAuthorizationException], () => consumer.position(tp)) } @@ -1374,7 +1776,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { def testOffsetFetchWithNoTopicAccess(groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertThrows(classOf[TopicAuthorizationException], () => consumer.position(tp)) } @@ -1387,8 +1789,8 @@ class 
AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(offset)).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(offset))) removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) @@ -1399,15 +1801,20 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // without describe permission on the topic, we shouldn't be able to fetch offsets val offsetFetchRequest = createOffsetFetchRequestAllPartitions var offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(group)) - assertTrue(offsetFetchResponse.partitionDataMap(group).isEmpty) + assertEquals(Errors.NONE, Errors.forCode(offsetFetchResponse.group(group).errorCode())) + assertTrue(offsetFetchResponse.group(group).topics.isEmpty) // now add describe permission on the topic and verify that the offset can be fetched addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(group)) - assertTrue(offsetFetchResponse.partitionDataMap(group).containsKey(tp)) - assertEquals(offset, offsetFetchResponse.partitionDataMap(group).get(tp).offset) + assertEquals(Errors.NONE, Errors.forCode(offsetFetchResponse.group(group).errorCode())) + assertEquals( + offset, + offsetFetchResponse.group(group).topics.asScala + .find(_.name == tp.topic) + .flatMap(_.partitions.asScala.find(_.partitionIndex == tp.partition).map(_.committedOffset)) + .getOrElse(-1L) + ) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1418,12 +1825,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val topics: Seq[String] = (1 to 3).map(i => s"topic$i") val topicResources = topics.map(topic => new ResourcePattern(TOPIC, topic, LITERAL)) - val topic1List = singletonList(new TopicPartition(topics(0), 0)) - val topic1And2List = util.Arrays.asList( + val topic1List = util.List.of(new TopicPartition(topics(0), 0)) + val topic1And2List = util.List.of( new TopicPartition(topics(0), 0), new TopicPartition(topics(1), 0), new TopicPartition(topics(1), 1)) - val allTopicsList = util.Arrays.asList( + val allTopicsList = util.List.of( new TopicPartition(topics(0), 0), new TopicPartition(topics(1), 0), new TopicPartition(topics(1), 1), @@ -1442,21 +1849,33 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { createTopicWithBrokerPrincipal(topics(0)) createTopicWithBrokerPrincipal(topics(1), numPartitions = 2) createTopicWithBrokerPrincipal(topics(2), numPartitions = 3) - groupResources.foreach(r => { + groupResources.foreach { r => addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), r) - }) - topicResources.foreach(t => { + } + topicResources.foreach { t => addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), t) - }) + } 
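[Editorial aside, not part of the patch: the hunks above and below consistently replace Scala-to-Java conversions (`List(...).asJava`, `Collections.singleton*`, `util.Arrays.asList`) with the immutable Java 9+ factory methods `util.List.of` / `util.Set.of` / `util.Map.of`. A minimal, self-contained sketch of that migration pattern follows; the class and topic names are illustrative, and it assumes only the kafka-clients jar on the classpath. The test's local setup (`offset`, `leaderEpoch`, helper defs) continues right after this aside.]

```scala
import java.util
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition

object JavaFactoryMethodsSketch {
  def main(args: Array[String]): Unit = {
    val tp = new TopicPartition("topic1", 0)

    // Previously: List(tp).asJava or util.Arrays.asList(tp)
    val assignment: util.List[TopicPartition] = util.List.of(tp)

    // Previously: Collections.singleton("topic1") or Set("topic1").asJava
    val topics: util.Set[String] = util.Set.of("topic1")

    // Previously: Map(tp -> new OffsetAndMetadata(5)).asJava
    val offsets: util.Map[TopicPartition, OffsetAndMetadata] =
      util.Map.of(tp, new OffsetAndMetadata(5L))

    // The java.util.*.of factories return unmodifiable collections and reject nulls,
    // which is why the patch can drop the CollectionConverters imports in these tests.
    println(s"$assignment $topics $offsets")
  }
}
```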
val offset = 15L val leaderEpoch: Optional[Integer] = Optional.of(1) val metadata = "metadata" + def assertResponse( + expected: OffsetFetchResponseData.OffsetFetchResponseGroup, + actual: OffsetFetchResponseData.OffsetFetchResponseGroup + ): Unit = { + actual.topics.sort((t1, t2) => t1.name.compareTo(t2.name)) + actual.topics.asScala.foreach { topic => + topic.partitions.sort(Comparator.comparingInt[OffsetFetchResponseData.OffsetFetchResponsePartitions](_.partitionIndex)) + } + + assertEquals(expected, actual) + } + def commitOffsets(tpList: util.List[TopicPartition]): Unit = { val consumer = createConsumer() consumer.assign(tpList) - val offsets = tpList.asScala.map{ + val offsets = tpList.asScala.map { tp => (tp, new OffsetAndMetadata(offset, leaderEpoch, metadata)) }.toMap.asJava consumer.commitSync(offsets) @@ -1472,98 +1891,298 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { removeAllClientAcls() - def verifyPartitionData(partitionData: OffsetFetchResponse.PartitionData): Unit = { - assertTrue(!partitionData.hasError) - assertEquals(offset, partitionData.offset) - assertEquals(metadata, partitionData.metadata) - assertEquals(leaderEpoch.get(), partitionData.leaderEpoch.get()) - } - - def verifyResponse(groupLevelResponse: Errors, - partitionData: util.Map[TopicPartition, PartitionData], - topicList: util.List[TopicPartition]): Unit = { - assertEquals(Errors.NONE, groupLevelResponse) - assertTrue(partitionData.size() == topicList.size()) - topicList.forEach(t => verifyPartitionData(partitionData.get(t))) - } - // test handling partial errors, where one group is fully authorized, some groups don't have // the right topic authorizations, and some groups have no authorization addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(0)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(1)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(3)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(0)) + val offsetFetchRequest = createOffsetFetchRequest(groupToPartitionMap) var offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - offsetFetchResponse.data().groups().forEach(g => - g.groupId() match { + + offsetFetchResponse.data.groups.forEach { g => + g.groupId match { case "group1" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(0)), offsetFetchResponse - .partitionDataMap(groups(0)), topic1List) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group2" => - assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(groups(1))) - val group2Response = offsetFetchResponse.partitionDataMap(groups(1)) - assertTrue(group2Response.size() == 3) - assertTrue(group2Response.keySet().containsAll(topic1And2List)) - verifyPartitionData(group2Response.get(topic1And2List.get(0))) - assertTrue(group2Response.get(topic1And2List.get(1)).hasError) - 
assertTrue(group2Response.get(topic1And2List.get(2)).hasError) - assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group2Response.get(topic1And2List.get(1))) - assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group2Response.get(topic1And2List.get(2))) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(1)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group3" => - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, offsetFetchResponse.groupLevelError(groups(2))) - assertTrue(offsetFetchResponse.partitionDataMap(groups(2)).size() == 0) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), + offsetFetchResponse.group(g.groupId) + ) + case "group4" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(3)), offsetFetchResponse - .partitionDataMap(groups(3)), topic1List) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group5" => - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, offsetFetchResponse.groupLevelError(groups(4))) - assertTrue(offsetFetchResponse.partitionDataMap(groups(4)).size() == 0) - }) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), + offsetFetchResponse.group(g.groupId) + ) + } + } // test that after adding some of the ACLs, we get no group level authorization errors, but // still get topic level authorization errors for topics we don't have ACLs for addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(2)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(4)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(1)) + offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - 
offsetFetchResponse.data().groups().forEach(g => - g.groupId() match { + + offsetFetchResponse.data.groups.forEach { g => + g.groupId match { case "group1" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(0)), offsetFetchResponse - .partitionDataMap(groups(0)), topic1List) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group2" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(1)), offsetFetchResponse - .partitionDataMap(groups(1)), topic1And2List) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(1)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group3" => - assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(groups(2))) - val group3Response = offsetFetchResponse.partitionDataMap(groups(2)) - assertTrue(group3Response.size() == 6) - assertTrue(group3Response.keySet().containsAll(allTopicsList)) - verifyPartitionData(group3Response.get(allTopicsList.get(0))) - verifyPartitionData(group3Response.get(allTopicsList.get(1))) - verifyPartitionData(group3Response.get(allTopicsList.get(2))) - assertTrue(group3Response.get(allTopicsList.get(3)).hasError) - assertTrue(group3Response.get(allTopicsList.get(4)).hasError) - assertTrue(group3Response.get(allTopicsList.get(5)).hasError) - assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(3))) - assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(4))) - assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(5))) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(1)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata), + new 
OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(2)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(2) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setMetadata(OffsetFetchResponse.NO_METADATA) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group4" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(3)), offsetFetchResponse - .partitionDataMap(groups(3)), topic1And2List) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(1)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + case "group5" => - verifyResponse(offsetFetchResponse.groupLevelError(groups(4)), offsetFetchResponse - .partitionDataMap(groups(4)), topic1And2List) - }) + assertResponse( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(g.groupId) + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(0)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(topics(1)) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpoch.get) + .setMetadata(metadata) + ).asJava) + ).asJava), + offsetFetchResponse.group(g.groupId) + ) + } + } // 
test that after adding all necessary ACLs, we get no partition level or group level errors // from the offsetFetch response addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(2)) offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - offsetFetchResponse.data.groups.asScala.map(_.groupId).foreach( groupId => - verifyResponse(offsetFetchResponse.groupLevelError(groupId), offsetFetchResponse.partitionDataMap(groupId), partitionMap(groupId)) - ) + offsetFetchResponse.data.groups.forEach { group => + assertEquals(Errors.NONE.code, group.errorCode) + group.topics.forEach { topic => + topic.partitions.forEach { partition => + assertEquals(Errors.NONE.code, partition.errorCode) + } + } + } } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1573,7 +2192,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) consumer.position(tp) } @@ -1584,7 +2203,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) consumer.position(tp) } @@ -1608,7 +2227,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testListOffsetsWithNoTopicAccess(groupProtocol: String): Unit = { val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(Set(tp).asJava)) + assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(java.util.Set.of(tp))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1617,24 +2236,22 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - consumer.endOffsets(Set(tp).asJava) + consumer.endOffsets(java.util.Set.of(tp)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeGroupApiWithNoGroupAcl(quorum: String): Unit = { + @Test + def testDescribeGroupApiWithNoGroupAcl(): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) - val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) + val result = createAdminClient().describeConsumerGroups(java.util.List.of(group)) JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.describedGroups().get(group)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeGroupApiWithGroupDescribe(quorum: String): Unit = { + @Test + def testDescribeGroupApiWithGroupDescribe(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new 
AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) - val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) + val result = createAdminClient().describeConsumerGroups(java.util.List.of(group)) JTestUtils.assertFutureThrows(classOf[GroupIdNotFoundException], result.describedGroups().get(group)) } @@ -1654,13 +2271,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), new ResourcePattern(GROUP, group2, LITERAL)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.subscribe(Collections.singleton(topic)) + consumer.subscribe(util.Set.of(topic)) consumeRecords(consumer) val otherConsumerProps = new Properties otherConsumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, group2) val otherConsumer = createConsumer(configOverrides = otherConsumerProps) - otherConsumer.subscribe(Collections.singleton(topic)) + otherConsumer.subscribe(util.Set.of(topic)) consumeRecords(otherConsumer) val adminClient = createAdminClient() @@ -1696,9 +2313,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) - createAdminClient().deleteConsumerGroups(Seq(group).asJava).deletedGroups().get(group).get() + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) + createAdminClient().deleteConsumerGroups(java.util.List.of(group)).deletedGroups().get(group).get() } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -1709,16 +2326,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) - val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) + val result = createAdminClient().deleteConsumerGroups(java.util.List.of(group)) JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.deletedGroups().get(group)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteGroupApiWithNoDeleteGroupAcl2(quorum: String): Unit = { - val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) + @Test + def testDeleteGroupApiWithNoDeleteGroupAcl2(): Unit = { + val result = createAdminClient().deleteConsumerGroups(java.util.List.of(group)) JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.deletedGroups().get(group)) } @@ -1731,10 +2347,10 @@ class AuthorizerIntegrationTest extends 
AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) consumer.close() - val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) assertNull(result.partitionResult(tp).get()) } @@ -1746,10 +2362,10 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) consumer.close() - val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.all()) } @@ -1761,29 +2377,27 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) + consumer.assign(java.util.List.of(tp)) + consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) consumer.close() // Remove the topic ACL & Check that it does not work without it removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) - val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], result.all()) JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], result.partitionResult(tp)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteGroupOffsetsWithNoAcl(quorum: String): Unit = { - val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + @Test + def testDeleteGroupOffsetsWithNoAcl(): Unit = { + val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.all()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterGroupConfigsWithAlterAcl(quorum: String): Unit = { + @Test + def testIncrementalAlterGroupConfigsWithAlterAcl(): Unit = { 
addAndVerifyAcls(groupAlterConfigsAcl(groupResource), groupResource) val request = incrementalAlterGroupConfigsRequest @@ -1791,9 +2405,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterGroupConfigsWithOperationAll(quorum: String): Unit = { + @Test + def testIncrementalAlterGroupConfigsWithOperationAll(): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -1802,9 +2415,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterGroupConfigsWithoutAlterAcl(quorum: String): Unit = { + @Test + def testIncrementalAlterGroupConfigsWithoutAlterAcl(): Unit = { removeAllClientAcls() val request = incrementalAlterGroupConfigsRequest @@ -1812,9 +2424,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeGroupConfigsWithDescribeAcl(quorum: String): Unit = { + @Test + def testDescribeGroupConfigsWithDescribeAcl(): Unit = { addAndVerifyAcls(groupDescribeConfigsAcl(groupResource), groupResource) val request = describeGroupConfigsRequest @@ -1822,9 +2433,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeGroupConfigsWithOperationAll(quorum: String): Unit = { + @Test + def testDescribeGroupConfigsWithOperationAll(): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -1833,9 +2443,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeGroupConfigsWithoutDescribeAcl(quorum: String): Unit = { + @Test + def testDescribeGroupConfigsWithoutDescribeAcl(): Unit = { removeAllClientAcls() val request = describeGroupConfigsRequest @@ -1843,42 +2452,37 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedDeleteTopicsWithoutDescribe(quorum: String): Unit = { + @Test + def testUnauthorizedDeleteTopicsWithoutDescribe(): Unit = { val deleteResponse = connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteResponse.data.responses.find(topic).errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedDeleteTopicsWithDescribe(quorum: String): Unit = { + @Test + def testUnauthorizedDeleteTopicsWithDescribe(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val deleteResponse = 
connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteResponse.data.responses.find(topic).errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteTopicsWithWildCardAuth(quorum: String): Unit = { + @Test + def testDeleteTopicsWithWildCardAuth(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val deleteResponse = connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.NONE.code, deleteResponse.data.responses.find(topic).errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedDeleteRecordsWithoutDescribe(quorum: String): Unit = { + @Test + def testUnauthorizedDeleteRecordsWithoutDescribe(): Unit = { val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteRecordsResponse.data.topics.asScala.head. partitions.asScala.head.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedDeleteRecordsWithDescribe(quorum: String): Unit = { + @Test + def testUnauthorizedDeleteRecordsWithDescribe(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) @@ -1886,9 +2490,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { partitions.asScala.head.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteRecordsWithWildCardAuth(quorum: String): Unit = { + @Test + def testDeleteRecordsWithWildCardAuth(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) @@ -1896,40 +2499,35 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { partitions.asScala.head.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedCreatePartitions(quorum: String): Unit = { + @Test + def testUnauthorizedCreatePartitions(): Unit = { val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, createPartitionsResponse.data.results.asScala.head.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreatePartitionsWithWildCardAuth(quorum: String): Unit = { + @Test + def testCreatePartitionsWithWildCardAuth(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest) assertEquals(Errors.NONE.code, createPartitionsResponse.data.results.asScala.head.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTransactionalProducerInitTransactionsNoWriteTransactionalIdAcl(quorum: String): Unit = { + @Test + def testTransactionalProducerInitTransactionsNoWriteTransactionalIdAcl(): Unit = { 
addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), transactionalIdResource) val producer = buildTransactionalProducer() assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.initTransactions()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTransactionalProducerInitTransactionsNoDescribeTransactionalIdAcl(quorum: String): Unit = { + @Test + def testTransactionalProducerInitTransactionsNoDescribeTransactionalIdAcl(): Unit = { val producer = buildTransactionalProducer() assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.initTransactions()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testSendOffsetsWithNoConsumerGroupDescribeAccess(quorum: String): Unit = { + @Test + def testSendOffsetsWithNoConsumerGroupDescribeAccess(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CLUSTER_ACTION, ALLOW)), clusterResource) @@ -1940,12 +2538,11 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.beginTransaction() assertThrows(classOf[GroupAuthorizationException], - () => producer.sendOffsetsToTransaction(Map(tp -> new OffsetAndMetadata(0L)).asJava, new ConsumerGroupMetadata(group))) + () => producer.sendOffsetsToTransaction(java.util.Map.of(tp, new OffsetAndMetadata(0L)), new ConsumerGroupMetadata(group))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testSendOffsetsWithNoConsumerGroupWriteAccess(quorum: String): Unit = { + @Test + def testSendOffsetsWithNoConsumerGroupWriteAccess(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -1955,12 +2552,11 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.beginTransaction() assertThrows(classOf[GroupAuthorizationException], - () => producer.sendOffsetsToTransaction(Map(tp -> new OffsetAndMetadata(0L)).asJava, new ConsumerGroupMetadata(group))) + () => producer.sendOffsetsToTransaction(java.util.Map.of(tp, new OffsetAndMetadata(0L)), new ConsumerGroupMetadata(group))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIdempotentProducerNoIdempotentWriteAclInInitProducerId(quorum: String): Unit = { + @Test + def testIdempotentProducerNoIdempotentWriteAclInInitProducerId(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) assertIdempotentSendAuthorizationFailure() @@ -1997,9 +2593,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertClusterAuthFailure() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIdempotentProducerNoIdempotentWriteAclInProduce(quorum: String): Unit = { + @Test + def testIdempotentProducerNoIdempotentWriteAclInProduce(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, IDEMPOTENT_WRITE, ALLOW)), clusterResource) @@ -2026,17 +2621,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertTrue(e.getCause.isInstanceOf[TopicAuthorizationException]) } - @ParameterizedTest - @ValueSource(strings = 
Array("kraft")) - def shouldInitTransactionsWhenAclSet(quorum: String): Unit = { + @Test + def shouldInitTransactionsWhenAclSet(): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) val producer = buildTransactionalProducer() producer.initTransactions() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTransactionalProducerTopicAuthorizationExceptionInSendCallback(quorum: String): Unit = { + @Test + def testTransactionalProducerTopicAuthorizationExceptionInSendCallback(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2051,9 +2644,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertEquals(Set(topic), e.unauthorizedTopics.asScala) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTransactionalProducerTopicAuthorizationExceptionInCommit(quorum: String): Unit = { + @Test + def testTransactionalProducerTopicAuthorizationExceptionInCommit(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2069,9 +2661,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessDuringSend(quorum: String): Unit = { + @Test + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessDuringSend(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2084,9 +2675,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { JTestUtils.assertFutureThrows(classOf[TransactionalIdAuthorizationException], future) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnEndTransaction(quorum: String): Unit = { + @Test + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnEndTransaction(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2099,9 +2689,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.commitTransaction()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testListTransactionsAuthorization(quorum: String): Unit = { + @Test + def testListTransactionsAuthorization(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2133,9 +2722,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertListTransactionResult(expectedTransactionalIds = Set(transactionalId)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldNotIncludeUnauthorizedTopicsInDescribeTransactionsResponse(quorum: String): Unit = { + @Test + def 
shouldNotIncludeUnauthorizedTopicsInDescribeTransactionsResponse(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2156,9 +2744,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertEquals(List.empty, transactionStateData.topics.asScala.toList) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldSuccessfullyAbortTransactionAfterTopicAuthorizationException(quorum: String): Unit = { + @Test + def shouldSuccessfullyAbortTransactionAfterTopicAuthorizationException(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2176,9 +2763,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.abortTransaction() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnSendOffsetsToTxn(quorum: String): Unit = { + @Test + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnSendOffsetsToTxn(): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), groupResource) val producer = buildTransactionalProducer() @@ -2188,15 +2774,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // In transaction V2, the server receives the offset commit request first, so the error is GroupAuthorizationException // instead of TransactionalIdAuthorizationException. 
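[Editorial aside, not part of the patch: the comment above notes that under transactions V2 the broker sees the offset-commit step first, so the unauthorized case surfaces as GroupAuthorizationException. For contrast, here is a happy-path sketch of the same producer-side call shape, using the `java.util.Map.of` style the patch adopts, and assuming a fully authorized caller (which the surrounding test deliberately is not). Broker address, topic, group, and transactional id are placeholders. The patch's `assertThrows` for the unauthorized case continues immediately after this aside.]

```scala
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerGroupMetadata, OffsetAndMetadata}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArraySerializer

object SendOffsetsToTransactionSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "txn-id")          // placeholder

    val producer = new KafkaProducer[Array[Byte], Array[Byte]](
      props, new ByteArraySerializer, new ByteArraySerializer)
    try {
      producer.initTransactions()
      producer.beginTransaction()

      // Same call shape as the test: commit a consumer offset as part of the transaction.
      val tp = new TopicPartition("topic1", 0)
      producer.sendOffsetsToTransaction(
        java.util.Map.of(tp, new OffsetAndMetadata(1L)),
        new ConsumerGroupMetadata("my-group"))

      producer.commitTransaction()
    } finally {
      producer.close()
    }
  }
}
```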
assertThrows(classOf[GroupAuthorizationException], () => { - val offsets = Map(tp -> new OffsetAndMetadata(1L)).asJava + val offsets = java.util.Map.of(tp, new OffsetAndMetadata(1L)) producer.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(group)) producer.commitTransaction() }) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldSendSuccessfullyWhenIdempotentAndHasCorrectACL(quorum: String): Unit = { + @Test + def shouldSendSuccessfullyWhenIdempotentAndHasCorrectACL(): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, IDEMPOTENT_WRITE, ALLOW)), clusterResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2205,18 +2790,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } // Verify that metadata request without topics works without any ACLs and returns cluster id - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterId(quorum: String): Unit = { - val request = new requests.MetadataRequest.Builder(List.empty.asJava, false).build() + @Test + def testClusterId(): Unit = { + val request = new requests.MetadataRequest.Builder(java.util.List.of, false).build() val response = connectAndReceive[MetadataResponse](request) - assertEquals(Collections.emptyMap, response.errorCounts) + assertEquals(util.Map.of, response.errorCounts) assertFalse(response.clusterId.isEmpty, "Cluster id not returned") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testRetryProducerInitializationAfterPermissionFix(quorum: String): Unit = { + @Test + def testRetryProducerInitializationAfterPermissionFix(): Unit = { createTopicWithBrokerPrincipal(topic) val wildcard = new ResourcePattern(TOPIC, ResourcePattern.WILDCARD_RESOURCE, LITERAL) val prefixed = new ResourcePattern(TOPIC, "t", PREFIXED) @@ -2238,9 +2821,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.close() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizeByResourceTypeMultipleAddAndRemove(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypeMultipleAddAndRemove(): Unit = { createTopicWithBrokerPrincipal(topic) for (_ <- 1 to 3) { @@ -2256,9 +2838,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizeByResourceTypeIsolationUnrelatedDenyWontDominateAllow(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypeIsolationUnrelatedDenyWontDominateAllow(): Unit = { createTopicWithBrokerPrincipal(topic) createTopicWithBrokerPrincipal("topic-2") createTopicWithBrokerPrincipal("to") @@ -2279,9 +2860,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendSuccess() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizeByResourceTypeDenyTakesPrecedence(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypeDenyTakesPrecedence(): Unit = { createTopicWithBrokerPrincipal(topic) val allowWriteAce = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW) addAndVerifyAcls(Set(allowWriteAce), topicResource) @@ -2292,9 +2872,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - 
def testAuthorizeByResourceTypeWildcardResourceDenyDominate(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypeWildcardResourceDenyDominate(): Unit = { createTopicWithBrokerPrincipal(topic) val wildcard = new ResourcePattern(TOPIC, ResourcePattern.WILDCARD_RESOURCE, LITERAL) val prefixed = new ResourcePattern(TOPIC, "t", PREFIXED) @@ -2310,9 +2889,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAuthorizeByResourceTypePrefixedResourceDenyDominate(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypePrefixedResourceDenyDominate(): Unit = { createTopicWithBrokerPrincipal(topic) val prefixed = new ResourcePattern(TOPIC, topic.substring(0, 1), PREFIXED) val literal = new ResourcePattern(TOPIC, topic, LITERAL) @@ -2324,9 +2902,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMetadataClusterAuthorizedOperationsWithoutDescribeCluster(quorum: String): Unit = { + @Test + def testMetadataClusterAuthorizedOperationsWithoutDescribeCluster(): Unit = { removeAllClientAcls() // MetadataRequest versions older than 1 are not supported. @@ -2335,9 +2912,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMetadataClusterAuthorizedOperationsWithDescribeAndAlterCluster(quorum: String): Unit = { + @Test + def testMetadataClusterAuthorizedOperationsWithDescribeAndAlterCluster(): Unit = { removeAllClientAcls() val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) @@ -2356,9 +2932,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeTopicAclWithOperationAll(quorum: String): Unit = { + @Test + def testDescribeTopicAclWithOperationAll(): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -2369,7 +2944,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setName(topic) val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(Collections.singletonList(metadataRequestTopic)) + .setTopics(util.List.of(metadataRequestTopic)) .setAllowAutoTopicCreation(false) ).build() @@ -2381,9 +2956,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertEquals(Errors.NONE, topicResponse.error) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeTopicConfigsAclWithOperationAll(quorum: String): Unit = { + @Test + def testDescribeTopicConfigsAclWithOperationAll(): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -2391,7 +2965,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() - .setResources(Collections.singletonList(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic))) ).build() @@ -2406,7 +2980,7 @@ class AuthorizerIntegrationTest extends 
AbstractAuthorizerIntegrationTest { expectedClusterAuthorizedOperations: Int ): Unit = { val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(Collections.emptyList()) + .setTopics(util.List.of) .setAllowAutoTopicCreation(true) .setIncludeClusterAuthorizedOperations(true)) .build(version) @@ -2421,9 +2995,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeClusterClusterAuthorizedOperationsWithoutDescribeCluster(quorum: String): Unit = { + @Test + def testDescribeClusterClusterAuthorizedOperationsWithoutDescribeCluster(): Unit = { removeAllClientAcls() for (version <- ApiKeys.DESCRIBE_CLUSTER.oldestVersion to ApiKeys.DESCRIBE_CLUSTER.latestVersion) { @@ -2431,9 +3004,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeClusterClusterAuthorizedOperationsWithDescribeAndAlterCluster(quorum: String): Unit = { + @Test + def testDescribeClusterClusterAuthorizedOperationsWithDescribeAndAlterCluster(): Unit = { removeAllClientAcls() val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) @@ -2451,9 +3023,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHostAddressBasedAcls(quorum: String): Unit = { + @Test + def testHostAddressBasedAcls(): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -2469,7 +3040,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setName(topic) val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(Collections.singletonList(metadataRequestTopic)) + .setTopics(util.List.of(metadataRequestTopic)) .setAllowAutoTopicCreation(false) ).build() @@ -2493,9 +3064,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertDoesNotThrow(closeConsumer, "Exception not expected on closing consumer") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithGroupReadAndTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithGroupReadAndTopicDescribeAcl(): Unit = { addAndVerifyAcls(groupReadAcl(groupResource), groupResource) addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) @@ -2504,9 +3074,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithOperationAll(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithOperationAll(): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) @@ -2516,9 +3085,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(): Unit = { removeAllClientAcls() val 
request = consumerGroupHeartbeatRequest @@ -2526,9 +3094,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithoutGroupReadAcl(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithoutGroupReadAcl(): Unit = { addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) val request = consumerGroupHeartbeatRequest @@ -2537,9 +3104,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithoutTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithoutTopicDescribeAcl(): Unit = { addAndVerifyAcls(groupReadAcl(groupResource), groupResource) val request = consumerGroupHeartbeatRequest @@ -2548,9 +3114,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithRegex(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithRegex(): Unit = { createTopicWithBrokerPrincipal(topic) val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -2560,9 +3125,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendAndReceiveRegexHeartbeat(response, listenerName, Some(1)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithRegexWithoutTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupHeartbeatWithRegexWithoutTopicDescribeAcl(): Unit = { createTopicWithBrokerPrincipal(topic) val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -2571,25 +3135,47 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendAndReceiveRegexHeartbeat(response, listenerName, None) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupHeartbeatWithRegexWithDifferentMemberAcls(quorum: String): Unit = { - createTopicWithBrokerPrincipal(topic, numPartitions = 2) + @Test + def testConsumerGroupHeartbeatWithRegexWithTopicDescribeAclAddedAndRemoved(): Unit = { + createTopicWithBrokerPrincipal(topic) val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) - // Member on inter-broker listener has all access and is assigned the matching topic - var member1Response = sendAndReceiveFirstRegexHeartbeat("memberWithAllAccess", interBrokerListenerName) - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(2)) + val memberId = Uuid.randomUuid.toString; + var response = sendAndReceiveFirstRegexHeartbeat(memberId, listenerName) + TestUtils.tryUntilNoAssertionError() { + response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(0), true) + } - // Member on client listener has no topic describe access, but is assigned a partition of the - // unauthorized topic. This is leaking unauthorized topic metadata to member2. 
Simply filtering out - // the topic from the assignment in the response is not sufficient since different assignment states - // in the broker and client can lead to other issues. This needs to be fixed properly by using - // member permissions while computing assignments. - var member2Response = sendAndReceiveFirstRegexHeartbeat("memberWithLimitedAccess", listenerName) + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + TestUtils.tryUntilNoAssertionError(waitTime = 25000) { + response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(1)) + } + + removeAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + TestUtils.tryUntilNoAssertionError(waitTime = 25000) { + response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(0)) + } + } + + @Test + def testConsumerGroupHeartbeatWithRegexWithDifferentMemberAcls(): Unit = { + createTopicWithBrokerPrincipal(topic, numPartitions = 2) + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) + + // Member on inter-broker listener has all access and is assigned the matching topic + var member1Response = sendAndReceiveFirstRegexHeartbeat("memberWithAllAccess", interBrokerListenerName) + member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(2)) + + // Member on client listener has no topic describe access, but is assigned a partition of the + // unauthorized topic. This is leaking unauthorized topic metadata to member2. Simply filtering out + // the topic from the assignment in the response is not sufficient since different assignment states + // in the broker and client can lead to other issues. This needs to be fixed properly by using + // member permissions while computing assignments. 
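+ // Until assignments are computed with member-level permissions, the expectations below capture the current leaky behaviour: member2 is still handed a partition of the topic it is not authorized to describe.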
+ var member2Response = sendAndReceiveFirstRegexHeartbeat("memberWithLimitedAccess", listenerName) member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(1)) - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, None, fullRequest = true) + member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(1), fullRequest = true) member2Response = sendAndReceiveRegexHeartbeat(member2Response, listenerName, Some(1)) // Create another topic and send heartbeats on member1 to trigger regex refresh @@ -2609,6 +3195,844 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(0), fullRequest = true) } + @Test + def testShareGroupHeartbeatWithGroupReadAndTopicDescribeAcl(): Unit = { + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = shareGroupHeartbeatRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareGroupHeartbeatWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = shareGroupHeartbeatRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(): Unit = { + removeAllClientAcls() + + val request = shareGroupHeartbeatRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareGroupHeartbeatWithoutGroupReadAcl(): Unit = { + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = shareGroupHeartbeatRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareGroupHeartbeatWithoutTopicDescribeAcl(): Unit = { + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + + val request = shareGroupHeartbeatRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + private def createShareGroupToDescribe(): Unit = { + createTopicWithBrokerPrincipal(topic) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), shareGroupResource) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) + shareConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroup) + val consumer = createShareConsumer() + consumer.subscribe(util.Set.of(topic)) + consumer.poll(Duration.ofMillis(500L)) + removeAllClientAcls() + } + + private def createEmptyShareGroup(): Unit = { + createTopicWithBrokerPrincipal(topic) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), shareGroupResource) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) + shareConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroup) + val consumer = 
createShareConsumer() + consumer.subscribe(util.Set.of(topic)) + consumer.poll(Duration.ofMillis(500L)) + consumer.close() + removeAllClientAcls() + } + + @Test + def testShareGroupDescribeWithGroupDescribeAndTopicDescribeAcl(): Unit = { + createShareGroupToDescribe() + addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = shareGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareGroupDescribeWithOperationAll(): Unit = { + createShareGroupToDescribe() + + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = shareGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareGroupDescribeWithoutGroupDescribeAcl(): Unit = { + createShareGroupToDescribe() + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = shareGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { + createShareGroupToDescribe() + + val request = shareGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareFetchWithGroupReadAndTopicReadAcl(): Unit = { + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = createShareFetchRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareFetchWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = createShareFetchRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareFetchWithoutGroupReadOrTopicReadAcl(): Unit = { + removeAllClientAcls() + + val request = createShareFetchRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareFetchWithoutGroupReadAcl(): Unit = { + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = createShareFetchRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareFetchWithoutTopicReadAcl(): Unit = { + createTopicWithBrokerPrincipal(topic) + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + + val request = createShareFetchRequest + val response = connectAndReceive[ShareFetchResponse](request, listenerName = listenerName) + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, 
Errors.forCode(response.data.responses.stream().findFirst().get().partitions.get(0).errorCode)) + } + + @Test + def testShareAcknowledgeWithGroupReadAndTopicReadAcl(): Unit = { + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = shareAcknowledgeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareAcknowledgeWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = shareAcknowledgeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testShareAcknowledgeWithoutGroupReadOrTopicReadAcl(): Unit = { + removeAllClientAcls() + + val request = shareAcknowledgeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testShareAcknowledgeFetchWithoutGroupReadAcl(): Unit = { + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = shareAcknowledgeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testInitializeShareGroupStateWithClusterAcl(): Unit = { + addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) + + val request = initializeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testInitializeShareGroupStateWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) + + val request = initializeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testInitializeShareGroupStateWithoutClusterAcl(): Unit = { + removeAllClientAcls() + + val request = initializeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testReadShareGroupStateWithClusterAcl(): Unit = { + addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) + + val request = readShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testReadShareGroupStateWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) + + val request = readShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testReadShareGroupStateWithoutClusterAcl(): Unit = { + removeAllClientAcls() + + val request = readShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testWriteShareGroupStateWithClusterAcl(): Unit = { + 
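// Note: as with the other share-group state RPCs exercised above, WriteShareGroupState is authorized purely against the CLUSTER resource, so a single cluster-level ACL is sufficient here. + 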
addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) + + val request = writeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testWriteShareGroupStateWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) + + val request = writeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testWriteShareGroupStateWithoutClusterAcl(): Unit = { + removeAllClientAcls() + + val request = writeShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDeleteShareGroupStateWithClusterAcl(): Unit = { + addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) + + val request = deleteShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDeleteShareGroupStateWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) + + val request = deleteShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDeleteShareGroupStateWithoutClusterAcl(): Unit = { + removeAllClientAcls() + + val request = deleteShareGroupStateRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testReadShareGroupStateSummaryWithClusterAcl(): Unit = { + addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) + + val request = readShareGroupStateSummaryRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testReadShareGroupStateSummaryWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) + + val request = readShareGroupStateSummaryRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testReadShareGroupStateSummaryWithoutClusterAcl(): Unit = { + removeAllClientAcls() + + val request = readShareGroupStateSummaryRequest + val resource = Set[ResourceType](CLUSTER) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDescribeShareGroupOffsetsWithGroupDescribeAndTopicDescribeAcl(): Unit = { + addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = describeShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDescribeShareGroupOffsetsWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val 
request = describeShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDescribeShareGroupOffsetsWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { + removeAllClientAcls() + + val request = describeShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDescribeShareGroupOffsetsWithoutGroupDescribeAcl(): Unit = { + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = describeShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDescribeShareGroupOffsetsWithoutTopicDescribeAcl(): Unit = { + addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) + + val request = describeShareGroupOffsetsRequest + val response = connectAndReceive[DescribeShareGroupOffsetsResponse](request, listenerName = listenerName) + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, Errors.forCode(response.data.groups.get(0).topics.get(0).partitions.get(0).errorCode)) + } + + @Test + def testDeleteShareGroupOffsetsWithGroupDeleteAndTopicReadAcl(): Unit = { + addAndVerifyAcls(shareGroupDeleteAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = deleteShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDeleteShareGroupOffsetsWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = deleteShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testDeleteShareGroupOffsetsWithoutGroupDeleteOrTopicReadAcl(): Unit = { + removeAllClientAcls() + + val request = deleteShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDeleteShareGroupOffsetsWithoutGroupDeleteAcl(): Unit = { + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = deleteShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testDeleteShareGroupOffsetsWithoutTopicReadAcl(): Unit = { + createEmptyShareGroup() + addAndVerifyAcls(shareGroupDeleteAcl(shareGroupResource), shareGroupResource) + + val request = deleteShareGroupOffsetsRequest + val response = connectAndReceive[DeleteShareGroupOffsetsResponse](request, listenerName = listenerName) + assertEquals(1, response.data.responses.size) + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.responses.get(0).errorCode, s"Unexpected response $response") + } + + @Test + def testAlterShareGroupOffsetsWithGroupReadAndTopicReadAcl(): Unit = { + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = alterShareGroupOffsetsRequest + val resource = 
Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testAlterShareGroupOffsetsWithOperationAll(): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = alterShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @Test + def testAlterShareGroupOffsetsWithoutGroupReadOrTopicReadAcl(): Unit = { + removeAllClientAcls() + + val request = alterShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testAlterShareGroupOffsetsWithoutGroupReadAcl(): Unit = { + addAndVerifyAcls(topicReadAcl(topicResource), topicResource) + + val request = alterShareGroupOffsetsRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @Test + def testAlterShareGroupOffsetsWithoutTopicReadAcl(): Unit = { + createEmptyShareGroup() + addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) + + val request = alterShareGroupOffsetsRequest + val response = connectAndReceive[AlterShareGroupOffsetsResponse](request, listenerName = listenerName) + assertEquals(1, response.data.responses.size) + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.responses.stream().findFirst().get().partitions.get(0).errorCode, s"Unexpected response $response") + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupHeartbeatWithGroupReadAndTopicDescribeAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupHeartbeatWithOperationAll( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), streamsGroupResource) + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + addAndVerifyAcls(Set(allowAllOpsAcl), sourceTopicResource) + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + val resource = 
Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + removeAllClientAcls() + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupHeartbeatWithoutGroupReadAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupHeartbeatWithoutTopicDescribeAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false", + "false, true" + )) + def testStreamsGroupHeartbeatWithoutInternalTopicCreateAcl( + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createTopicWithBrokerPrincipal(sourceTopic) + addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic = false, + topicAsRepartitionSinkTopic = false, + topicAsRepartitionSourceTopic = topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics = topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + + // Request successful, but internal topic not created. 
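+ // The missing CREATE ACL does not fail the heartbeat; it surfaces as the MISSING_INTERNAL_TOPICS status with an authorization hint in the status detail, as asserted below.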
+ val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true).asInstanceOf[StreamsGroupHeartbeatResponse] + assertEquals( + util.List.of(new StreamsGroupHeartbeatResponseData.Status() + .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) + .setStatusDetail("Internal topics are missing: [topic]; Unauthorized to CREATE on topics topic.")), + response.data().status()) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false", + "false, true" + )) + def testStreamsGroupHeartbeatWithInternalTopicCreateAcl( + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createTopicWithBrokerPrincipal(sourceTopic) + addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + addAndVerifyAcls(topicCreateAcl(topicResource), topicResource) + + val request = streamsGroupHeartbeatRequest( + topicAsSourceTopic = false, + topicAsRepartitionSinkTopic = false, + topicAsRepartitionSourceTopic = topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics = topicAsStateChangelogTopics + ) + val resource = Set[ResourceType](GROUP, TOPIC) + val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true).asInstanceOf[StreamsGroupHeartbeatResponse] + // Request successful, and no internal topic creation error. + assertEquals( + util.List.of(new StreamsGroupHeartbeatResponseData.Status() + .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) + .setStatusDetail("Internal topics are missing: [topic]")), + response.data().status()) + } + + private def createStreamsGroupToDescribe( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createTopicWithBrokerPrincipal(sourceTopic) + createTopicWithBrokerPrincipal(topic) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), streamsGroupResource) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), sourceTopicResource) + streamsConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, streamsGroup) + streamsConsumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + val consumer = createStreamsConsumer(streamsRebalanceData = new StreamsRebalanceData( + UUID.randomUUID(), + Optional.empty(), + util.Map.of( + "subtopology-0", new StreamsRebalanceData.Subtopology( + if (topicAsSourceTopic) util.Set.of(sourceTopic, topic) else util.Set.of(sourceTopic), + if (topicAsRepartitionSinkTopic) util.Set.of(topic) else util.Set.of(), + if (topicAsRepartitionSourceTopic) + util.Map.of(topic, new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())) + else util.Map.of(), + if (topicAsStateChangelogTopics) + util.Map.of(topic, new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())) + else util.Map.of(), + util.Set.of() + )), + Map.empty[String, String].asJava + )) + consumer.subscribe( + if (topicAsSourceTopic || topicAsRepartitionSourceTopic) util.Set.of(sourceTopic, topic) else util.Set.of(sourceTopic), + new StreamsRebalanceListener { + 
override def onTasksRevoked(tasks: util.Set[StreamsRebalanceData.TaskId]): Unit = () + override def onTasksAssigned(assignment: StreamsRebalanceData.Assignment): Unit = () + override def onAllTasksLost(): Unit = () + } + ) + consumer.poll(Duration.ofMillis(500L)) + removeAllClientAcls() + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupDescribeWithGroupDescribeAndTopicDescribeAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createStreamsGroupToDescribe( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + addAndVerifyAcls(streamsGroupDescribeAcl(streamsGroupResource), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = streamsGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupDescribeWithOperationAll( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createStreamsGroupToDescribe( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + + val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) + addAndVerifyAcls(Set(allowAllOpsAcl), streamsGroupResource) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) + + val request = streamsGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupDescribeWithoutGroupDescribeAcl( + topicAsSourceTopic: Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createStreamsGroupToDescribe( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) + + val request = streamsGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + + @ParameterizedTest + @CsvSource(Array( + "true, false, false, false", + "false, true, false, false", + "false, false, true, false", + "false, false, false, true" + )) + def testStreamsGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl( + topicAsSourceTopic: 
Boolean, + topicAsRepartitionSinkTopic: Boolean, + topicAsRepartitionSourceTopic: Boolean, + topicAsStateChangelogTopics: Boolean + ): Unit = { + createStreamsGroupToDescribe( + topicAsSourceTopic, + topicAsRepartitionSinkTopic, + topicAsRepartitionSourceTopic, + topicAsStateChangelogTopics + ) + + val request = streamsGroupDescribeRequest + val resource = Set[ResourceType](GROUP, TOPIC) + addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic + + sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) + } + private def sendAndReceiveFirstRegexHeartbeat(memberId: String, listenerName: ListenerName): ConsumerGroupHeartbeatResponseData = { val request = new ConsumerGroupHeartbeatRequest.Builder( @@ -2617,7 +4041,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(5 * 60 * 1000) - .setTopicPartitions(Collections.emptyList()) + .setTopicPartitions(util.List.of()) .setSubscribedTopicRegex("^top.*")).build() val resource = Set[ResourceType](GROUP, TOPIC) val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true, listenerName = listenerName) @@ -2644,6 +4068,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { data = data .setTopicPartitions(partitions.asJava) .setSubscribedTopicRegex("^top.*") + .setRebalanceTimeoutMs(5 * 60 * 1000) } val request = new ConsumerGroupHeartbeatRequest.Builder(data).build() val resource = Set[ResourceType](GROUP, TOPIC) @@ -2667,14 +4092,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "consumer") consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group) val consumer = createConsumer() - consumer.subscribe(Collections.singleton(topic)) + consumer.subscribe(util.Set.of(topic)) consumer.poll(Duration.ofMillis(500L)) removeAllClientAcls() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupDescribeWithGroupDescribeAndTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupDescribeWithGroupDescribeAndTopicDescribeAcl(): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(groupDescribeAcl(groupResource), groupResource) @@ -2685,9 +4109,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupDescribeWithOperationAll(quorum: String): Unit = { + @Test + def testConsumerGroupDescribeWithOperationAll(): Unit = { createConsumerGroupToDescribe() val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) @@ -2699,9 +4122,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupDescribeWithoutGroupDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupDescribeWithoutGroupDescribeAcl(): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) @@ -2711,9 +4133,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest 
- @ValueSource(strings = Array("kraft")) - def testConsumerGroupDescribeWithoutTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupDescribeWithoutTopicDescribeAcl(): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(groupDescribeAcl(groupResource), groupResource) @@ -2723,9 +4144,8 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumerGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(quorum: String): Unit = { + @Test + def testConsumerGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { createConsumerGroupToDescribe() val request = consumerGroupDescribeRequest @@ -2750,7 +4170,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val aclEntryFilter = new AccessControlEntryFilter(clientPrincipalString, null, AclOperation.ANY, AclPermissionType.ANY) val aclFilter = new AclBindingFilter(ResourcePatternFilter.ANY, aclEntryFilter) - authorizerForWrite.deleteAcls(TestUtils.anonymousAuthorizableContext, List(aclFilter).asJava).asScala. + authorizerForWrite.deleteAcls(TestUtils.anonymousAuthorizableContext, java.util.List.of(aclFilter)).asScala. map(_.toCompletableFuture.get).flatMap { deletion => deletion.aclBindingDeleteResults().asScala.map(_.aclBinding.pattern).toSet }.foreach { resource => @@ -2861,16 +4281,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testPrefixAcls(quorum: String): Unit = { + @Test + def testPrefixAcls(): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), new ResourcePattern(TOPIC, "f", PREFIXED)) addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WILDCARD_HOST, CREATE, DENY)), new ResourcePattern(TOPIC, "fooa", PREFIXED)) addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WILDCARD_HOST, CREATE, ALLOW)), new ResourcePattern(TOPIC, "foob", PREFIXED)) - createAdminClient().createTopics(Collections. - singletonList(new NewTopic("foobar", 1, 1.toShort))).all().get() + createAdminClient().createTopics(util.List.of(new NewTopic("foobar", 1, 1.toShort))).all().get() } } diff --git a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala index 4e47c8661a0fb..16dec9dc00800 100644 --- a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala @@ -17,7 +17,7 @@ package kafka.api import java.util -import java.util.Properties +import java.util.{Optional, Properties} import java.util.concurrent.ExecutionException import kafka.utils.Logging import kafka.utils.TestUtils._ @@ -37,7 +37,6 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo, Timeout} import scala.jdk.CollectionConverters._ import scala.collection.Seq -import scala.jdk.OptionConverters.RichOption /** * Base integration test cases for [[Admin]]. 
Each test case added here will be executed @@ -73,12 +72,12 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg def testCreateDeleteTopics(): Unit = { client = createAdminClient val topics = Seq("mytopic", "mytopic2", "mytopic3") - val newTopics = Seq( - new NewTopic("mytopic", Map((0: Integer) -> Seq[Integer](1, 2).asJava, (1: Integer) -> Seq[Integer](2, 0).asJava).asJava), + val newTopics = util.List.of( + new NewTopic("mytopic", util.Map.of(0: Integer, util.List.of[Integer](1, 2), 1: Integer, util.List.of[Integer](2, 0))), new NewTopic("mytopic2", 3, 3.toShort), - new NewTopic("mytopic3", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) + new NewTopic("mytopic3", Optional.empty[Integer], Optional.empty[java.lang.Short]) ) - val validateResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)) + val validateResult = client.createTopics(newTopics, new CreateTopicsOptions().validateOnly(true)) validateResult.all.get() waitForTopics(client, List(), topics) @@ -93,7 +92,7 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg } validateMetadataAndConfigs(validateResult) - val createResult = client.createTopics(newTopics.asJava) + val createResult = client.createTopics(newTopics) createResult.all.get() waitForTopics(client, topics, List()) validateMetadataAndConfigs(createResult) @@ -103,7 +102,7 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg assertEquals(topicIds(topic), createResult.topicId(topic).get()) } - val failedCreateResult = client.createTopics(newTopics.asJava) + val failedCreateResult = client.createTopics(newTopics) val results = failedCreateResult.values() assertTrue(results.containsKey("mytopic")) assertFutureThrows(classOf[TopicExistsException], results.get("mytopic")) @@ -175,8 +174,8 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg assertEquals(expectedOperations, result.authorizedOperations().get()) val topic = "mytopic" - val newTopics = Seq(new NewTopic(topic, 3, 3.toShort)) - client.createTopics(newTopics.asJava).all.get() + val newTopics = util.List.of(new NewTopic(topic, 3, 3.toShort)) + client.createTopics(newTopics).all.get() waitForTopics(client, expectedPresent = Seq(topic), expectedMissing = List()) // without includeAuthorizedOperations flag @@ -251,7 +250,7 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg expectedNumPartitionsOpt: Option[Int] = None): TopicDescription = { var result: TopicDescription = null waitUntilTrue(() => { - val topicResult = client.describeTopics(Set(topic).asJava, describeOptions).topicNameValues().get(topic) + val topicResult = client.describeTopics(util.Set.of(topic), describeOptions).topicNameValues().get(topic) try { result = topicResult.get expectedNumPartitionsOpt.map(_ == result.partitions.size).getOrElse(true) diff --git a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala index 02e30b50c3e54..adfb657b77603 100644 --- a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala @@ -19,10 +19,9 @@ package kafka.api import kafka.utils.TestInfoUtils import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, GroupProtocol} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} -import org.apache.kafka.common.header.Headers import 
org.apache.kafka.common.{ClusterResource, ClusterResourceListener, PartitionInfo} import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, Serializer} +import org.apache.kafka.common.serialization.{Deserializer, Serializer} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource @@ -47,7 +46,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { val consumer = createConsumer() assertEquals(0, consumer.assignment.size) - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertEquals(1, consumer.assignment.size) consumer.seek(tp, 0) @@ -73,7 +72,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[BaseConsumerTest.TestClusterResourceListenerDeserializer]) consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[BaseConsumerTest.TestClusterResourceListenerDeserializer]) val consumer: Consumer[Array[Byte], Array[Byte]] = createConsumer(keyDeserializer = null, valueDeserializer = null, consumerProps) - consumer.subscribe(List(tp.topic()).asJava) + consumer.subscribe(java.util.List.of(tp.topic())) consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) assertNotEquals(0, BaseConsumerTest.updateProducerCount.get()) assertNotEquals(0, BaseConsumerTest.updateConsumerCount.get()) @@ -83,7 +82,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testCoordinatorFailover(groupProtocol: String): Unit = { val listener = new TestConsumerReassignmentListener() - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5001") this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") } @@ -91,7 +90,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "15000") val consumer = createConsumer() - consumer.subscribe(List(topic).asJava, listener) + consumer.subscribe(java.util.List.of(topic), listener) // the initial subscription should cause a callback execution awaitRebalance(consumer, listener) @@ -130,41 +129,4 @@ object BaseConsumerTest { override def onUpdate(clusterResource: ClusterResource): Unit = updateConsumerCount.incrementAndGet() override def deserialize(topic: String, data: Array[Byte]): Array[Byte] = data } - - class SerializerImpl extends Serializer[Array[Byte]] { - var serializer = new ByteArraySerializer() - - override def serialize(topic: String, headers: Headers, data: Array[Byte]): Array[Byte] = { - headers.add("content-type", "application/octet-stream".getBytes) - serializer.serialize(topic, data) - } - - override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = serializer.configure(configs, isKey) - - override def close(): Unit = serializer.close() - - override def serialize(topic: String, data: Array[Byte]): Array[Byte] = { - fail("method should not be invoked") - null - } - } - - class DeserializerImpl extends Deserializer[Array[Byte]] { - var deserializer = new ByteArrayDeserializer() - - override def deserialize(topic: String, headers: 
Headers, data: Array[Byte]): Array[Byte] = { - val header = headers.lastHeader("content-type") - assertEquals("application/octet-stream", if (header == null) null else new String(header.value())) - deserializer.deserialize(topic, data) - } - - override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = deserializer.configure(configs, isKey) - - override def close(): Unit = deserializer.close() - - override def deserialize(topic: String, data: Array[Byte]): Array[Byte] = { - fail("method should not be invoked") - null - } - } } diff --git a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala index e4894729b810f..add18b260cd20 100644 --- a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala @@ -19,7 +19,7 @@ package kafka.api import java.time.Duration import java.nio.charset.StandardCharsets -import java.util.{Collections, Properties} +import java.util.Properties import java.util.concurrent.TimeUnit import kafka.integration.KafkaServerTestHarness import kafka.security.JaasTestUtils @@ -43,22 +43,25 @@ import org.junit.jupiter.params.provider.MethodSource import scala.collection.mutable import scala.concurrent.ExecutionException -import scala.jdk.CollectionConverters._ import scala.jdk.javaapi.OptionConverters abstract class BaseProducerSendTest extends KafkaServerTestHarness { def generateConfigs: scala.collection.Seq[KafkaConfig] = { - val overridingProps = new Properties() val numServers = 2 - overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toShort) - overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 4.toString) TestUtils.createBrokerConfigs( numServers, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties - ).map(KafkaConfig.fromProps(_, overridingProps)) + ).map(KafkaConfig.fromProps(_, brokerOverrides)) + } + + protected def brokerOverrides: Properties = { + val overridingProps = new Properties() + overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toShort) + overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 4.toString) + overridingProps } private var consumer: Consumer[Array[Byte], Array[Byte]] = _ @@ -348,7 +351,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { assertEquals(partition, recordMetadata.partition) } - consumer.assign(List(new TopicPartition(topic, partition)).asJava) + consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) // make sure the fetched messages also respect the partitioning and ordering val records = TestUtils.consumeRecords(consumer, numRecords) @@ -396,7 +399,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { assertEquals(partition, recordMetadata.partition) } - consumer.assign(List(new TopicPartition(topic, partition)).asJava) + consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) // make sure the fetched messages also respect the partitioning and ordering val records = TestUtils.consumeRecords(consumer, numRecords) @@ -445,7 +448,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { val e = assertThrows(classOf[ExecutionException], () => producer.send(new ProducerRecord(topic, partition1, null, "value".getBytes(StandardCharsets.UTF_8))).get()) assertEquals(classOf[TimeoutException], 
e.getCause.getClass) - admin.createPartitions(Collections.singletonMap(topic, NewPartitions.increaseTo(2))).all().get() + admin.createPartitions(java.util.Map.of(topic, NewPartitions.increaseTo(2))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic, 0) @@ -505,7 +508,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { def testCloseWithZeroTimeoutFromCallerThread(groupProtocol: String): Unit = { TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, 2, 2) val partition = 0 - consumer.assign(List(new TopicPartition(topic, partition)).asJava) + consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, null, "value".getBytes(StandardCharsets.UTF_8)) @@ -531,7 +534,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { def testCloseWithZeroTimeoutFromSenderThread(groupProtocol: String): Unit = { TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, 1, 2) val partition = 0 - consumer.assign(List(new TopicPartition(topic, partition)).asJava) + consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, null, "value".getBytes(StandardCharsets.UTF_8)) // Test closing from sender thread. diff --git a/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala b/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala index ef1246d36d5d2..13eb169e0459e 100644 --- a/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala @@ -17,10 +17,10 @@ package kafka.api import java.time.Duration import java.util import java.util.concurrent.TimeUnit -import java.util.{Collections, Properties} +import java.util.Properties import com.yammer.metrics.core.{Histogram, Meter} import kafka.api.QuotaTestClients._ -import kafka.server.{ClientQuotaManager, KafkaBroker} +import kafka.server.KafkaBroker import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.admin.Admin import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} @@ -36,7 +36,7 @@ import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest @@ -165,7 +165,7 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { // Since producer may have been throttled after producing a couple of records, // consume from beginning till throttled - quotaTestClients.consumer.seekToBeginning(Collections.singleton(new TopicPartition(topic1, 0))) + quotaTestClients.consumer.seekToBeginning(util.Set.of(new TopicPartition(topic1, 0))) quotaTestClients.consumeUntilThrottled(numRecords + produced) quotaTestClients.verifyConsumeThrottle(expectThrottle = true) } @@ -177,7 +177,7 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { quotaTestClients.waitForQuotaUpdate(Long.MaxValue, Long.MaxValue, 0.1) val consumer = quotaTestClients.consumer - consumer.subscribe(Collections.singleton(topic1)) 
+    consumer.subscribe(util.Set.of(topic1))
     val endTimeMs = System.currentTimeMillis + 10000
     var throttled = false
     while ((!throttled || quotaTestClients.exemptRequestMetric == null || metricValue(quotaTestClients.exemptRequestMetric) <= 0)
@@ -236,7 +236,7 @@ abstract class QuotaTestClients(topic: String,
 
   def consumeUntilThrottled(maxRecords: Int, waitForRequestCompletion: Boolean = true): Int = {
     val timeoutMs = TimeUnit.MINUTES.toMillis(1)
-    consumer.subscribe(Collections.singleton(topic))
+    consumer.subscribe(util.Set.of(topic))
     var numConsumed = 0
     var throttled = false
     val startMs = System.currentTimeMillis
diff --git a/core/src/test/scala/integration/kafka/api/ClientOAuthIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/ClientOAuthIntegrationTest.scala
new file mode 100644
index 0000000000000..22ab6f2673c9e
--- /dev/null
+++ b/core/src/test/scala/integration/kafka/api/ClientOAuthIntegrationTest.scala
@@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package integration.kafka.api
+
+import com.nimbusds.jose.jwk.RSAKey
+import kafka.api.{IntegrationTestHarness, SaslSetup}
+import kafka.utils.TestInfoUtils
+import org.apache.kafka.clients.CommonClientConfigs
+import org.apache.kafka.common.config.{ConfigException, SaslConfigs}
+import org.junit.jupiter.api.{AfterEach, BeforeEach, Disabled, TestInfo}
+
+import java.util.{Base64, Collections, Properties}
+import no.nav.security.mock.oauth2.{MockOAuth2Server, OAuth2Config}
+import no.nav.security.mock.oauth2.token.{KeyProvider, OAuth2TokenProvider}
+import org.apache.kafka.common.KafkaException
+import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
+import org.apache.kafka.common.security.auth.SecurityProtocol
+import org.apache.kafka.common.security.oauthbearer.{OAuthBearerLoginCallbackHandler, OAuthBearerLoginModule, OAuthBearerValidatorCallbackHandler}
+import org.apache.kafka.common.utils.Utils
+import org.apache.kafka.test.TestUtils
+import org.junit.jupiter.api.Assertions.{assertDoesNotThrow, assertThrows}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.MethodSource
+
+import java.io.File
+import java.nio.ByteBuffer
+import java.nio.channels.FileChannel
+import java.nio.file.StandardOpenOption
+import java.security.{KeyPairGenerator, PrivateKey}
+import java.security.interfaces.RSAPublicKey
+import java.util
+
+/**
+ * Integration tests for client OAuth/OIDC authentication over SASL/OAUTHBEARER, covering the client credentials and JWT bearer grant types.
+ */
+class ClientOAuthIntegrationTest extends IntegrationTestHarness with SaslSetup {
+
+  override val brokerCount = 3
+
+  override protected def securityProtocol = SecurityProtocol.SASL_PLAINTEXT
+  override protected val serverSaslProperties =
Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) + override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism)) + + protected def kafkaClientSaslMechanism = "OAUTHBEARER" + protected def kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) + + val issuerId = "default" + var mockOAuthServer: MockOAuth2Server = _ + var privateKey: PrivateKey = _ + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + // Step 1: Generate the key pair dynamically. + val keyGen = KeyPairGenerator.getInstance("RSA") + keyGen.initialize(2048) + val keyPair = keyGen.generateKeyPair() + + privateKey = keyPair.getPrivate + + // Step 2: Create the RSA JWK from key pair. + val rsaJWK = new RSAKey.Builder(keyPair.getPublic.asInstanceOf[RSAPublicKey]) + .privateKey(privateKey) + .keyID("foo") + .build() + + // Step 3: Create the OAuth server using the keys just created + val keyProvider = new KeyProvider(Collections.singletonList(rsaJWK)) + val tokenProvider = new OAuth2TokenProvider(keyProvider) + val oauthConfig = new OAuth2Config(false, null, null, false, tokenProvider) + mockOAuthServer = new MockOAuth2Server(oauthConfig) + + mockOAuthServer.start() + val tokenEndpointUrl = mockOAuthServer.tokenEndpointUrl(issuerId).url().toString + val jwksUrl = mockOAuthServer.jwksUrl(issuerId).url().toString + System.setProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, s"$tokenEndpointUrl,$jwksUrl") + + val listenerNamePrefix = s"listener.name.${listenerName.value().toLowerCase}" + + serverConfig.setProperty(s"$listenerNamePrefix.oauthbearer.${SaslConfigs.SASL_JAAS_CONFIG}", s"${classOf[OAuthBearerLoginModule].getName} required ;") + serverConfig.setProperty(s"$listenerNamePrefix.oauthbearer.${SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE}", issuerId) + serverConfig.setProperty(s"$listenerNamePrefix.oauthbearer.${SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL}", jwksUrl) + serverConfig.setProperty(s"$listenerNamePrefix.oauthbearer.${BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG}", classOf[OAuthBearerValidatorCallbackHandler].getName) + + // create static config including client login context with credentials for JaasTestUtils 'client2' + startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism))) + + // The superuser needs the configuration in setUp because it's used to create resources before the individual + // test methods are invoked. 
+ superuserClientConfig.putAll(defaultClientCredentialsConfigs()) + + super.setUp(testInfo) + } + + @AfterEach + override def tearDown(): Unit = { + if (mockOAuthServer != null) + mockOAuthServer.shutdown() + + closeSasl() + super.tearDown() + + System.clearProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG) + System.clearProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG) + } + + def defaultOAuthConfigs(): Properties = { + val tokenEndpointUrl = mockOAuthServer.tokenEndpointUrl(issuerId).url().toString + + val configs = new Properties() + configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol.name) + configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasClientLoginModule(kafkaClientSaslMechanism)) + configs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, classOf[OAuthBearerLoginCallbackHandler].getName) + configs.put(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, tokenEndpointUrl) + configs + } + + def defaultClientCredentialsConfigs(): Properties = { + val configs = defaultOAuthConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, "test-client") + configs.put(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, "test-secret") + configs + } + + def defaultJwtBearerConfigs(): Properties = { + val configs = defaultOAuthConfigs() + configs.put(SaslConfigs.SASL_JAAS_CONFIG, jaasClientLoginModule(kafkaClientSaslMechanism)) + configs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, classOf[OAuthBearerLoginCallbackHandler].getName) + configs.put(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, "org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever") + configs + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testBasicClientCredentials(groupProtocol: String): Unit = { + val configs = defaultClientCredentialsConfigs() + assertDoesNotThrow(() => createProducer(configOverrides = configs)) + assertDoesNotThrow(() => createConsumer(configOverrides = configs)) + assertDoesNotThrow(() => createAdminClient(configOverrides = configs)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testBasicJwtBearer(groupProtocol: String): Unit = { + val jwt = mockOAuthServer.issueToken(issuerId, "jdoe", "someaudience", Collections.singletonMap("scope", "test")) + val assertionFile = TestUtils.tempFile(jwt.serialize()) + System.setProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, assertionFile.getAbsolutePath) + + val configs = defaultJwtBearerConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, assertionFile.getAbsolutePath) + + assertDoesNotThrow(() => createProducer(configOverrides = configs)) + assertDoesNotThrow(() => createConsumer(configOverrides = configs)) + assertDoesNotThrow(() => createAdminClient(configOverrides = configs)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testBasicJwtBearer2(groupProtocol: String): Unit = { + val privateKeyFile = generatePrivateKeyFile() + System.setProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, privateKeyFile.getAbsolutePath) + + val configs = defaultJwtBearerConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, privateKeyFile.getPath) + 
configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD, "default") + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB, "kafka-client-test-sub") + configs.put(SaslConfigs.SASL_OAUTHBEARER_SCOPE, "default") + // configs.put(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, "aud") + + assertDoesNotThrow(() => createProducer(configOverrides = configs)) + assertDoesNotThrow(() => createConsumer(configOverrides = configs)) + assertDoesNotThrow(() => createAdminClient(configOverrides = configs)) + } + + @Disabled("KAFKA-19394: Failure in ConsumerNetworkThread.initializeResources() can cause hangs on AsyncKafkaConsumer.close()") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testJwtBearerWithMalformedAssertionFile(groupProtocol: String): Unit = { + // Create the assertion file, but fill it with non-JWT garbage. + val assertionFile = TestUtils.tempFile("CQEN*)Q#F)&)^#QNC") + System.setProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, assertionFile.getAbsolutePath) + + val configs = defaultJwtBearerConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, assertionFile.getAbsolutePath) + + assertThrows(classOf[KafkaException], () => createProducer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createConsumer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createAdminClient(configOverrides = configs)) + } + + @Disabled("KAFKA-19394: Failure in ConsumerNetworkThread.initializeResources() can cause hangs on AsyncKafkaConsumer.close()") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testJwtBearerWithEmptyAssertionFile(groupProtocol: String): Unit = { + // Create the assertion file, but leave it empty. 
+ val assertionFile = TestUtils.tempFile() + System.setProperty(BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, assertionFile.getAbsolutePath) + + val configs = defaultJwtBearerConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, assertionFile.getAbsolutePath) + + assertThrows(classOf[KafkaException], () => createProducer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createConsumer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createAdminClient(configOverrides = configs)) + } + + @Disabled("KAFKA-19394: Failure in ConsumerNetworkThread.initializeResources() can cause hangs on AsyncKafkaConsumer.close()") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testJwtBearerWithMissingAssertionFile(groupProtocol: String): Unit = { + val missingFileName = "/this/does/not/exist.txt" + + val configs = defaultJwtBearerConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, missingFileName) + + assertThrows(classOf[KafkaException], () => createProducer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createConsumer(configOverrides = configs)) + assertThrows(classOf[KafkaException], () => createAdminClient(configOverrides = configs)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testUnsupportedJwtRetriever(groupProtocol: String): Unit = { + val className = "org.apache.kafka.common.security.oauthbearer.ThisIsNotARealJwtRetriever" + + val configs = defaultOAuthConfigs() + configs.put(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, className) + + assertThrows(classOf[ConfigException], () => createProducer(configOverrides = configs)) + assertThrows(classOf[ConfigException], () => createConsumer(configOverrides = configs)) + assertThrows(classOf[ConfigException], () => createAdminClient(configOverrides = configs)) + } + + def generatePrivateKeyFile(): File = { + val file = File.createTempFile("private-", ".key") + val bytes = Base64.getEncoder.encode(privateKey.getEncoded) + var channel: FileChannel = null + + try { + channel = FileChannel.open(file.toPath, util.EnumSet.of(StandardOpenOption.WRITE)) + Utils.writeFully(channel, ByteBuffer.wrap(bytes)) + } finally { + channel.close() + } + + file + } +} \ No newline at end of file diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index ff8cef56e56db..43c33e617de97 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -13,28 +13,20 @@ package kafka.api -import java.{time, util} import java.util.concurrent._ -import java.util.{Collections, Properties} +import java.util.Properties import kafka.server.KafkaConfig import kafka.utils.{Logging, TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.GroupMaxSizeReachedException -import org.apache.kafka.common.message.FindCoordinatorRequestData -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.{FindCoordinatorRequest, FindCoordinatorResponse} import 
org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} -import org.apache.kafka.server.util.ShutdownableThread import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Disabled, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import java.time.Duration -import scala.jdk.CollectionConverters._ import scala.collection.{Seq, mutable} /** @@ -98,230 +90,6 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumptionWithBrokerFailures(groupProtocol: String): Unit = consumeWithBrokerFailures(10) - - /* - * 1. Produce a bunch of messages - * 2. Then consume the messages while killing and restarting brokers at random - */ - def consumeWithBrokerFailures(numIters: Int): Unit = { - val numRecords = 1000 - val producer = createProducer() - producerSend(producer, numRecords) - - var consumed = 0L - val consumer = createConsumer() - - consumer.subscribe(Collections.singletonList(topic)) - - val scheduler = new BounceBrokerScheduler(numIters) - try { - scheduler.start() - - while (scheduler.isRunning) { - val records = consumer.poll(Duration.ofMillis(100)).asScala - - for (record <- records) { - assertEquals(consumed, record.offset()) - consumed += 1 - } - - if (records.nonEmpty) { - consumer.commitSync() - assertEquals(consumer.position(tp), consumer.committed(Set(tp).asJava).get(tp).offset) - - if (consumer.position(tp) == numRecords) { - consumer.seekToBeginning(Collections.emptyList()) - consumed = 0 - } - } - } - } finally { - scheduler.shutdown() - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSeekAndCommitWithBrokerFailures(groupProtocol: String): Unit = seekAndCommitWithBrokerFailures(5) - - def seekAndCommitWithBrokerFailures(numIters: Int): Unit = { - val numRecords = 1000 - val producer = createProducer() - producerSend(producer, numRecords) - - val consumer = createConsumer() - consumer.assign(Collections.singletonList(tp)) - consumer.seek(tp, 0) - - // wait until all the followers have synced the last HW with leader - TestUtils.waitUntilTrue(() => brokerServers.forall(server => - server.replicaManager.localLog(tp).get.highWatermark == numRecords - ), "Failed to update high watermark for followers after timeout") - - val scheduler = new BounceBrokerScheduler(numIters) - try { - scheduler.start() - - while (scheduler.isRunning) { - val coin = TestUtils.random.nextInt(3) - if (coin == 0) { - info("Seeking to end of log") - consumer.seekToEnd(Collections.emptyList()) - assertEquals(numRecords.toLong, consumer.position(tp)) - } else if (coin == 1) { - val pos = TestUtils.random.nextInt(numRecords).toLong - info("Seeking to " + pos) - consumer.seek(tp, pos) - assertEquals(pos, consumer.position(tp)) - } else if (coin == 2) { - info("Committing offset.") - consumer.commitSync() - assertEquals(consumer.position(tp), consumer.committed(Set(tp).asJava).get(tp).offset) - } - } - } finally { - scheduler.shutdown() - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSubscribeWhenTopicUnavailable(groupProtocol: String): Unit 
= { - val numRecords = 1000 - val newtopic = "newtopic" - - val consumer = createConsumer() - consumer.subscribe(Collections.singleton(newtopic)) - executor.schedule(new Runnable { - def run(): Unit = createTopic(newtopic, numPartitions = brokerCount, replicationFactor = brokerCount) - }, 2, TimeUnit.SECONDS) - consumer.poll(time.Duration.ZERO) - - val producer = createProducer() - - def sendRecords(numRecords: Int, topic: String): Unit = { - var remainingRecords = numRecords - val endTimeMs = System.currentTimeMillis + 20000 - while (remainingRecords > 0 && System.currentTimeMillis < endTimeMs) { - val futures = (0 until remainingRecords).map { i => - producer.send(new ProducerRecord(topic, part, i.toString.getBytes, i.toString.getBytes)) - } - futures.map { future => - try { - future.get - remainingRecords -= 1 - } catch { - case _: Exception => - } - } - } - assertEquals(0, remainingRecords) - } - - val poller = new ConsumerAssignmentPoller(consumer, List(newtopic)) - consumerPollers += poller - poller.start() - sendRecords(numRecords, newtopic) - receiveExactRecords(poller, numRecords, 10000) - poller.shutdown() - - brokerServers.foreach(server => killBroker(server.config.brokerId)) - Thread.sleep(500) - restartDeadBrokers() - - val poller2 = new ConsumerAssignmentPoller(consumer, List(newtopic)) - consumerPollers += poller2 - poller2.start() - sendRecords(numRecords, newtopic) - receiveExactRecords(poller, numRecords, 10000L) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testClose(groupProtocol: String): Unit = { - val numRecords = 10 - val producer = createProducer() - producerSend(producer, numRecords) - - checkCloseGoodPath(numRecords, "group1") - checkCloseWithCoordinatorFailure(numRecords, "group2", "group3") - checkCloseWithClusterFailure(numRecords, "group4", "group5", groupProtocol) - } - - /** - * Consumer is closed while cluster is healthy. Consumer should complete pending offset commits - * and leave group. New consumer instance should be able join group and start consuming from - * last committed offset. - */ - private def checkCloseGoodPath(numRecords: Int, groupId: String): Unit = { - val consumer = createConsumerAndReceive(groupId, manualAssign = false, numRecords) - val future = submitCloseAndValidate(consumer, Long.MaxValue, None, gracefulCloseTimeMs) - future.get - checkClosedState(groupId, numRecords) - } - - /** - * Consumer closed while coordinator is unavailable. Close of consumers using group - * management should complete after commit attempt even though commits fail due to rebalance. - * Close of consumers using manual assignment should complete with successful commits since a - * broker is available. 
- */ - private def checkCloseWithCoordinatorFailure(numRecords: Int, dynamicGroup: String, manualGroup: String): Unit = { - val consumer1 = createConsumerAndReceive(dynamicGroup, manualAssign = false, numRecords) - val consumer2 = createConsumerAndReceive(manualGroup, manualAssign = true, numRecords) - - killBroker(findCoordinator(dynamicGroup)) - killBroker(findCoordinator(manualGroup)) - - submitCloseAndValidate(consumer1, Long.MaxValue, None, gracefulCloseTimeMs).get - submitCloseAndValidate(consumer2, Long.MaxValue, None, gracefulCloseTimeMs).get - - restartDeadBrokers() - checkClosedState(dynamicGroup, 0) - checkClosedState(manualGroup, numRecords) - } - - private def findCoordinator(group: String): Int = { - val request = new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData() - .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) - .setCoordinatorKeys(Collections.singletonList(group))).build() - var nodeId = -1 - TestUtils.waitUntilTrue(() => { - val response = connectAndReceive[FindCoordinatorResponse](request) - nodeId = response.node.id - response.error == Errors.NONE - }, s"Failed to find coordinator for group $group") - nodeId - } - - /** - * Consumer is closed while all brokers are unavailable. Cannot rebalance or commit offsets since - * there is no coordinator, but close should timeout and return. If close is invoked with a very - * large timeout, close should timeout after request timeout. - */ - private def checkCloseWithClusterFailure(numRecords: Int, group1: String, group2: String, - groupProtocol: String): Unit = { - val consumer1 = createConsumerAndReceive(group1, manualAssign = false, numRecords) - - val requestTimeout = 6000 - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5000") - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") - } - this.consumerConfig.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout.toString) - val consumer2 = createConsumerAndReceive(group2, manualAssign = true, numRecords) - - brokerServers.foreach(server => killBroker(server.config.brokerId)) - val closeTimeout = 2000 - val future1 = submitCloseAndValidate(consumer1, closeTimeout, None, Some(closeTimeout)) - val future2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(requestTimeout)) - future1.get - future2.get - } - /** * If we have a running consumer group of size N, configure consumer.group.max.size = N-1 and restart all brokers, * the group should be forced to rebalance when it becomes hosted on a Coordinator with the new config. 
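The hunk that follows (and a matching change in EndToEndAuthorizationTest later in this diff) swaps a case-sensitive equals check on the parameterized group protocol name for equalsIgnoreCase. The point of the change, sketched with hypothetical values on the assumption that the enum constant name and the injected test parameter can differ only in case:

object GroupProtocolNameCheckSketch {
  // Hypothetical values for illustration; the real strings come from GroupProtocol and the test parameters.
  val enumConstantName = "CLASSIC"
  val injectedParameter = "classic"

  def main(args: Array[String]): Unit = {
    // Strict comparison: false when only the case differs, so the classic-protocol-only
    // consumer settings would silently be skipped.
    println(injectedParameter.equals(enumConstantName))           // false
    // Case-insensitive comparison used after this change: matches either spelling.
    println(injectedParameter.equalsIgnoreCase(enumConstantName)) // true
  }
}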
@@ -338,7 +106,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { val partitionCount = consumerCount * 2 this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") } this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") @@ -368,215 +136,9 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { assertTrue(raisedExceptions.head.isInstanceOf[GroupMaxSizeReachedException]) } - /** - * When we have the consumer group max size configured to X, the X+1th consumer trying to join should receive a fatal exception - */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(groupProtocol: String): Unit = { - val group = "fatal-exception-test" - val topic = "fatal-exception-test" - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") - } - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - - val partitions = createTopicPartitions(topic, numPartitions = maxGroupSize, replicationFactor = brokerCount) - - // Create N+1 consumers in the same consumer group and assert that the N+1th consumer receives a fatal error when it tries to join the group - val consumerPollers = mutable.Buffer[ConsumerAssignmentPoller]() - try { - addConsumersToGroupAndWaitForGroupAssignment(maxGroupSize, mutable.Buffer[Consumer[Array[Byte], Array[Byte]]](), - consumerPollers, List[String](topic), partitions, group) - val (_, rejectedConsumerPollers) = addConsumersToGroup(1, - mutable.Buffer[Consumer[Array[Byte], Array[Byte]]](), mutable.Buffer[ConsumerAssignmentPoller](), List[String](topic), partitions, group) - val rejectedConsumer = rejectedConsumerPollers.head - TestUtils.waitUntilTrue(() => { - rejectedConsumer.thrownException.isDefined - }, "Extra consumer did not throw an exception") - assertTrue(rejectedConsumer.thrownException.get.isInstanceOf[GroupMaxSizeReachedException]) - - // assert group continues to live - producerSend(createProducer(), maxGroupSize * 100, topic, numPartitions = Some(partitions.size)) - TestUtils.waitUntilTrue(() => { - consumerPollers.forall(p => p.receivedMessages >= 100) - }, "The consumers in the group could not fetch the expected records", 10000L) - } finally { - consumerPollers.foreach(_.shutdown()) - } - } - - /** - * Consumer is closed during rebalance. Close should leave group and close - * immediately if rebalance is in progress. If brokers are not available, - * close should terminate immediately without sending leave group. 
- */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCloseDuringRebalance(groupProtocol: String): Unit = { - val topic = "closetest" - createTopic(topic, 10, brokerCount) - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") - } - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - checkCloseDuringRebalance("group1", topic, executor, brokersAvailableDuringClose = true) - } - - private def checkCloseDuringRebalance(groupId: String, topic: String, executor: ExecutorService, brokersAvailableDuringClose: Boolean): Unit = { - - def subscribeAndPoll(consumer: Consumer[Array[Byte], Array[Byte]], revokeSemaphore: Option[Semaphore] = None): Future[Any] = { - executor.submit(() => { - consumer.subscribe(Collections.singletonList(topic)) - revokeSemaphore.foreach(s => s.release()) - consumer.poll(Duration.ofMillis(500)) - }, 0) - } - - def waitForRebalance(timeoutMs: Long, future: Future[Any], otherConsumers: Consumer[Array[Byte], Array[Byte]]*): Unit = { - val startMs = System.currentTimeMillis - while (System.currentTimeMillis < startMs + timeoutMs && !future.isDone) - otherConsumers.foreach(consumer => consumer.poll(time.Duration.ofMillis(100L))) - assertTrue(future.isDone, "Rebalance did not complete in time") - } - - def createConsumerToRebalance(): Future[Any] = { - val consumer = createConsumerWithGroupId(groupId) - val rebalanceSemaphore = new Semaphore(0) - val future = subscribeAndPoll(consumer, Some(rebalanceSemaphore)) - // Wait for consumer to poll and trigger rebalance - assertTrue(rebalanceSemaphore.tryAcquire(2000, TimeUnit.MILLISECONDS), "Rebalance not triggered") - // Rebalance is blocked by other consumers not polling - assertFalse(future.isDone, "Rebalance completed too early") - future - } - val consumer1 = createConsumerWithGroupId(groupId) - waitForRebalance(2000, subscribeAndPoll(consumer1)) - val consumer2 = createConsumerWithGroupId(groupId) - waitForRebalance(2000, subscribeAndPoll(consumer2), consumer1) - val rebalanceFuture = createConsumerToRebalance() - - // consumer1 should leave group and close immediately even though rebalance is in progress - val closeFuture1 = submitCloseAndValidate(consumer1, Long.MaxValue, None, gracefulCloseTimeMs) - - // Rebalance should complete without waiting for consumer1 to timeout since consumer1 has left the group - waitForRebalance(2000, rebalanceFuture, consumer2) - - // Trigger another rebalance and shutdown all brokers - // This consumer poll() doesn't complete and `tearDown` shuts down the executor and closes the consumer - createConsumerToRebalance() - brokerServers.foreach(server => killBroker(server.config.brokerId)) - - // consumer2 should close immediately without LeaveGroup request since there are no brokers available - val closeFuture2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(0)) - - // Ensure futures complete to avoid concurrent shutdown attempt during test cleanup - closeFuture1.get(2000, TimeUnit.MILLISECONDS) - closeFuture2.get(2000, TimeUnit.MILLISECONDS) - } - - private def createConsumerAndReceive(groupId: String, manualAssign: Boolean, numRecords: Int): Consumer[Array[Byte], Array[Byte]] = { - val consumer = createConsumerWithGroupId(groupId) - val consumerPoller = if (manualAssign) - 
subscribeConsumerAndStartPolling(consumer, List(), Set(tp)) - else - subscribeConsumerAndStartPolling(consumer, List(topic)) - - consumerPollers += consumerPoller - receiveExactRecords(consumerPoller, numRecords) - consumerPoller.shutdown() - consumer - } - - private def receiveExactRecords(consumer: ConsumerAssignmentPoller, numRecords: Int, timeoutMs: Long = 60000): Unit = { - TestUtils.waitUntilTrue(() => { - consumer.receivedMessages == numRecords - }, s"Consumer did not receive expected $numRecords. It received ${consumer.receivedMessages}", timeoutMs) - } - - private def submitCloseAndValidate(consumer: Consumer[Array[Byte], Array[Byte]], - closeTimeoutMs: Long, minCloseTimeMs: Option[Long], maxCloseTimeMs: Option[Long]): Future[Any] = { - executor.submit(() => { - val closeGraceTimeMs = 2000 - val startMs = System.currentTimeMillis() - info("Closing consumer with timeout " + closeTimeoutMs + " ms.") - consumer.close(time.Duration.ofMillis(closeTimeoutMs)) - val timeTakenMs = System.currentTimeMillis() - startMs - maxCloseTimeMs.foreach { ms => - assertTrue(timeTakenMs < ms + closeGraceTimeMs, "Close took too long " + timeTakenMs) - } - minCloseTimeMs.foreach { ms => - assertTrue(timeTakenMs >= ms, "Close finished too quickly " + timeTakenMs) - } - info("consumer.close() completed in " + timeTakenMs + " ms.") - }, 0) - } - - private def checkClosedState(groupId: String, committedRecords: Int): Unit = { - // Check that close was graceful with offsets committed and leave group sent. - // New instance of consumer should be assigned partitions immediately and should see committed offsets. - val assignSemaphore = new Semaphore(0) - val consumer = createConsumerWithGroupId(groupId) - consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener { - def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - assignSemaphore.release() - } - def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { - }}) - - TestUtils.waitUntilTrue(() => { - consumer.poll(time.Duration.ofMillis(100L)) - assignSemaphore.tryAcquire() - }, "Assignment did not complete on time") - - if (committedRecords > 0) - assertEquals(committedRecords, consumer.committed(Set(tp).asJava).get(tp).offset) - consumer.close() - } - - private class BounceBrokerScheduler(val numIters: Int) extends ShutdownableThread("daemon-bounce-broker", false) { - private var iter: Int = 0 - - override def doWork(): Unit = { - killRandomBroker() - Thread.sleep(500) - restartDeadBrokers() - - iter += 1 - if (iter == numIters) - initiateShutdown() - else - Thread.sleep(500) - } - } - private def createTopicPartitions(topic: String, numPartitions: Int, replicationFactor: Int, topicConfig: Properties = new Properties): Set[TopicPartition] = { createTopic(topic, numPartitions = numPartitions, replicationFactor = replicationFactor, topicConfig = topicConfig) Range(0, numPartitions).map(part => new TopicPartition(topic, part)).toSet } - - private def producerSend(producer: KafkaProducer[Array[Byte], Array[Byte]], - numRecords: Int, - topic: String = this.topic, - numPartitions: Option[Int] = None): Unit = { - var partitionIndex = 0 - def getPartition: Int = { - numPartitions match { - case Some(partitions) => - val nextPart = partitionIndex % partitions - partitionIndex += 1 - nextPart - case None => part - } - } - - val futures = (0 until numRecords).map { i => - producer.send(new ProducerRecord(topic, getPartition, i.toString.getBytes, i.toString.getBytes)) - } - futures.map(_.get) - } - 
} diff --git a/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala deleted file mode 100644 index e708e04541502..0000000000000 --- a/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.api - -import kafka.utils.TestInfoUtils -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.record.{AbstractRecords, CompressionType, MemoryRecords, RecordBatch, RecordVersion, SimpleRecord, TimestampType} -import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import java.nio.ByteBuffer -import java.util -import java.util.{Collections, Optional} -import scala.jdk.CollectionConverters._ - -class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTest { - - val topic1 = "part-test-topic-1" - val topic2 = "part-test-topic-2" - val topic3 = "part-test-topic-3" - - val t1p0 = new TopicPartition(topic1, 0) - val t1p1 = new TopicPartition(topic1, 1) - val t2p0 = new TopicPartition(topic2, 0) - val t2p1 = new TopicPartition(topic2, 1) - val t3p0 = new TopicPartition(topic3, 0) - val t3p1 = new TopicPartition(topic3, 1) - - private def appendLegacyRecords(numRecords: Int, tp: TopicPartition, brokerId: Int, magicValue: Byte): Unit = { - val records = (0 until numRecords).map { i => - new SimpleRecord(i, s"key $i".getBytes, s"value $i".getBytes) - } - val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, CompressionType.NONE, records.asJava)) - val builder = MemoryRecords.builder(buffer, magicValue, Compression.of(CompressionType.NONE).build, - TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, - 0, false, RecordBatch.NO_PARTITION_LEADER_EPOCH) - - records.foreach(builder.append) - - brokers.filter(_.config.brokerId == brokerId).foreach(b => { - val unifiedLog = b.replicaManager.logManager.getLog(tp).get - unifiedLog.appendAsLeaderWithRecordVersion(builder.build(), 0, RecordVersion.lookup(magicValue) - ) - // Default isolation.level is read_uncommitted. It makes Partition#fetchOffsetForTimestamp to return UnifiedLog#highWatermark, - // so increasing high watermark to make it return the correct offset. 
- unifiedLog.maybeIncrementHighWatermark(unifiedLog.logEndOffsetMetadata) - }) - } - - private def setupTopics(): Unit = { - val producer = createProducer() - createTopic(topic1, numPartitions = 2) - createTopicWithAssignment(topic2, Map(0 -> List(0), 1 -> List(1))) - createTopicWithAssignment(topic3, Map(0 -> List(0), 1 -> List(1))) - - // v2 message format for topic1 - sendRecords(producer, numRecords = 100, t1p0, startingTimestamp = 0) - sendRecords(producer, numRecords = 100, t1p1, startingTimestamp = 0) - // v0 message format for topic2 - appendLegacyRecords(100, t2p0, 0, RecordBatch.MAGIC_VALUE_V0) - appendLegacyRecords(100, t2p1, 1, RecordBatch.MAGIC_VALUE_V0) - // v1 message format for topic3 - appendLegacyRecords(100, t3p0, 0, RecordBatch.MAGIC_VALUE_V1) - appendLegacyRecords(100, t3p1, 1, RecordBatch.MAGIC_VALUE_V1) - - producer.close() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetsForTimes(groupProtocol: String): Unit = { - setupTopics() - val consumer = createConsumer() - - // Test negative target time - assertThrows(classOf[IllegalArgumentException], - () => consumer.offsetsForTimes(Collections.singletonMap(t1p0, -1))) - - val timestampsToSearch = util.Map.of[TopicPartition, java.lang.Long]( - t1p0, 0L, - t1p1, 20L, - t2p0, 40L, - t2p1, 60L, - t3p0, 80L, - t3p1, 100L - ) - - val timestampOffsets = consumer.offsetsForTimes(timestampsToSearch) - - val timestampTopic1P0 = timestampOffsets.get(t1p0) - assertEquals(0, timestampTopic1P0.offset) - assertEquals(0, timestampTopic1P0.timestamp) - assertEquals(Optional.of(0), timestampTopic1P0.leaderEpoch) - - val timestampTopic1P1 = timestampOffsets.get(t1p1) - assertEquals(20, timestampTopic1P1.offset) - assertEquals(20, timestampTopic1P1.timestamp) - assertEquals(Optional.of(0), timestampTopic1P1.leaderEpoch) - - // v0 message format doesn't have timestamp - val timestampTopic2P0 = timestampOffsets.get(t2p0) - assertNull(timestampTopic2P0) - - val timestampTopic2P1 = timestampOffsets.get(t2p1) - assertNull(timestampTopic2P1) - - // v1 message format doesn't have leader epoch - val timestampTopic3P0 = timestampOffsets.get(t3p0) - assertEquals(80, timestampTopic3P0.offset) - assertEquals(80, timestampTopic3P0.timestamp) - assertEquals(Optional.empty, timestampTopic3P0.leaderEpoch) - - assertNull(timestampOffsets.get(t3p1)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testEarliestOrLatestOffsets(groupProtocol: String): Unit = { - setupTopics() - - val partitions = Set(t1p0, t1p1, t2p0, t2p1, t3p0, t3p1).asJava - val consumer = createConsumer() - - val earliests = consumer.beginningOffsets(partitions) - assertEquals(0L, earliests.get(t1p0)) - assertEquals(0L, earliests.get(t1p1)) - assertEquals(0L, earliests.get(t2p0)) - assertEquals(0L, earliests.get(t2p1)) - assertEquals(0L, earliests.get(t3p0)) - assertEquals(0L, earliests.get(t3p1)) - - val latests = consumer.endOffsets(partitions) - assertEquals(100L, latests.get(t1p0)) - assertEquals(100L, latests.get(t1p1)) - assertEquals(100L, latests.get(t2p0)) - assertEquals(100L, latests.get(t2p1)) - assertEquals(100L, latests.get(t3p0)) - assertEquals(100L, latests.get(t3p1)) - } -} diff --git a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala index 
0ed9dc44fdf72..e1bd97c93b044 100644 --- a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala +++ b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala @@ -406,7 +406,7 @@ class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable w } override def reconfigurableConfigs: util.Set[String] = { - Set(DefaultProduceQuotaProp, DefaultFetchQuotaProp).asJava + java.util.Set.of(DefaultProduceQuotaProp, DefaultFetchQuotaProp) } override def validateReconfiguration(configs: util.Map[String, _]): Unit = { @@ -437,9 +437,9 @@ class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable w case groupPrincipal: GroupedUserPrincipal => val userGroup = groupPrincipal.userGroup val quotaLimit = quotaOrDefault(userGroup, quotaType) - if (quotaLimit != null) - Map(QuotaGroupTag -> userGroup).asJava - else + if (quotaLimit != null) { + util.Map.of(QuotaGroupTag, userGroup) + } else UnlimitedQuotaMetricTags case _ => UnlimitedQuotaMetricTags diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala index 9c8c8744ebdf8..f777c8da46eff 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala @@ -66,7 +66,7 @@ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setClusterId("XcZZOzUqS4yHOjhMQB6JLQ") formatter.setScramArguments( - List(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) + java.util.List.of(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) } override def createPrivilegedAdminClient(): Admin = createScramAdminClient(kafkaClientSaslMechanism, kafkaPrincipal.getName, kafkaPassword) @@ -105,8 +105,8 @@ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest val privilegedAdminClient = Admin.create(privilegedAdminClientConfig) try { val user = "user" - val results = privilegedAdminClient.alterUserScramCredentials(List[UserScramCredentialAlteration]( - new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.SCRAM_SHA_256, 4096), "password")).asJava) + val results = privilegedAdminClient.alterUserScramCredentials(java.util.List.of[UserScramCredentialAlteration]( + new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.SCRAM_SHA_256, 4096), "password"))) assertEquals(1, results.values.size) val future = results.values.get(user) future.get // make sure we haven't completed exceptionally diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala index b05162d49a015..3af29d58a7a70 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala @@ -26,10 +26,9 @@ import org.apache.kafka.common.resource.ResourcePattern import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.token.delegation.DelegationToken import 
org.junit.jupiter.api.Assertions.{assertThrows, assertTrue} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test -import java.util.Collections +import java.util import scala.concurrent.ExecutionException import scala.jdk.CollectionConverters._ import scala.util.Using @@ -94,36 +93,33 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE createScramAdminClient(kafkaClientSaslMechanism, tokenRequesterPrincipal.getName, tokenRequesterPassword) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateTokenForOtherUserFails(quorum: String): Unit = { + @Test + def testCreateTokenForOtherUserFails(): Unit = { val thrown = assertThrows(classOf[ExecutionException], () => { createDelegationTokens(() => new CreateDelegationTokenOptions().owner(otherClientPrincipal), assert = false) }) assertTrue(thrown.getMessage.contains("Delegation Token authorization failed")) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeTokenForOtherUserFails(quorum: String): Unit = { + @Test + def testDescribeTokenForOtherUserFails(): Unit = { Using.resource(createScramAdminClient(kafkaClientSaslMechanism, describeTokenFailPrincipal.getName, describeTokenFailPassword)) { describeTokenFailAdminClient => Using.resource(createScramAdminClient(kafkaClientSaslMechanism, otherClientPrincipal.getName, otherClientPassword)) { otherClientAdminClient => otherClientAdminClient.createDelegationToken().delegationToken().get() val tokens = describeTokenFailAdminClient.describeDelegationToken( - new DescribeDelegationTokenOptions().owners(Collections.singletonList(otherClientPrincipal)) + new DescribeDelegationTokenOptions().owners(util.List.of(otherClientPrincipal)) ).delegationTokens.get.asScala assertTrue(tokens.isEmpty) } } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeTokenForOtherUserPasses(quorum: String): Unit = { + @Test + def testDescribeTokenForOtherUserPasses(): Unit = { val adminClient = createTokenRequesterAdminClient() try { val tokens = adminClient.describeDelegationToken( - new DescribeDelegationTokenOptions().owners(Collections.singletonList(clientPrincipal))) + new DescribeDelegationTokenOptions().owners(util.List.of(clientPrincipal))) .delegationTokens.get.asScala assertTrue(tokens.nonEmpty) tokens.foreach(t => { diff --git a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala index b18df9f6af831..40bb4f649cb5d 100644 --- a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala @@ -19,7 +19,7 @@ package kafka.api import com.yammer.metrics.core.Gauge -import java.util.{Collections, Properties} +import java.util.Properties import java.util.concurrent.ExecutionException import org.apache.kafka.metadata.authorizer.StandardAuthorizer import kafka.utils._ @@ -45,6 +45,7 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{CsvSource, MethodSource} +import java.util import scala.jdk.CollectionConverters._ /** @@ -175,7 +176,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testProduceConsumeViaAssign(groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = 
createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } @@ -204,7 +205,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testProduceConsumeViaSubscribe(groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } @@ -216,7 +217,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val producer = createProducer() sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } @@ -228,7 +229,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val producer = createProducer() sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } @@ -240,15 +241,15 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val tp2 = new TopicPartition("topic2", 0) setAclsAndProduce(tp2) val consumer = createConsumer() - consumer.assign(List(tp2).asJava) + consumer.assign(java.util.List.of(tp2)) consumeRecords(consumer, numRecords, topic = tp2.topic) confirmReauthenticationMetrics() } private def setWildcardResourceAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclWildcardTopicWrite, AclWildcardTopicCreate, AclWildcardTopicDescribe, AclWildcardTopicRead).asJava).values - superuserAdminClient.createAcls(List(AclWildcardGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclWildcardTopicWrite, AclWildcardTopicCreate, AclWildcardTopicDescribe, AclWildcardTopicRead)).values + superuserAdminClient.createAcls(java.util.List.of(AclWildcardGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, wildcardTopicResource) @@ -258,8 +259,8 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas private def setPrefixedResourceAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclPrefixedTopicWrite, AclPrefixedTopicCreate, AclPrefixedTopicDescribe, AclPrefixedTopicRead).asJava).values - superuserAdminClient.createAcls(List(AclPrefixedGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclPrefixedTopicWrite, AclPrefixedTopicCreate, AclPrefixedTopicDescribe, AclPrefixedTopicRead)).values + superuserAdminClient.createAcls(java.util.List.of(AclPrefixedGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, prefixedTopicResource) @@ -271,9 +272,9 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val topicResource = new ResourcePattern(TOPIC, tp.topic, LITERAL) val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclTopicWrite(topicResource), 
AclTopicCreate(topicResource), AclTopicDescribe(topicResource)).asJava).values - superuserAdminClient.createAcls(List(AclTopicRead(topicResource)).asJava).values - superuserAdminClient.createAcls(List(AclGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(topicResource), AclTopicCreate(topicResource), AclTopicDescribe(topicResource))).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicRead(topicResource))).values + superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, @@ -290,7 +291,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas private def setConsumerGroupAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) } @@ -319,10 +320,10 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, numRecords, topic = tp.topic)) val adminClient = createAdminClient() - val e1 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(Set(topic).asJava).allTopicNames().get()) + val e1 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames().get()) assertTrue(e1.getCause.isInstanceOf[TopicAuthorizationException], "Unexpected exception " + e1.getCause) // Verify successful produce/consume/describe on another topic using the same producer, consumer and adminClient @@ -338,22 +339,22 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas producer sendRecords(producer2, numRecords, tp2) - consumer.assign(List(tp2).asJava) + consumer.assign(java.util.List.of(tp2)) consumeRecords(consumer, numRecords, topic = topic2) - val describeResults = adminClient.describeTopics(Set(topic, topic2).asJava).topicNameValues() + val describeResults = adminClient.describeTopics(java.util.Set.of(topic, topic2)).topicNameValues() assertEquals(1, describeResults.get(topic2).get().partitions().size()) - val e2 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(Set(topic).asJava).allTopicNames().get()) + val e2 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames().get()) assertTrue(e2.getCause.isInstanceOf[TopicAuthorizationException], "Unexpected exception " + e2.getCause) // Verify that consumer manually assigning both authorized and unauthorized topic doesn't consume // from the unauthorized topic and throw; since we can now return data during the time we are updating // metadata / fetching positions, it is possible that the authorized topic record is returned during this time. 
- consumer.assign(List(tp, tp2).asJava) + consumer.assign(java.util.List.of(tp, tp2)) sendRecords(producer2, numRecords, tp2) var topic2RecordConsumed = false def verifyNoRecords(records: ConsumerRecords[Array[Byte], Array[Byte]]): Boolean = { - assertEquals(Collections.singleton(tp2), records.partitions(), "Consumed records with unexpected partitions: " + records) + assertEquals(util.Set.of(tp2), records.partitions(), "Consumed records with unexpected partitions: " + records) topic2RecordConsumed = true false } @@ -367,7 +368,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } sendRecords(producer2, numRecords, tp) consumeRecordsIgnoreOneAuthorizationException(consumer, numRecords, startingOffset = 0, topic) - val describeResults2 = adminClient.describeTopics(Set(topic, topic2).asJava).topicNameValues + val describeResults2 = adminClient.describeTopics(java.util.Set.of(topic, topic2)).topicNameValues assertEquals(1, describeResults2.get(topic).get().partitions().size()) assertEquals(1, describeResults2.get(topic2).get().partitions().size()) } @@ -379,7 +380,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas )) def testNoProduceWithDescribeAcl(isIdempotenceEnabled:Boolean): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclTopicDescribe()).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicDescribe())).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicDescribeAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) @@ -394,7 +395,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas assertThrows(classOf[KafkaException], () => sendRecords(producer, numRecords, tp)) } else { val e = assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) - assertEquals(Set(topic).asJava, e.unauthorizedTopics()) + assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) } confirmReauthenticationMetrics() } @@ -408,7 +409,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testNoConsumeWithoutDescribeAclViaAssign(groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) // the exception is expected when the consumer attempts to lookup offsets assertThrows(classOf[KafkaException], () => consumeRecords(consumer)) confirmReauthenticationMetrics() @@ -419,18 +420,18 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testNoConsumeWithoutDescribeAclViaSubscribe(groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) // this should timeout since the consumer will not be able to fetch any metadata for the topic assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, timeout = 3000)) // Verify that no records are consumed even if one of the requested topics is authorized setReadAndWriteAcls(tp) - consumer.subscribe(List(topic, "topic2").asJava) + consumer.subscribe(java.util.List.of(topic, "topic2")) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, timeout = 3000)) // Verify that records are consumed if all topics are authorized - consumer.subscribe(List(topic).asJava) - if 
(groupProtocol.equals(GroupProtocol.CLASSIC)) { + consumer.subscribe(java.util.List.of(topic)) + if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { consumeRecordsIgnoreOneAuthorizationException(consumer) } else { TestUtils.waitUntilTrue(() => { @@ -446,8 +447,8 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas private def noConsumeWithoutDescribeAclSetup(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values - superuserAdminClient.createAcls(List(AclGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values + superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) @@ -457,8 +458,8 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val producer = createProducer() sendRecords(producer, numRecords, tp) - superuserAdminClient.deleteAcls(List(AclTopicDescribe().toFilter).asJava).values - superuserAdminClient.deleteAcls(List(AclTopicWrite().toFilter).asJava).values + superuserAdminClient.deleteAcls(java.util.List.of(AclTopicDescribe().toFilter)).values + superuserAdminClient.deleteAcls(java.util.List.of(AclTopicWrite().toFilter)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) @@ -471,10 +472,10 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testNoConsumeWithDescribeAclViaAssign(groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(Set(topic).asJava, e.unauthorizedTopics()) + assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) confirmReauthenticationMetrics() } @@ -483,17 +484,17 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas def testNoConsumeWithDescribeAclViaSubscribe(groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(Set(topic).asJava, e.unauthorizedTopics()) + assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) confirmReauthenticationMetrics() } private def noConsumeWithDescribeAclSetup(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values - superuserAdminClient.createAcls(List(AclGroupRead).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values + superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) @@ -511,7 +512,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas 
@MethodSource(Array("getTestGroupProtocolParametersAll")) def testNoGroupAcl(groupProtocol: String): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values + superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values brokers.foreach { s => TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) } @@ -519,7 +520,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val e = assertThrows(classOf[GroupAuthorizationException], () => consumeRecords(consumer)) assertEquals(group, e.groupId()) confirmReauthenticationMetrics() diff --git a/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala deleted file mode 100644 index 50588ac9e2952..0000000000000 --- a/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala +++ /dev/null @@ -1,218 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.api - -import java.util.concurrent.ExecutionException -import java.util.concurrent.atomic.AtomicReference -import java.util.Properties -import kafka.integration.KafkaServerTestHarness -import kafka.server._ -import kafka.utils._ -import kafka.utils.Implicits._ -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} -import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, TopicPartition} -import org.apache.kafka.server.metrics.MetricConfigs -import org.apache.kafka.test.{TestUtils => _, _} -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, TestInfo} - -import scala.jdk.CollectionConverters._ -import org.apache.kafka.test.TestUtils.isValidClusterId -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -/** The test cases here verify the following conditions. - * 1. The ProducerInterceptor receives the cluster id after the onSend() method is called and before onAcknowledgement() method is called. - * 2. The Serializer receives the cluster id before the serialize() method is called. - * 3. The producer MetricReporter receives the cluster id after send() method is called on KafkaProducer. - * 4. The ConsumerInterceptor receives the cluster id before the onConsume() method. - * 5. 
The Deserializer receives the cluster id before the deserialize() method is called. - * 6. The consumer MetricReporter receives the cluster id after poll() is called on KafkaConsumer. - * 7. The broker MetricReporter receives the cluster id after the broker startup is over. - * 8. The broker KafkaMetricReporter receives the cluster id after the broker startup is over. - * 9. All the components receive the same cluster id. - */ - -object EndToEndClusterIdTest { - - object MockConsumerMetricsReporter { - val CLUSTER_META = new AtomicReference[ClusterResource] - } - - class MockConsumerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { - - override def onUpdate(clusterMetadata: ClusterResource): Unit = { - MockConsumerMetricsReporter.CLUSTER_META.set(clusterMetadata) - } - } - - object MockProducerMetricsReporter { - val CLUSTER_META = new AtomicReference[ClusterResource] - } - - class MockProducerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { - - override def onUpdate(clusterMetadata: ClusterResource): Unit = { - MockProducerMetricsReporter.CLUSTER_META.set(clusterMetadata) - } - } - - object MockBrokerMetricsReporter { - val CLUSTER_META = new AtomicReference[ClusterResource] - } - - class MockBrokerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { - - override def onUpdate(clusterMetadata: ClusterResource): Unit = { - MockBrokerMetricsReporter.CLUSTER_META.set(clusterMetadata) - } - } -} - -class EndToEndClusterIdTest extends KafkaServerTestHarness { - - import EndToEndClusterIdTest._ - - val producerCount = 1 - val consumerCount = 1 - val serverCount = 1 - lazy val producerConfig = new Properties - lazy val consumerConfig = new Properties - lazy val serverConfig = new Properties - val numRecords = 1 - val topic = "e2etopic" - val part = 0 - val tp = new TopicPartition(topic, part) - this.serverConfig.setProperty(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockBrokerMetricsReporter].getName) - - override def generateConfigs = { - val cfgs = TestUtils.createBrokerConfigs(serverCount, interBrokerSecurityProtocol = Some(securityProtocol), - trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties) - cfgs.foreach(_ ++= serverConfig) - cfgs.map(KafkaConfig.fromProps) - } - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - super.setUp(testInfo) - MockDeserializer.resetStaticVariables() - // create the consumer offset topic - createTopic(topic, 2, serverCount) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testEndToEnd(groupProtocol: String): Unit = { - val appendStr = "mock" - MockConsumerInterceptor.resetCounters() - MockProducerInterceptor.resetCounters() - - assertNotNull(MockBrokerMetricsReporter.CLUSTER_META) - isValidClusterId(MockBrokerMetricsReporter.CLUSTER_META.get.clusterId) - - val producerProps = new Properties() - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockProducerInterceptor].getName) - producerProps.put("mock.interceptor.append", appendStr) - producerProps.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockProducerMetricsReporter].getName) - val testProducer = new KafkaProducer(producerProps, new MockSerializer, new MockSerializer) - - // Send one record and make sure clusterId is set after send and before onAcknowledgement - 
sendRecords(testProducer, 1, tp) - assertNotEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT, MockProducerInterceptor.NO_CLUSTER_ID) - assertNotNull(MockProducerInterceptor.CLUSTER_META) - assertEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.get.clusterId, MockProducerInterceptor.CLUSTER_META.get.clusterId) - isValidClusterId(MockProducerInterceptor.CLUSTER_META.get.clusterId) - - // Make sure that serializer gets the cluster id before serialize method. - assertNotEquals(MockSerializer.CLUSTER_ID_BEFORE_SERIALIZE, MockSerializer.NO_CLUSTER_ID) - assertNotNull(MockSerializer.CLUSTER_META) - isValidClusterId(MockSerializer.CLUSTER_META.get.clusterId) - - assertNotNull(MockProducerMetricsReporter.CLUSTER_META) - isValidClusterId(MockProducerMetricsReporter.CLUSTER_META.get.clusterId) - - this.consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockConsumerInterceptor].getName) - this.consumerConfig.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockConsumerMetricsReporter].getName) - this.consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) - val testConsumer = new KafkaConsumer(this.consumerConfig, new MockDeserializer, new MockDeserializer) - testConsumer.assign(List(tp).asJava) - testConsumer.seek(tp, 0) - - // consume and verify that values are modified by interceptors - consumeRecords(testConsumer, numRecords) - - // Check that cluster id is present after the first poll call. - assertNotEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME, MockConsumerInterceptor.NO_CLUSTER_ID) - assertNotNull(MockConsumerInterceptor.CLUSTER_META) - isValidClusterId(MockConsumerInterceptor.CLUSTER_META.get.clusterId) - assertEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId) - - assertNotEquals(MockDeserializer.clusterIdBeforeDeserialize, MockDeserializer.noClusterId) - assertNotNull(MockDeserializer.clusterMeta) - isValidClusterId(MockDeserializer.clusterMeta.get.clusterId) - assertEquals(MockDeserializer.clusterIdBeforeDeserialize.get.clusterId, MockDeserializer.clusterMeta.get.clusterId) - - assertNotNull(MockConsumerMetricsReporter.CLUSTER_META) - isValidClusterId(MockConsumerMetricsReporter.CLUSTER_META.get.clusterId) - - // Make sure everyone receives the same cluster id. 
- assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockSerializer.CLUSTER_META.get.clusterId) - assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockProducerMetricsReporter.CLUSTER_META.get.clusterId) - assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId) - assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockDeserializer.clusterMeta.get.clusterId) - assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerMetricsReporter.CLUSTER_META.get.clusterId) - assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockBrokerMetricsReporter.CLUSTER_META.get.clusterId) - - testConsumer.close() - testProducer.close() - MockConsumerInterceptor.resetCounters() - MockProducerInterceptor.resetCounters() - } - - private def sendRecords(producer: KafkaProducer[Array[Byte], Array[Byte]], numRecords: Int, tp: TopicPartition): Unit = { - val futures = (0 until numRecords).map { i => - val record = new ProducerRecord(tp.topic(), tp.partition(), s"$i".getBytes, s"$i".getBytes) - debug(s"Sending this record: $record") - producer.send(record) - } - try { - futures.foreach(_.get) - } catch { - case e: ExecutionException => throw e.getCause - } - } - - private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]], - numRecords: Int, - startingOffset: Int = 0, - topic: String = topic, - part: Int = part): Unit = { - val records = TestUtils.consumeRecords(consumer, numRecords) - - for (i <- 0 until numRecords) { - val record = records(i) - val offset = startingOffset + i - assertEquals(topic, record.topic) - assertEquals(part, record.partition) - assertEquals(offset.toLong, record.offset) - } - } -} diff --git a/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala deleted file mode 100644 index 2d7a82383bc36..0000000000000 --- a/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala +++ /dev/null @@ -1,238 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package kafka.api - -import java.util.Properties -import java.util.concurrent.ExecutionException -import kafka.api.GroupAuthorizerIntegrationTest._ -import kafka.server.BaseRequestTest -import kafka.utils.{TestInfoUtils, TestUtils} -import org.apache.kafka.clients.consumer.ConsumerConfig -import org.apache.kafka.clients.producer.ProducerRecord -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.acl.{AccessControlEntry, AclOperation, AclPermissionType} -import org.apache.kafka.common.config.internals.BrokerSecurityConfigs -import org.apache.kafka.common.errors.TopicAuthorizationException -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourceType} -import org.apache.kafka.common.security.auth.{AuthenticationContext, KafkaPrincipal} -import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig -import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.authorizer.StandardAuthorizer -import org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST -import org.apache.kafka.server.config.ServerConfigs -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.function.Executable -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import scala.jdk.CollectionConverters._ - -object GroupAuthorizerIntegrationTest { - val BrokerPrincipal = new KafkaPrincipal("Group", "broker") - val ClientPrincipal = new KafkaPrincipal("Group", "client") - - val BrokerListenerName = "BROKER" - val ClientListenerName = "CLIENT" - val ControllerListenerName = "CONTROLLER" - - class GroupPrincipalBuilder extends DefaultKafkaPrincipalBuilder(null, null) { - override def build(context: AuthenticationContext): KafkaPrincipal = { - context.listenerName match { - case BrokerListenerName | ControllerListenerName => BrokerPrincipal - case ClientListenerName => ClientPrincipal - case listenerName => throw new IllegalArgumentException(s"No principal mapped to listener $listenerName") - } - } - } -} - -class GroupAuthorizerIntegrationTest extends BaseRequestTest { - - val brokerId: Integer = 0 - - override def brokerCount: Int = 1 - override def interBrokerListenerName: ListenerName = new ListenerName(BrokerListenerName) - override def listenerName: ListenerName = new ListenerName(ClientListenerName) - - def brokerPrincipal: KafkaPrincipal = BrokerPrincipal - def clientPrincipal: KafkaPrincipal = ClientPrincipal - - override def kraftControllerConfigs(testInfo: TestInfo): collection.Seq[Properties] = { - val controllerConfigs = super.kraftControllerConfigs(testInfo) - controllerConfigs.foreach(addNodeProperties) - controllerConfigs - } - - override def brokerPropertyOverrides(properties: Properties): Unit = { - properties.put(ServerConfigs.BROKER_ID_CONFIG, brokerId.toString) - addNodeProperties(properties) - } - - private def addNodeProperties(properties: Properties): Unit = { - properties.put(ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, classOf[StandardAuthorizer].getName) - properties.put(StandardAuthorizer.SUPER_USERS_CONFIG, BrokerPrincipal.toString) - - properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1") - properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") - 
properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, "1") - properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") - properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1") - properties.put(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, classOf[GroupPrincipalBuilder].getName) - } - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - doSetup(testInfo, createOffsetsTopic = false) - - // Allow inter-broker communication - addAndVerifyAcls( - Set(createAcl(AclOperation.CLUSTER_ACTION, AclPermissionType.ALLOW, principal = BrokerPrincipal)), - new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) - ) - - createOffsetsTopic(interBrokerListenerName) - } - - private def createAcl(aclOperation: AclOperation, - aclPermissionType: AclPermissionType, - principal: KafkaPrincipal = ClientPrincipal): AccessControlEntry = { - new AccessControlEntry(principal.toString, WILDCARD_HOST, aclOperation, aclPermissionType) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUnauthorizedProduceAndConsume(groupProtocol: String): Unit = { - val topic = "topic" - val topicPartition = new TopicPartition("topic", 0) - - createTopic(topic, listenerName = interBrokerListenerName) - - val producer = createProducer() - val produceException = assertThrows(classOf[ExecutionException], - () => producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get()).getCause - assertTrue(produceException.isInstanceOf[TopicAuthorizationException]) - assertEquals(Set(topic), produceException.asInstanceOf[TopicAuthorizationException].unauthorizedTopics.asScala) - - val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(List(topicPartition).asJava) - val consumeException = assertThrows(classOf[TopicAuthorizationException], - () => TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1)) - assertEquals(Set(topic), consumeException.unauthorizedTopics.asScala) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeUnsubscribeWithoutGroupPermission(groupProtocol: String): Unit = { - val topic = "topic" - - createTopic(topic, listenerName = interBrokerListenerName) - - // allow topic read/write permission to poll/send record - addAndVerifyAcls( - Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) - ) - val producer = createProducer() - producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() - producer.close() - - // allow group read permission to join group - val group = "group" - addAndVerifyAcls( - Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) - ) - - val props = new Properties() - props.put(ConsumerConfig.GROUP_ID_CONFIG, group) - props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - val consumer = createConsumer(configOverrides = props) - consumer.subscribe(List(topic).asJava) - TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) - - removeAndVerifyAcls( - Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new 
ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) - ) - - assertDoesNotThrow(new Executable { - override def execute(): Unit = consumer.unsubscribe() - }) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeCloseWithoutGroupPermission(groupProtocol: String): Unit = { - val topic = "topic" - createTopic(topic, listenerName = interBrokerListenerName) - - // allow topic read/write permission to poll/send record - addAndVerifyAcls( - Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) - ) - val producer = createProducer() - producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() - - // allow group read permission to join group - val group = "group" - addAndVerifyAcls( - Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) - ) - - val props = new Properties() - props.put(ConsumerConfig.GROUP_ID_CONFIG, group) - props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - val consumer = createConsumer(configOverrides = props) - consumer.subscribe(List(topic).asJava) - TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) - - removeAndVerifyAcls( - Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) - ) - - assertDoesNotThrow(new Executable { - override def execute(): Unit = consumer.close() - }) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAuthorizedProduceAndConsume(groupProtocol: String): Unit = { - val topic = "topic" - val topicPartition = new TopicPartition("topic", 0) - - createTopic(topic, listenerName = interBrokerListenerName) - - addAndVerifyAcls( - Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) - ) - val producer = createProducer() - producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() - - addAndVerifyAcls( - Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), - new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) - ) - val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(List(topicPartition).asJava) - TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) - } - -} diff --git a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala index 3e761620efd9c..e161406a50e51 100644 --- a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala @@ -16,8 +16,8 @@ import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Typ import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, ConsumerGroupDescription} import org.apache.kafka.clients.consumer.{Consumer, GroupProtocol, OffsetAndMetadata} -import org.apache.kafka.common.errors.GroupIdNotFoundException -import org.apache.kafka.common.{ConsumerGroupState, GroupType, KafkaFuture, TopicPartition} +import 
org.apache.kafka.common.errors.{GroupIdNotFoundException, UnknownTopicOrPartitionException} +import org.apache.kafka.common.{ConsumerGroupState, GroupType, KafkaFuture, TopicCollection, TopicPartition} import org.junit.jupiter.api.Assertions._ import scala.jdk.CollectionConverters._ @@ -27,11 +27,12 @@ import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.storage.internals.log.UnifiedLog +import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Timeout import java.time.Duration -import java.util.Collections import java.util.concurrent.TimeUnit +import scala.concurrent.ExecutionException @Timeout(120) class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { @@ -47,9 +48,9 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { ) def testGroupCoordinatorPropagatesOffsetsTopicCompressionCodec(): Unit = { withConsumer(groupId = "group", groupProtocol = GroupProtocol.CLASSIC) { consumer => - consumer.commitSync(Map( - new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) -> new OffsetAndMetadata(10, "") - ).asJava) + consumer.commitSync(java.util.Map.of( + new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0), new OffsetAndMetadata(10, "") + )) val logManager = cluster.brokers().asScala.head._2.logManager def getGroupMetadataLogOpt: Option[UnifiedLog] = @@ -85,7 +86,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a consumer group grp1 with one member. The member subscribes to foo and leaves. This creates // a mix of group records with tombstones to delete the member. withConsumer(groupId = "grp1", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment.asScala.nonEmpty @@ -105,7 +106,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(List("grp1").asJava) + .describeConsumerGroups(java.util.List.of("grp1")) .describedGroups() .asScala .toMap @@ -134,14 +135,14 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // and ensure that all the offset commit records are before the consumer group records due to the // rebalance after the commit sync. withConsumer(groupId = "grp2", groupProtocol = GroupProtocol.CONSUMER, enableAutoCommit = false) { consumer => - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty }, msg = "Consumer did not get an non empty assignment") consumer.commitSync() consumer.unsubscribe() - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -161,7 +162,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(List("grp2").asJava) + .describeConsumerGroups(java.util.List.of("grp2")) .describedGroups() .asScala .toMap @@ -188,7 +189,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a consumer group grp3 with one member. 
The member subscribes to foo and leaves the group. Then // the group is deleted. This creates tombstones to delete the member, the group and the offsets. withConsumer(groupId = "grp3", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -196,7 +197,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } admin - .deleteConsumerGroups(List("grp3").asJava) + .deleteConsumerGroups(java.util.List.of("grp3")) .deletedGroups() .get("grp3") .get(10, TimeUnit.SECONDS) @@ -214,7 +215,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(List("grp3").asJava) + .describeConsumerGroups(java.util.List.of("grp3")) .describedGroups() .asScala .toMap @@ -241,7 +242,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a classic group grp4 with one member. Upgrades the group to the consumer // protocol. withConsumer(groupId = "grp4", groupProtocol = GroupProtocol.CLASSIC) { consumer => - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -249,7 +250,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } withConsumer(groupId = "grp4", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(List("foo").asJava) + consumer.subscribe(java.util.List.of("foo")) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -269,7 +270,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. 
withAdmin { admin => val groups = admin - .describeConsumerGroups(List("grp4").asJava) + .describeConsumerGroups(java.util.List.of("grp4")) .describedGroups() .asScala .toMap @@ -278,6 +279,58 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } } + @ClusterTest( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testRecreatingConsumerOffsetsTopic(): Unit = { + withAdmin { admin => + TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + withConsumer(groupId = "group", groupProtocol = GroupProtocol.CONSUMER) { consumer => + consumer.subscribe(List("foo").asJava) + TestUtils.waitUntilTrue(() => { + consumer.poll(Duration.ofMillis(50)) + consumer.assignment().asScala.nonEmpty + }, msg = "Consumer did not get an non empty assignment") + } + + admin + .deleteTopics(TopicCollection.ofTopicNames(List(Topic.GROUP_METADATA_TOPIC_NAME).asJava)) + .all() + .get() + + TestUtils.waitUntilTrue(() => { + try { + admin + .describeTopics(TopicCollection.ofTopicNames(List(Topic.GROUP_METADATA_TOPIC_NAME).asJava)) + .topicNameValues() + .get(Topic.GROUP_METADATA_TOPIC_NAME) + .get(JTestUtils.DEFAULT_MAX_WAIT_MS, TimeUnit.MILLISECONDS) + false + } catch { + case e: ExecutionException => + e.getCause.isInstanceOf[UnknownTopicOrPartitionException] + } + }, msg = s"${Topic.GROUP_METADATA_TOPIC_NAME} was not deleted") + + withConsumer(groupId = "group", groupProtocol = GroupProtocol.CONSUMER) { consumer => + consumer.subscribe(List("foo").asJava) + TestUtils.waitUntilTrue(() => { + consumer.poll(Duration.ofMillis(50)) + consumer.assignment().asScala.nonEmpty + }, msg = "Consumer did not get an non empty assignment") + } + } + } + private def rollAndCompactConsumerOffsets(): Unit = { val tp = new TopicPartition("__consumer_offsets", 0) val broker = cluster.brokers.asScala.head._2 @@ -324,7 +377,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { assertEquals(groupId, group.groupId) assertEquals(groupType, group.`type`) assertEquals(state, group.state) - assertEquals(Collections.emptyList, group.members) + assertEquals(java.util.List.of, group.members) } private def assertDescribedDeadGroup( diff --git a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala index 6b23b2b9d3de1..303e989e9b4fb 100644 --- a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala +++ b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala @@ -22,22 +22,25 @@ import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsume import kafka.utils.TestUtils import kafka.utils.Implicits._ -import java.util.Properties +import java.util +import java.util.{Optional, Properties, UUID} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} import kafka.server.KafkaConfig import kafka.integration.KafkaServerTestHarness import kafka.security.JaasTestUtils import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} +import org.apache.kafka.clients.consumer.internals.{AsyncKafkaConsumer, StreamsRebalanceData, StreamsRebalanceListener} import org.apache.kafka.common.network.{ConnectionMode, ListenerName} import org.apache.kafka.common.serialization.{ByteArrayDeserializer, 
ByteArraySerializer, Deserializer, Serializer} -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.common.utils.Utils import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.MetadataLogConfig -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs} import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import scala.collection.mutable import scala.collection.Seq +import scala.jdk.CollectionConverters._ import scala.jdk.javaapi.OptionConverters /** @@ -50,6 +53,7 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { val producerConfig = new Properties val consumerConfig = new Properties val shareConsumerConfig = new Properties + val streamsConsumerConfig = new Properties val adminClientConfig = new Properties val superuserClientConfig = new Properties val serverConfig = new Properties @@ -57,6 +61,7 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { private val consumers = mutable.Buffer[Consumer[_, _]]() private val shareConsumers = mutable.Buffer[ShareConsumer[_, _]]() + private val streamsConsumers = mutable.Buffer[Consumer[_, _]]() private val producers = mutable.Buffer[KafkaProducer[_, _]]() private val adminClients = mutable.Buffer[Admin]() @@ -71,10 +76,6 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties, logDirCount = logDirCount) configureListeners(cfgs) modifyConfigs(cfgs) - if (isShareGroupTest()) { - cfgs.foreach(_.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,share")) - cfgs.foreach(_.setProperty(ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, "true")) - } cfgs.foreach(_.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, TestUtils.tempDir().getAbsolutePath)) insertControllerListenersIfNeeded(cfgs) cfgs.map(KafkaConfig.fromProps) @@ -153,7 +154,12 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { shareConsumerConfig.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "group") shareConsumerConfig.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) shareConsumerConfig.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - + + streamsConsumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) + streamsConsumerConfig.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "group") + streamsConsumerConfig.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) + streamsConsumerConfig.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) + adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) doSuperuserSetup(testInfo) @@ -212,6 +218,67 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { shareConsumer } + def createStreamsConsumer[K, V](keyDeserializer: Deserializer[K] = new ByteArrayDeserializer, + valueDeserializer: Deserializer[V] = new ByteArrayDeserializer, + configOverrides: Properties = new Properties, + configsToRemove: List[String] = List(), + streamsRebalanceData: StreamsRebalanceData): AsyncKafkaConsumer[K, V] = { + val props = new Properties + props ++= streamsConsumerConfig + props ++= configOverrides + configsToRemove.foreach(props.remove(_)) + val 
streamsConsumer = new AsyncKafkaConsumer[K, V]( + new ConsumerConfig(ConsumerConfig.appendDeserializerToConfig(Utils.propsToMap(props), keyDeserializer, valueDeserializer)), + keyDeserializer, + valueDeserializer, + Optional.of(streamsRebalanceData) + ) + streamsConsumers += streamsConsumer + streamsConsumer + } + + def createStreamsGroup[K, V](configOverrides: Properties = new Properties, + configsToRemove: List[String] = List(), + inputTopic: String, + streamsGroupId: String): AsyncKafkaConsumer[K, V] = { + val props = new Properties() + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) + props.put(ConsumerConfig.GROUP_ID_CONFIG, streamsGroupId) + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) + props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) + props ++= configOverrides + configsToRemove.foreach(props.remove(_)) + + val streamsRebalanceData = new StreamsRebalanceData( + UUID.randomUUID(), + Optional.empty(), + util.Map.of( + "subtopology-0", new StreamsRebalanceData.Subtopology( + util.Set.of(inputTopic), + util.Set.of(), + util.Map.of(), + util.Map.of(inputTopic + "-store-changelog", new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())), + util.Set.of() + )), + Map.empty[String, String].asJava + ) + + val consumer = createStreamsConsumer( + keyDeserializer = new ByteArrayDeserializer().asInstanceOf[Deserializer[K]], + valueDeserializer = new ByteArrayDeserializer().asInstanceOf[Deserializer[V]], + configOverrides = props, + streamsRebalanceData = streamsRebalanceData + ) + consumer.subscribe(util.Set.of(inputTopic), + new StreamsRebalanceListener { + override def onTasksRevoked(tasks: util.Set[StreamsRebalanceData.TaskId]): Unit = () + override def onTasksAssigned(assignment: StreamsRebalanceData.Assignment): Unit = () + override def onAllTasksLost(): Unit = () + }) + consumer + } + def createAdminClient( listenerName: ListenerName = listenerName, configOverrides: Properties = new Properties @@ -244,11 +311,14 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { consumers.foreach(_.close(Duration.ZERO)) shareConsumers.foreach(_.wakeup()) shareConsumers.foreach(_.close(Duration.ZERO)) + streamsConsumers.foreach(_.wakeup()) + streamsConsumers.foreach(_.close(Duration.ZERO)) adminClients.foreach(_.close(Duration.ZERO)) producers.clear() consumers.clear() shareConsumers.clear() + streamsConsumers.clear() adminClients.clear() } finally { super.tearDown() diff --git a/core/src/test/scala/integration/kafka/api/MetricsTest.scala b/core/src/test/scala/integration/kafka/api/MetricsTest.scala index e08801343fc5b..b2930c6b3e5dc 100644 --- a/core/src/test/scala/integration/kafka/api/MetricsTest.scala +++ b/core/src/test/scala/integration/kafka/api/MetricsTest.scala @@ -79,9 +79,9 @@ class MetricsTest extends IntegrationTestHarness with SaslSetup { /** * Verifies some of the metrics of producer, consumer as well as server. 
*/ - @ParameterizedTest(name = "testMetrics with systemRemoteStorageEnabled: {1}") - @CsvSource(Array("kraft, true", "kraft, false")) - def testMetrics(quorum: String, systemRemoteStorageEnabled: Boolean): Unit = { + @ParameterizedTest(name = "testMetrics with systemRemoteStorageEnabled: {0}") + @CsvSource(Array("true", "false")) + def testMetrics(systemRemoteStorageEnabled: Boolean): Unit = { val topic = "mytopic" createTopic(topic, numPartitions = 1, @@ -98,7 +98,7 @@ class MetricsTest extends IntegrationTestHarness with SaslSetup { sendRecords(producer, numRecords, recordSize, tp) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) consumer.seek(tp, 0) TestUtils.consumeRecords(consumer, numRecords) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index 0ab2328c53f79..c8e26445922f6 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -22,19 +22,19 @@ import java.nio.ByteBuffer import java.nio.file.{Files, Paths, StandardOpenOption} import java.lang.{Long => JLong} import java.time.{Duration => JDuration} -import java.util.Arrays.asList import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit} -import java.util.{Collections, Locale, Optional, Properties} +import java.util.{Collections, Optional, Properties} import java.{time, util} import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig import kafka.utils.TestUtils._ -import kafka.utils.{LoggingController, TestInfoUtils, TestUtils} +import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.HostResolver import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.ConfigEntry.ConfigSource import org.apache.kafka.clients.admin._ +import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer import org.apache.kafka.clients.consumer.{CommitFailedException, Consumer, ConsumerConfig, GroupProtocol, KafkaConsumer, OffsetAndMetadata, ShareConsumer} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.acl.{AccessControlEntry, AclBinding, AclBindingFilter, AclOperation, AclPermissionType} @@ -53,14 +53,16 @@ import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEX import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.authorizer.AclEntry +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, MetadataVersion} import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.logger.LoggingController import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogFileUtils} import org.apache.kafka.test.TestUtils.{DEFAULT_MAX_WAIT_MS, assertFutureThrows} import org.apache.logging.log4j.core.config.Configurator import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{MethodSource, ValueSource} +import org.junit.jupiter.params.provider.MethodSource import 
org.slf4j.LoggerFactory import java.util.AbstractMap.SimpleImmutableEntry @@ -68,7 +70,6 @@ import scala.collection.Seq import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption import scala.util.{Random, Using} /** @@ -88,7 +89,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - Configurator.reconfigure(); + Configurator.reconfigure() brokerLoggerConfigResource = new ConfigResource( ConfigResource.Type.BROKER_LOGGER, brokers.head.config.brokerId.toString) } @@ -104,10 +105,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describe and broker val brokerResource1 = new ConfigResource(ConfigResource.Type.BROKER, brokers(1).config.brokerId.toString) val brokerResource2 = new ConfigResource(ConfigResource.Type.BROKER, brokers(2).config.brokerId.toString) - val configResources = Seq(brokerResource1, brokerResource2) + val configResources = util.List.of(brokerResource1, brokerResource2) val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.describeConfigs(configResources.asJava,new DescribeConfigsOptions().timeoutMs(0)).all().get() + brokenClient.describeConfigs(configResources,new DescribeConfigsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) @@ -122,11 +123,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId) client = Admin.create(config) - val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> clientId).asJava) + val entity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.CLIENT_ID, clientId)) val configEntries = Map(QuotaConfig.CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG -> 1.0, QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 3.0) - client.alterClientQuotas(Seq(new ClientQuotaAlteration(entity, configEntries.map { case (k, v) => + client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(entity, configEntries.map { case (k, v) => new ClientQuotaAlteration.Op(k, v) - }.asJavaCollection)).asJavaCollection).all.get + }.asJavaCollection))).all.get TestUtils.waitUntilTrue(() => { // wait for our ClientQuotaEntity to be set @@ -145,27 +146,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = Admin.create(config) //"" can not create default quota - val userEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> defaultQuota).asJava) - val clientEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> defaultQuota).asJava) + val userEntity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.USER, defaultQuota)) + val clientEntity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.CLIENT_ID, defaultQuota)) val userAlterations = new ClientQuotaAlteration(userEntity, - Collections.singleton(new ClientQuotaAlteration.Op("consumer_byte_rate", 10000D))) + util.Set.of(new ClientQuotaAlteration.Op("consumer_byte_rate", 10000D))) val clientAlterations = new ClientQuotaAlteration(clientEntity, - Collections.singleton(new ClientQuotaAlteration.Op("producer_byte_rate", 10000D))) - val alterations = List(userAlterations, clientAlterations) - client.alterClientQuotas(alterations.asJava).all().get() + util.Set.of(new ClientQuotaAlteration.Op("producer_byte_rate", 10000D))) + val 
alterations = util.List.of(userAlterations, clientAlterations) + client.alterClientQuotas(alterations).all().get() TestUtils.waitUntilTrue(() => { try { //check "" as a default quota use - val userDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val userDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)))).entities().get() - val clientDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val clientDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.CLIENT_ID)))).entities().get() //check "" as a normal quota use - val userNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val userNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER,defaultQuota)))).entities().get() - val clientNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val clientNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID,defaultQuota)))).entities().get() userDefaultQuotas.size() == 0 && clientDefaultQuotas.size() == 0 && userNormalQuota.size() == 1 && clientNormalQuota.size() == 1 @@ -176,17 +177,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { //null can create default quota val userDefaultEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> Option.empty[String].orNull).asJava) - client.alterClientQuotas(List(new ClientQuotaAlteration(userDefaultEntity, Collections.singleton( - new ClientQuotaAlteration.Op("consumer_byte_rate", 100D)))).asJava).all().get() + client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(userDefaultEntity, util.Set.of( + new ClientQuotaAlteration.Op("consumer_byte_rate", 100D))))).all().get() val clientDefaultEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> Option.empty[String].orNull).asJava) - client.alterClientQuotas(List(new ClientQuotaAlteration(clientDefaultEntity, Collections.singleton( - new ClientQuotaAlteration.Op("producer_byte_rate", 100D)))).asJava).all().get() + client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(clientDefaultEntity, util.Set.of( + new ClientQuotaAlteration.Op("producer_byte_rate", 100D))))).all().get() TestUtils.waitUntilTrue(() => { try { - val userDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val userDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)))).entities().get() - val clientDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(Collections.singletonList( + val clientDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.CLIENT_ID)))).entities().get() userDefaultQuota.size() == 1 && clientDefaultQuota.size() == 1 } catch { @@ -201,7 +202,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // add a new user val targetUserName = "tom" - client.alterUserScramCredentials(Collections.singletonList( + 
client.alterUserScramCredentials(util.List.of( new UserScramCredentialUpsertion(targetUserName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456") )).all.get TestUtils.waitUntilTrue(() => client.describeUserScramCredentials().all().get().size() == 1, @@ -218,7 +219,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }) // add other users - client.alterUserScramCredentials(util.Arrays.asList( + client.alterUserScramCredentials(util.List.of( new UserScramCredentialUpsertion("tom2", new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456"), new UserScramCredentialUpsertion("tom3", new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456") )).all().get @@ -226,7 +227,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { "Add user scram credential timeout") // alter user info - client.alterUserScramCredentials(Collections.singletonList( + client.alterUserScramCredentials(util.List.of( new UserScramCredentialUpsertion(targetUserName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_512, 8192), "123456") )).all.get TestUtils.waitUntilTrue(() => { @@ -246,7 +247,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(8192, credentialList(1).iterations()) // test describeUserScramCredentials(List users) - val userAndScramMap = client.describeUserScramCredentials(Collections.singletonList("tom2")).all().get() + val userAndScramMap = client.describeUserScramCredentials(util.List.of("tom2")).all().get() assertEquals(1, userAndScramMap.size()) val scram = userAndScramMap.get("tom2") assertNotNull(scram) @@ -268,7 +269,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { try { // test describeUserScramCredentials(List users, DescribeUserScramCredentialsOptions options) val exception = assertThrows(classOf[ExecutionException], () => { - client.describeUserScramCredentials(Collections.singletonList("tom4"), + client.describeUserScramCredentials(util.List.of("tom4"), new DescribeUserScramCredentialsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) @@ -282,8 +283,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(configs, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.assign(Collections.singleton(topicPartition)) - consumer.seekToBeginning(Collections.singleton(topicPartition)) + consumer.assign(util.Set.of(topicPartition)) + consumer.seekToBeginning(util.Set.of(topicPartition)) var consumeNum = 0 TestUtils.waitUntilTrue(() => { val records = consumer.poll(time.Duration.ofMillis(100)) @@ -297,10 +298,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testDescribeProducers(groupProtocol: String): Unit = { client = createAdminClient - client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() def appendCommonRecords = (records: Int) => { - val producer = new KafkaProducer(Collections.singletonMap(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + val producer = new KafkaProducer(util.Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, plaintextBootstrapServers(brokers).asInstanceOf[Object]), new ByteArraySerializer, new ByteArraySerializer) try { (0 until records).foreach(i => 
@@ -326,7 +327,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } def queryProducerDetail() = client - .describeProducers(Collections.singletonList(topicPartition)) + .describeProducers(util.List.of(topicPartition)) .partitionResult(topicPartition).get().activeProducers().asScala // send common msg @@ -369,13 +370,13 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @Test def testDescribeTransactions(): Unit = { client = createAdminClient - client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() var transactionId = "foo" val stateAbnormalMsg = "The transaction state is abnormal" def describeTransactions(): TransactionDescription = { - client.describeTransactions(Collections.singleton(transactionId)).description(transactionId).get() + client.describeTransactions(util.Set.of(transactionId)).description(transactionId).get() } def transactionState(): TransactionState = { describeTransactions().state() @@ -385,7 +386,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // calculate the transaction partition id val transactionPartitionId = Utils.abs(transactionId.hashCode) % brokers.head.metadataCache.numPartitions(Topic.TRANSACTION_STATE_TOPIC_NAME).get - val transactionTopic = client.describeTopics(Collections.singleton(Topic.TRANSACTION_STATE_TOPIC_NAME)) + val transactionTopic = client.describeTopics(util.Set.of(Topic.TRANSACTION_STATE_TOPIC_NAME)) val partitionList = transactionTopic.allTopicNames().get().get(Topic.TRANSACTION_STATE_TOPIC_NAME).partitions() partitionList.asScala.filter(tp => tp.partition() == transactionPartitionId).head.leader().id() } @@ -411,7 +412,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(findCoordinatorIdByTransactionId(transactionId), transactionResult.coordinatorId()) assertEquals(0, transactionResult.producerId()) assertEquals(0, transactionResult.producerEpoch()) - assertEquals(Collections.singleton(topicPartition), transactionResult.topicPartitions()) + assertEquals(util.Set.of(topicPartition), transactionResult.topicPartitions()) producer.commitTransaction() TestUtils.waitUntilTrue(() => transactionState() == TransactionState.COMPLETE_COMMIT, stateAbnormalMsg) @@ -440,7 +441,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val transactionSendMsgResult = describeTransactions() assertEquals(findCoordinatorIdByTransactionId(transactionId), transactionSendMsgResult.coordinatorId()) - assertEquals(Collections.singleton(topicPartition), transactionSendMsgResult.topicPartitions()) + assertEquals(util.Set.of(topicPartition), transactionSendMsgResult.topicPartitions()) assertEquals(topicPartition, transactionSendMsgResult.topicPartitions().asScala.head) TestUtils.waitUntilTrue(() => transactionState() == TransactionState.ONGOING, stateAbnormalMsg) @@ -457,7 +458,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { try { val transactionId = "foo" val exception = assertThrows(classOf[ExecutionException], () => { - client.describeTransactions(Collections.singleton(transactionId), + client.describeTransactions(util.Set.of(transactionId), new DescribeTransactionsOptions().timeoutMs(0)).description(transactionId).get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) @@ -482,11 +483,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def 
testListTransactions(): Unit = { def createTransactionList(): Unit = { client = createAdminClient - client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() val stateAbnormalMsg = "The transaction state is abnormal" def transactionState(transactionId: String): TransactionState = { - client.describeTransactions(Collections.singleton(transactionId)).description(transactionId).get().state() + client.describeTransactions(util.Set.of(transactionId)).description(transactionId).get().state() } val transactionId1 = "foo" @@ -527,11 +528,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, client.listTransactions().all().get().size()) assertEquals(2, client.listTransactions(new ListTransactionsOptions() - .filterStates(Collections.singletonList(TransactionState.COMPLETE_COMMIT))).all().get().size()) + .filterStates(util.List.of(TransactionState.COMPLETE_COMMIT))).all().get().size()) assertEquals(1, client.listTransactions(new ListTransactionsOptions() - .filterStates(Collections.singletonList(TransactionState.COMPLETE_ABORT))).all().get().size()) + .filterStates(util.List.of(TransactionState.COMPLETE_ABORT))).all().get().size()) assertEquals(1, client.listTransactions(new ListTransactionsOptions() - .filterProducerIds(Collections.singletonList(0L))).all().get().size()) + .filterProducerIds(util.List.of(0L))).all().get().size()) // ensure all transaction's txnStartTimestamp >= 500 Thread.sleep(501) @@ -556,7 +557,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testAbortTransaction(groupProtocol: String): Unit = { client = createAdminClient val tp = new TopicPartition("topic1", 0) - client.createTopics(Collections.singletonList(new NewTopic(tp.topic(), 1, 1.toShort))).all().get() + client.createTopics(util.List.of(new NewTopic(tp.topic(), 1, 1.toShort))).all().get() def checkConsumer = (tp: TopicPartition, expectedNumber: Int) => { val configs = new util.HashMap[String, Object]() @@ -565,15 +566,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(configs, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.assign(Collections.singleton(tp)) - consumer.seekToBeginning(Collections.singleton(tp)) + consumer.assign(util.Set.of(tp)) + consumer.seekToBeginning(util.Set.of(tp)) val records = consumer.poll(time.Duration.ofSeconds(3)) assertEquals(expectedNumber, records.count()) } finally consumer.close() } def appendRecord = (records: Int) => { - val producer = new KafkaProducer(Collections.singletonMap(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + val producer = new KafkaProducer(util.Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, plaintextBootstrapServers(brokers).asInstanceOf[Object]), new ByteArraySerializer, new ByteArraySerializer) try { (0 until records).foreach(i => @@ -602,7 +603,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { appendRecord(1) checkConsumer(tp, 1) - val transactionalProducer = client.describeProducers(Collections.singletonList(tp)) + val transactionalProducer = client.describeProducers(util.List.of(tp)) .partitionResult(tp).get().activeProducers().asScala.minBy(_.producerId()) assertDoesNotThrow(() => client.abortTransaction( @@ -676,14 +677,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = 
createAdminClient val topic = "mytopic" val topics = Seq(topic) - val newTopics = Seq(new NewTopic(topic, 1, 1.toShort)) + val newTopics = util.List.of(new NewTopic(topic, 1, 1.toShort)) - client.createTopics(newTopics.asJava).all.get() + client.createTopics(newTopics).all.get() waitForTopics(client, topics, List()) - val newTopicsWithInvalidRF = Seq(new NewTopic(topic, 1, (brokers.size + 1).toShort)) + val newTopicsWithInvalidRF = util.List.of(new NewTopic(topic, 1, (brokers.size + 1).toShort)) val e = assertThrows(classOf[ExecutionException], - () => client.createTopics(newTopicsWithInvalidRF.asJava, new CreateTopicsOptions().validateOnly(true)).all.get()) + () => client.createTopics(newTopicsWithInvalidRF, new CreateTopicsOptions().validateOnly(true)).all.get()) assertTrue(e.getCause.isInstanceOf[TopicExistsException]) } @@ -691,12 +692,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testDeleteTopicsWithIds(): Unit = { client = createAdminClient val topics = Seq("mytopic", "mytopic2", "mytopic3") - val newTopics = Seq( - new NewTopic("mytopic", Map((0: Integer) -> Seq[Integer](1, 2).asJava, (1: Integer) -> Seq[Integer](2, 0).asJava).asJava), + val newTopics = util.List.of( + new NewTopic("mytopic", util.Map.of(0: Integer, util.List.of[Integer](1, 2), 1: Integer, util.List.of[Integer](2, 0))), new NewTopic("mytopic2", 3, 3.toShort), - new NewTopic("mytopic3", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) + new NewTopic("mytopic3", Optional.empty[Integer], Optional.empty[java.lang.Short]) ) - val createResult = client.createTopics(newTopics.asJava) + val createResult = client.createTopics(newTopics) createResult.all.get() waitForTopics(client, topics, List()) val topicIds = getTopicIds().values.toSet @@ -712,7 +713,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { try { val timeoutOption = new DeleteTopicsOptions().timeoutMs(0) val exception = assertThrows(classOf[ExecutionException], () => - client.deleteTopics(Seq("test-topic").asJava, timeoutOption).all().get()) + client.deleteTopics(util.List.of("test-topic"), timeoutOption).all().get()) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally client.close(time.Duration.ZERO) } @@ -760,7 +761,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { try { val timeoutOption = new DescribeTopicsOptions().timeoutMs(0) val exception = assertThrows(classOf[ExecutionException], () => - client.describeTopics(Seq("test-topic").asJava, timeoutOption).allTopicNames().get()) + client.describeTopics(util.List.of("test-topic"), timeoutOption).allTopicNames().get()) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally client.close(time.Duration.ZERO) } @@ -777,7 +778,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { waitForTopics(client, Seq(existingTopic), List()) val nonExistingTopic = "non-existing" - val results = client.describeTopics(Seq(nonExistingTopic, existingTopic).asJava).topicNameValues() + val results = client.describeTopics(util.List.of(nonExistingTopic, existingTopic)).topicNameValues() assertEquals(existingTopic, results.get(existingTopic).get.name) assertFutureThrows(classOf[UnknownTopicOrPartitionException], results.get(nonExistingTopic)) } @@ -795,7 +796,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val nonExistingTopicId = Uuid.randomUuid() - val results = client.describeTopics(TopicCollection.ofTopicIds(Seq(existingTopicId, 
nonExistingTopicId).asJava)).topicIdValues() + val results = client.describeTopics(TopicCollection.ofTopicIds(util.List.of(existingTopicId, nonExistingTopicId))).topicIdValues() assertEquals(existingTopicId, results.get(existingTopicId).get.topicId()) assertFutureThrows(classOf[UnknownTopicIdException], results.get(nonExistingTopicId)) } @@ -810,7 +811,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ensureConsistentKRaftMetadata() val existingTopicId = brokers.head.metadataCache.getTopicId(existingTopic) - val results = client.describeTopics(TopicCollection.ofTopicNames(Seq(existingTopic).asJava)).topicNameValues() + val results = client.describeTopics(TopicCollection.ofTopicNames(util.List.of(existingTopic))).topicNameValues() assertEquals(existingTopicId, results.get(existingTopic).get.topicId()) } @@ -894,11 +895,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Generate two mutually exclusive replicaAssignment val firstReplicaAssignment = brokers.map { server => - val logDir = new File(server.config.logDirs(randomNums(server))).getAbsolutePath + val logDir = new File(server.config.logDirs.get(randomNums(server))).getAbsolutePath new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir }.toMap val secondReplicaAssignment = brokers.map { server => - val logDir = new File(server.config.logDirs(1 - randomNums(server))).getAbsolutePath + val logDir = new File(server.config.logDirs.get(1 - randomNums(server))).getAbsolutePath new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir }.toMap @@ -979,17 +980,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = createAdminClient val brokerException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.BROKER, "-1")).asJava).all().get() + client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.BROKER, "-1"))).all().get() }) assertInstanceOf(classOf[TimeoutException], brokerException.getCause) val topicException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.TOPIC, "none_topic")).asJava).all().get() + client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.TOPIC, "none_topic"))).all().get() }) assertInstanceOf(classOf[UnknownTopicOrPartitionException], topicException.getCause) val brokerLoggerException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "-1")).asJava).all().get() + client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "-1"))).all().get() }) assertInstanceOf(classOf[TimeoutException], brokerLoggerException.getCause) } @@ -999,7 +1000,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = createAdminClient val groupResource = new ConfigResource(ConfigResource.Type.GROUP, "none_group") - val groupResult = client.describeConfigs(Seq(groupResource).asJava).all().get().get(groupResource) + val groupResult = client.describeConfigs(util.List.of(groupResource)).all().get().get(groupResource) assertNotEquals(0, groupResult.entries().size()) } @@ -1024,8 +1025,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describe topics and broker val brokerResource1 = new ConfigResource(ConfigResource.Type.BROKER, brokers(1).config.brokerId.toString) val brokerResource2 = new 
ConfigResource(ConfigResource.Type.BROKER, brokers(2).config.brokerId.toString) - val configResources = Seq(topicResource1, topicResource2, brokerResource1, brokerResource2) - val describeResult = client.describeConfigs(configResources.asJava) + val configResources = util.List.of(topicResource1, topicResource2, brokerResource1, brokerResource2) + val describeResult = client.describeConfigs(configResources) val configs = describeResult.all.get assertEquals(4, configs.size) @@ -1094,22 +1095,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val groupResource = new ConfigResource(ConfigResource.Type.GROUP, group) // Alter group configs - var groupAlterConfigs = Seq( + var groupAlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ).asJavaCollection + ) - var alterResult = client.incrementalAlterConfigs(Map( - groupResource -> groupAlterConfigs - ).asJava) + var alterResult = client.incrementalAlterConfigs(util.Map.of( + groupResource, groupAlterConfigs + )) - assertEquals(Set(groupResource).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(groupResource), alterResult.values.keySet) alterResult.all.get(15, TimeUnit.SECONDS) ensureConsistentKRaftMetadata() // Describe group config, verify that group config was updated correctly - var describeResult = client.describeConfigs(Seq(groupResource).asJava) + var describeResult = client.describeConfigs(util.List.of(groupResource)) var configs = describeResult.all.get(15, TimeUnit.SECONDS) assertEquals(1, configs.size) @@ -1120,29 +1121,29 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(ConfigSource.DEFAULT_CONFIG, configs.get(groupResource).get(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG).source) // Alter group with validateOnly=true - groupAlterConfigs = Seq( + groupAlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "60000"), AlterConfigOp.OpType.SET) - ).asJava + ) - alterResult = client.incrementalAlterConfigs(Map( - groupResource -> groupAlterConfigs - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(util.Map.of( + groupResource, groupAlterConfigs + ), new AlterConfigsOptions().validateOnly(true)) alterResult.all.get(15, TimeUnit.SECONDS) // Verify that group config was not updated due to validateOnly = true - describeResult = client.describeConfigs(Seq(groupResource).asJava) + describeResult = client.describeConfigs(util.List.of(groupResource)) configs = describeResult.all.get(15, TimeUnit.SECONDS) assertEquals("50000", configs.get(groupResource).get(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG).value) // Alter group with validateOnly=true with invalid configs - groupAlterConfigs = Seq( + groupAlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "5"), AlterConfigOp.OpType.SET) - ).asJava + ) - alterResult = client.incrementalAlterConfigs(Map( - groupResource -> groupAlterConfigs - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(util.Map.of( + groupResource, groupAlterConfigs + ), new AlterConfigsOptions().validateOnly(true)) assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(groupResource), @@ 
-1176,27 +1177,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def numPartitions(topic: String, expectedNumPartitionsOpt: Option[Int]): Int = partitions(topic, expectedNumPartitionsOpt).size // validateOnly: try creating a new partition (no assignments), to bring the total to 3 partitions - var alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(3)).asJava, validateOnly) + var alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(3)), validateOnly) var altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 1) // try creating a new partition (no assignments), to bring the total to 3 partitions - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(3)).asJava, actuallyDoIt) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(3)), actuallyDoIt) altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 3) // validateOnly: now try creating a new partition (with assignments), to bring the total to 3 partitions - val newPartition2Assignments = asList[util.List[Integer]](asList(0, 1), asList(1, 2)) - alterResult = client.createPartitions(Map(topic2 -> - NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, validateOnly) + val newPartition2Assignments = util.List.of[util.List[Integer]](util.List.of[Integer](0, 1), util.List.of[Integer](1, 2)) + alterResult = client.createPartitions(util.Map.of(topic2, + NewPartitions.increaseTo(3, newPartition2Assignments)), validateOnly) altered = alterResult.values.get(topic2).get TestUtils.waitForAllPartitionsMetadata(brokers, topic2, expectedNumPartitions = 1) // now try creating a new partition (with assignments), to bring the total to 3 partitions - alterResult = client.createPartitions(Map(topic2 -> - NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, actuallyDoIt) + alterResult = client.createPartitions(util.Map.of(topic2, + NewPartitions.increaseTo(3, newPartition2Assignments)), actuallyDoIt) altered = alterResult.values.get(topic2).get val actualPartitions2 = partitions(topic2, expectedNumPartitionsOpt = Some(3)) assertEquals(3, actualPartitions2.size) @@ -1208,8 +1209,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val desc = if (option.validateOnly()) "validateOnly" else "validateOnly=false" // try a newCount which would be a decrease - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(1)).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(1)), option) var e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when newCount is a decrease") @@ -1219,8 +1220,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (without assignment) - alterResult = client.createPartitions(Map(topic2 -> - NewPartitions.increaseTo(3)).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic2, + NewPartitions.increaseTo(3)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get, () => s"$desc: Expect InvalidPartitionsException when requesting a noop") 
assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) @@ -1229,16 +1230,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic2, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (where the assignment matches current state) - alterResult = client.createPartitions(Map(topic2 -> - NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic2, + NewPartitions.increaseTo(3, newPartition2Assignments)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get) assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) assertEquals(3, numPartitions(topic2, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (where the assignment doesn't match current state) - alterResult = client.createPartitions(Map(topic2 -> - NewPartitions.increaseTo(3, newPartition2Assignments.asScala.reverse.toList.asJava)).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic2, + NewPartitions.increaseTo(3, newPartition2Assignments.asScala.reverse.toList.asJava)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get) assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) @@ -1246,8 +1247,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // try a bad topic name val unknownTopic = "an-unknown-topic" - alterResult = client.createPartitions(Map(unknownTopic -> - NewPartitions.increaseTo(2)).asJava, option) + alterResult = client.createPartitions(util.Map.of(unknownTopic, + NewPartitions.increaseTo(2)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(unknownTopic).get, () => s"$desc: Expect InvalidTopicException when using an unknown topic") assertTrue(e.getCause.isInstanceOf[UnknownTopicOrPartitionException], desc) @@ -1255,8 +1256,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) // try an invalid newCount - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(-22)).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(-22)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when newCount is invalid") assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) @@ -1266,8 +1267,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments where the number of brokers != replication factor - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4, asList(asList(1, 2)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1, 2)))), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when #brokers != replication factor") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1277,8 +1278,8 @@ class PlaintextAdminIntegrationTest 
extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try #assignments < with the increase - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(6, asList(asList(1)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(6, util.List.of(util.List.of[Integer](1)))), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1287,8 +1288,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try #assignments > with the increase - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4, asList(asList(1), asList(2)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1), util.List.of[Integer](2)))), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount") exceptionMsgStr = "Attempted to add 1 additional partition(s), but only 2 assignment(s) were specified." @@ -1297,8 +1298,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try with duplicate brokers in assignments - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4, asList(asList(1, 1)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1, 1)))), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments has duplicate brokers") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1307,8 +1308,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments with differently sized inner lists - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(5, asList(asList(1), asList(1, 0)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(5, util.List.of(util.List.of[Integer](1), util.List.of[Integer](1, 0)))), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments have differently sized inner lists") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1318,8 +1319,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments with unknown brokers - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4, asList(asList(12)))).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](12)))), option) e = 
assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments contains an unknown broker") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1328,8 +1329,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try with empty assignments - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4, Collections.emptyList())).asJava, option) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4, util.List.of)), option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments is empty") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1339,9 +1340,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } // a mixed success, failure response - alterResult = client.createPartitions(Map( - topic1 -> NewPartitions.increaseTo(4), - topic2 -> NewPartitions.increaseTo(2)).asJava, actuallyDoIt) + alterResult = client.createPartitions(util.Map.of( + topic1, NewPartitions.increaseTo(4), + topic2, NewPartitions.increaseTo(2)), actuallyDoIt) // assert that the topic1 now has 4 partitions altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 4) @@ -1353,10 +1354,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Delete the topic. Verify addition of partitions to deleted topic is not possible. // In KRaft, the deletion occurs immediately and hence we have a different Exception thrown in the response. 
- val deleteResult = client.deleteTopics(asList(topic1)) + val deleteResult = client.deleteTopics(util.List.of(topic1)) deleteResult.topicNameValues.get(topic1).get - alterResult = client.createPartitions(Map(topic1 -> - NewPartitions.increaseTo(4)).asJava, validateOnly) + alterResult = client.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(4)), validateOnly) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => "Expect InvalidTopicException or UnknownTopicOrPartitionException when the topic is queued for deletion") assertTrue(e.getCause.isInstanceOf[UnknownTopicOrPartitionException], e.toString) @@ -1375,21 +1376,21 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - consumer.seekToBeginning(Collections.singleton(topicPartition)) + consumer.seekToBeginning(util.Set.of(topicPartition)) assertEquals(0L, consumer.position(topicPartition)) - val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) + val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) val lowWatermark = result.lowWatermarks().get(topicPartition).get.lowWatermark assertEquals(5L, lowWatermark) - consumer.seekToBeginning(Collections.singletonList(topicPartition)) + consumer.seekToBeginning(util.List.of(topicPartition)) assertEquals(5L, consumer.position(topicPartition)) consumer.seek(topicPartition, 7L) assertEquals(7L, consumer.position(topicPartition)) - client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava).all.get - consumer.seekToBeginning(Collections.singletonList(topicPartition)) + client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK))).all.get + consumer.seekToBeginning(util.List.of(topicPartition)) assertEquals(10L, consumer.position(topicPartition)) } @@ -1405,7 +1406,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) + var result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) var lowWatermark: Option[Long] = Some(result.lowWatermarks.get(topicPartition).get.lowWatermark) assertEquals(Some(5), lowWatermark) @@ -1419,7 +1420,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.waitUntilTrue(() => { // Need to retry if leader is not available for the partition - result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(0L)).asJava) + result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(0L))) lowWatermark = None val future = result.lowWatermarks().get(topicPartition) @@ -1446,7 +1447,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) + val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) val lowWatermark = result.lowWatermarks.get(topicPartition).get.lowWatermark assertEquals(3L, lowWatermark) @@ -1480,7 +1481,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = 
createProducer() sendRecords(producer, 100, topicPartition) - val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) + val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) result.all().get() // start the stopped broker to verify that it will be able to fetch from new log start offset @@ -1495,7 +1496,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // kill the same follower again, produce more records, and delete records beyond follower's LOE killBroker(followerIndex) sendRecords(producer, 100, topicPartition) - val result1 = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(117L)).asJava) + val result1 = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(117L))) result1.all().get() restartDeadBrokers() TestUtils.waitForBrokersInIsr(client, topicPartition, Set(followerIndex)) @@ -1511,7 +1512,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { sendRecords(producer, expectedLEO, topicPartition) // delete records to move log start offset - val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) + val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) result.all().get() // make sure we are in the expected state after delete records for (i <- 0 until brokerCount) { @@ -1520,11 +1521,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } // we will create another dir just for one server - val futureLogDir = brokers(0).config.logDirs(1) + val futureLogDir = brokers(0).config.logDirs.get(1) val futureReplica = new TopicPartitionReplica(topic, 0, brokers(0).config.brokerId) // Verify that replica can be moved to the specified log directory - client.alterReplicaLogDirs(Map(futureReplica -> futureLogDir).asJava).all.get + client.alterReplicaLogDirs(util.Map.of(futureReplica, futureLogDir)).all.get TestUtils.waitUntilTrue(() => { futureLogDir == brokers(0).logManager.getLog(topicPartition).get.dir.getParent }, "timed out waiting for replica movement") @@ -1547,19 +1548,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - var returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) + var returnedOffsets = consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) assertTrue(returnedOffsets.containsKey(topicPartition)) assertEquals(0L, returnedOffsets.get(topicPartition).offset()) - var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) + var result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) result.all.get - returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) + returnedOffsets = consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) assertTrue(returnedOffsets.containsKey(topicPartition)) assertEquals(5L, returnedOffsets.get(topicPartition).offset()) - result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava) + result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK))) result.all.get - returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) + returnedOffsets 
= consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) assertTrue(returnedOffsets.containsKey(topicPartition)) assertNull(returnedOffsets.get(topicPartition)) } @@ -1568,7 +1569,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testDeleteRecordsAfterCorruptRecords(groupProtocol: String): Unit = { val config = new Properties() - config.put(TopicConfig.SEGMENT_BYTES_CONFIG, "200") + config.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "200") createTopic(topic, numPartitions = 1, replicationFactor = 1, config) client = createAdminClient @@ -1584,10 +1585,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { sendRecords(0, 10) sendRecords(10, 20) - val topicDesc = client.describeTopics(Collections.singletonList(topic)).allTopicNames().get().get(topic) + val topicDesc = client.describeTopics(util.List.of(topic)).allTopicNames().get().get(topic) assertEquals(1, topicDesc.partitions().size()) val partitionLeaderId = topicDesc.partitions().get(0).leader().id() - val logDirMap = client.describeLogDirs(Collections.singletonList(partitionLeaderId)) + val logDirMap = client.describeLogDirs(util.List.of(partitionLeaderId)) .allDescriptions().get().get(partitionLeaderId) val logDir = logDirMap.entrySet.stream .filter(entry => entry.getValue.replicaInfos.containsKey(topicPartition)).findAny().get().getKey @@ -1608,22 +1609,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val overrideConfig = new Properties overrideConfig.setProperty("auto.offset.reset", "earliest") val consumer = createConsumer(configOverrides = overrideConfig) - consumer.subscribe(Seq(topic).asJava) + consumer.subscribe(util.List.of(topic)) assertEquals("Encountered corrupt message when fetching offset 0 for topic-partition topic-0", assertThrows(classOf[KafkaException], () => consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS))).getMessage) val partitionFollowerId = brokers.map(b => b.config.nodeId).filter(id => id != partitionLeaderId).head - val newAssignment = Map(topicPartition -> Optional.of(new NewPartitionReassignment( - List(Integer.valueOf(partitionLeaderId), Integer.valueOf(partitionFollowerId)).asJava))).asJava + val newAssignment = util.Map.of(topicPartition, Optional.of(new NewPartitionReassignment( + util.List.of(Integer.valueOf(partitionLeaderId), Integer.valueOf(partitionFollowerId))))) // add follower to topic partition client.alterPartitionReassignments(newAssignment).all().get() // delete records in corrupt segment (the first segment) - client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(firstSegmentRecordsSize)).asJava).all.get + client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(firstSegmentRecordsSize))).all.get // verify reassignment is finished after delete records TestUtils.waitForBrokersInIsr(client, topicPartition, Set(partitionLeaderId, partitionFollowerId)) // seek to beginning and make sure we can consume all records - consumer.seekToBeginning(Collections.singletonList(topicPartition)) + consumer.seekToBeginning(util.List.of(topicPartition)) assertEquals(19, TestUtils.consumeRecords(consumer, 20 - firstSegmentRecordsSize).last.offset()) } @@ -1640,12 +1641,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { var messageCount = 0 TestUtils.consumeRecords(consumer, 10) - client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava).all.get + 
client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))).all.get consumer.seek(topicPartition, 1) messageCount = 0 TestUtils.consumeRecords(consumer, 7) - client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(8L)).asJava).all.get + client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(8L))).all.get consumer.seek(topicPartition, 1) messageCount = 0 TestUtils.consumeRecords(consumer, 2) @@ -1662,12 +1663,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - assertEquals(5L, client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) + assertEquals(5L, client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) .lowWatermarks.get(topicPartition).get.lowWatermark) // OffsetOutOfRangeException if offset > high_watermark val cause = assertThrows(classOf[ExecutionException], - () => client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(20L)).asJava).lowWatermarks.get(topicPartition).get).getCause + () => client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(20L))).lowWatermarks.get(topicPartition).get).getCause assertEquals(classOf[OffsetOutOfRangeException], cause.getClass) } @@ -1698,7 +1699,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = createAdminClient val resource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val resources = Collections.singletonList(resource) + val resources = util.List.of(resource) val includeDocumentation = new DescribeConfigsOptions().includeDocumentation(true) var describeConfigs = client.describeConfigs(resources, includeDocumentation) var configEntries = describeConfigs.values().get(resource).get().entries() @@ -1711,7 +1712,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } private def subscribeAndWaitForAssignment(topic: String, consumer: Consumer[Array[Byte], Array[Byte]]): Unit = { - consumer.subscribe(Collections.singletonList(topic)) + consumer.subscribe(util.List.of(topic)) TestUtils.pollUntilTrue(consumer, () => !consumer.assignment.isEmpty, "Expected non-empty assignment") } @@ -1744,8 +1745,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) client = createAdminClient assertFutureThrows(classOf[SecurityDisabledException], client.describeAcls(AclBindingFilter.ANY).values()) - assertFutureThrows(classOf[SecurityDisabledException], client.createAcls(Collections.singleton(acl)).all()) - assertFutureThrows(classOf[SecurityDisabledException], client.deleteAcls(Collections.singleton(acl.toFilter())).all()) + assertFutureThrows(classOf[SecurityDisabledException], client.createAcls(util.Set.of(acl)).all()) + assertFutureThrows(classOf[SecurityDisabledException], client.deleteAcls(util.Set.of(acl.toFilter)).all()) } /** @@ -1819,48 +1820,90 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(1, factory.failuresInjected) } - /** - * Test the consumer group APIs. 
- */ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumerGroups(groupProtocol: String): Unit = { + def testListConsumerGroupOffsets(groupProtocol: String): Unit = { val config = createConfig client = Admin.create(config) try { - // Verify that initially there are no consumer groups to list. - val list1 = client.listConsumerGroups() - assertEquals(0, list1.all().get().size()) - assertEquals(0, list1.errors().get().size()) - assertEquals(0, list1.valid().get().size()) + assertConsumerGroupsIsClean() + val testTopicName = "test_topic" - val testTopicName1 = testTopicName + "1" - val testTopicName2 = testTopicName + "2" - val testNumPartitions = 2 + prepareTopics(List(testTopicName), 2) + prepareRecords(testTopicName) - client.createTopics(util.Arrays.asList( - new NewTopic(testTopicName, testNumPartitions, 1.toShort), - new NewTopic(testTopicName1, testNumPartitions, 1.toShort), - new NewTopic(testTopicName2, testNumPartitions, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) + val testGroupId = "test_group_id" + val testClientId = "test_client_id" + val groupInstances = Set("") + val topics = Set(testTopicName) + + // We need to disable the auto commit because after the members got removed from group, the offset commit + // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) + val defaultConsumerConfig = new Properties(consumerConfig) + defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) - val producer = createProducer() try { - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + // Start consumer polling threads in the background + backgroundConsumers.start() + val topicPartition = new TopicPartition(testTopicName, 0) + + // Test listConsumerGroupOffsets + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(topicPartition) && (parts.get(topicPartition).offset() == 1) + }, "Expected the offset for partition 0 to eventually become 1.") + + // Test listConsumerGroupOffsets with requireStable true + val options = new ListConsumerGroupOffsetsOptions().requireStable(true) + var parts = client.listConsumerGroupOffsets(testGroupId, options) + .partitionsToOffsetAndMetadata() + .get() + assertTrue(parts.containsKey(topicPartition)) + assertEquals(1, parts.get(topicPartition).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec + val groupSpecs = util.Map.of( + testGroupId, + new ListConsumerGroupOffsetsSpec().topicPartitions(util.List.of(new TopicPartition(testTopicName, 0))) + ) + parts = client.listConsumerGroupOffsets(groupSpecs) + .partitionsToOffsetAndMetadata() + .get() + assertTrue(parts.containsKey(topicPartition)) + assertEquals(1, parts.get(topicPartition).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option + parts = client.listConsumerGroupOffsets(groupSpecs, options) + .partitionsToOffsetAndMetadata() + .get() + assertTrue(parts.containsKey(topicPartition)) + assertEquals(1, 
parts.get(topicPartition).offset()) } finally { - Utils.closeQuietly(producer, "producer") + backgroundConsumers.close() } + } finally { + Utils.closeQuietly(client, "adminClient") + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testListConsumerGroups(groupProtocol: String): Unit = { + val config = createConfig + client = Admin.create(config) + try { + assertConsumerGroupsIsClean() + + val testTopicName = "test_topic" + prepareTopics(List(testTopicName), 2) val testGroupId = "test_group_id" val testClientId = "test_client_id" - val testInstanceId1 = "test_instance_id_1" - val testInstanceId2 = "test_instance_id_2" - val fakeGroupId = "fake_group_id" - - // contains two static members and one dynamic member - val groupInstanceSet = Set(testInstanceId1, testInstanceId2, "") - val topicSet = Set(testTopicName, testTopicName1, testTopicName2) + val groupInstances = Set("") + val topics = Set(testTopicName) // We need to disable the auto commit because after the members got removed from group, the offset commit // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) @@ -1868,32 +1911,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - - val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) - groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => - val configOverrides = new Properties() - if (groupInstanceId != "") { - // static member - configOverrides.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) - } - backgroundConsumerSet.addConsumer(topic, configOverrides) - } + val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) try { val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC // Start consumer polling threads in the background - backgroundConsumerSet.start() + backgroundConsumers.start() // Test that we can list the new group. 
TestUtils.waitUntilTrue(() => { val matching = client.listConsumerGroups.all.get.asScala.filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE) + group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) @@ -1901,30 +1934,78 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in group type $groupType") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) - .inGroupStates(Set(GroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) + .inGroupStates(util.Set.of(GroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE) + group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE) + group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) - val matching = client.listConsumerGroups(options).all.get.asScala.filter( - _.groupId == testGroupId) + val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.EMPTY)) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(_.groupId == testGroupId) matching.isEmpty - }, s"Expected to find zero groups") + }, "Expected to find zero groups") + } finally { + backgroundConsumers.close() + } + } finally { + Utils.closeQuietly(client, "adminClient") + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersAll")) + def testDescribeGroups(groupProtocol: String): Unit = { + val config = createConfig + client = Admin.create(config) + try { + assertConsumerGroupsIsClean() + + val testTopicName = "test_topic" + val testTopicName1 = testTopicName + "1" + val testTopicName2 = testTopicName + "2" + val testNumPartitions = 2 + prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) + + val producer = createProducer() + try { + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + } finally { + Utils.closeQuietly(producer, "producer") + } + + val testGroupId = "test_group_id" + val testClientId = "test_client_id" + val testInstanceId1 = "test_instance_id_1" + val testInstanceId2 = "test_instance_id_2" + val 
fakeGroupId = "fake_group_id" + + // contains two static members and one dynamic member + val groupInstances = Set(testInstanceId1, testInstanceId2, "") + val topics = Set(testTopicName, testTopicName1, testTopicName2) + + // We need to disable the auto commit because after the members got removed from group, the offset commit + // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) + val defaultConsumerConfig = new Properties(consumerConfig) + defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) + + try { + val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC + // Start consumer polling threads in the background + backgroundConsumers.start() - val describeWithFakeGroupResult = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava, + val describeWithFakeGroupResult = client.describeConsumerGroups(util.List.of(testGroupId, fakeGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) @@ -1941,17 +2022,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(testGroupId, testGroupDescription.groupId()) assertFalse(testGroupDescription.isSimpleConsumerGroup) - assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) + assertEquals(groupInstances.size, testGroupDescription.members().size()) val members = testGroupDescription.members() members.asScala.foreach { member => assertEquals(testClientId, member.clientId) assertEquals(if (groupType == GroupType.CLASSIC) Optional.empty else Optional.of(true), member.upgraded) } val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) - topicSet.foreach { topic => - val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) - assertEquals(testNumPartitions, topicPartitions.size) - } + topics.foreach(topic => assertEquals(testNumPartitions, topicPartitionsByTopic.getOrElse(topic, List.empty).size)) val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) @@ -1964,35 +2042,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Test that all() also throws GroupIdNotFoundException assertFutureThrows(classOf[GroupIdNotFoundException], describeWithFakeGroupResult.all(), s"Group $fakeGroupId not found.") - - val testTopicPart0 = new TopicPartition(testTopicName, 0) - - // Test listConsumerGroupOffsets - TestUtils.waitUntilTrue(() => { - val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() - parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 1) - }, s"Expected the offset for partition 0 to eventually become 1.") - - // Test listConsumerGroupOffsets with requireStable true - val options = new ListConsumerGroupOffsetsOptions().requireStable(true) - var parts = client.listConsumerGroupOffsets(testGroupId, options) - .partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, 
parts.get(testTopicPart0).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec - val groupSpecs = Collections.singletonMap(testGroupId, - new ListConsumerGroupOffsetsSpec().topicPartitions(Collections.singleton(new TopicPartition(testTopicName, 0)))) - parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, parts.get(testTopicPart0).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option - parts = client.listConsumerGroupOffsets(groupSpecs, options).partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, parts.get(testTopicPart0).offset()) } finally { - backgroundConsumerSet.close() + backgroundConsumers.close() } } finally { Utils.closeQuietly(client, "adminClient") @@ -2017,7 +2068,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val testTopicName = "test_topic" val testNumPartitions = 2 - client.createTopics(util.Arrays.asList( + client.createTopics(util.List.of( new NewTopic(testTopicName, testNumPartitions, 1.toShort), )).all.get waitForTopics(client, List(testTopicName), List()) @@ -2041,25 +2092,25 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) classicConsumer = createConsumer(configOverrides = newConsumerConfig) - classicConsumer.subscribe(List(testTopicName).asJava) + classicConsumer.subscribe(util.List.of(testTopicName)) classicConsumer.poll(JDuration.ofMillis(1000)) newConsumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, testConsumerClientId) consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name) consumerConsumer = createConsumer(configOverrides = newConsumerConfig) - consumerConsumer.subscribe(List(testTopicName).asJava) + consumerConsumer.subscribe(util.List.of(testTopicName)) consumerConsumer.poll(JDuration.ofMillis(1000)) TestUtils.waitUntilTrue(() => { classicConsumer.poll(JDuration.ofMillis(100)) consumerConsumer.poll(JDuration.ofMillis(100)) - val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get + val describeConsumerGroupResult = client.describeConsumerGroups(util.List.of(testGroupId)).all.get describeConsumerGroupResult.containsKey(testGroupId) && describeConsumerGroupResult.get(testGroupId).groupState == GroupState.STABLE && describeConsumerGroupResult.get(testGroupId).members.size == 2 }, s"Expected to find 2 members in a stable group $testGroupId") - val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get + val describeConsumerGroupResult = client.describeConsumerGroups(util.List.of(testGroupId)).all.get val group = describeConsumerGroupResult.get(testGroupId) assertNotNull(group) assertEquals(Optional.of(2), group.groupEpoch) @@ -2090,29 +2141,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val config = createConfig client = Admin.create(config) try { - // Verify that initially there are no consumer groups to list. 
- val list1 = client.listConsumerGroups() - assertEquals(0, list1.all().get().size()) - assertEquals(0, list1.errors().get().size()) - assertEquals(0, list1.valid().get().size()) + assertConsumerGroupsIsClean() + val testTopicName = "test_topic" val testTopicName1 = testTopicName + "1" val testTopicName2 = testTopicName + "2" val testNumPartitions = 2 - client.createTopics(util.Arrays.asList( - new NewTopic(testTopicName, testNumPartitions, 1.toShort), - new NewTopic(testTopicName1, testNumPartitions, 1.toShort), - new NewTopic(testTopicName2, testNumPartitions, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) - - val producer = createProducer() - try { - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() - } finally { - Utils.closeQuietly(producer, "producer") - } + prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) + prepareRecords(testTopicName) val testGroupId = "test_group_id" val testClientId = "test_client_id" @@ -2156,7 +2193,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.state.get == ConsumerGroupState.STABLE && @@ -2165,8 +2202,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in group type $groupType") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) - .inStates(Set(ConsumerGroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) + .inStates(util.Set.of(ConsumerGroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.state.get == ConsumerGroupState.STABLE && @@ -2175,8 +2212,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) - .inGroupStates(Set(GroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) + .inGroupStates(util.Set.of(GroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.state.get == ConsumerGroupState.STABLE && @@ -2185,7 +2222,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().inStates(util.Set.of(ConsumerGroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.state.get == ConsumerGroupState.STABLE && @@ -2194,7 +2231,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in state Stable") 
TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.STABLE)) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && group.state.get == ConsumerGroupState.STABLE && @@ -2203,20 +2240,20 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.EMPTY).asJava) + val options = new ListConsumerGroupsOptions().inStates(util.Set.of(ConsumerGroupState.EMPTY)) val matching = client.listConsumerGroups(options).all.get.asScala.filter( _.groupId == testGroupId) matching.isEmpty }, s"Expected to find zero groups") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) + val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.EMPTY)) val matching = client.listConsumerGroups(options).all.get.asScala.filter( _.groupId == testGroupId) matching.isEmpty }, s"Expected to find zero groups") - val describeWithFakeGroupResult = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava, + val describeWithFakeGroupResult = client.describeConsumerGroups(util.List.of(testGroupId, fakeGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) @@ -2263,8 +2300,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(1, parts.get(testTopicPart0).offset()) // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec - val groupSpecs = Collections.singletonMap(testGroupId, - new ListConsumerGroupOffsetsSpec().topicPartitions(Collections.singleton(new TopicPartition(testTopicName, 0)))) + val groupSpecs = util.Map.of(testGroupId, + new ListConsumerGroupOffsetsSpec().topicPartitions(util.Set.of(new TopicPartition(testTopicName, 0)))) parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() assertTrue(parts.containsKey(testTopicPart0)) assertEquals(1, parts.get(testTopicPart0).offset()) @@ -2292,28 +2329,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = Admin.create(config) try { // Verify that initially there are no consumer groups to list. 
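The describe step above queries a real and a non-existent group in one call with includeAuthorizedOperations(true). A small sketch of consuming the per-group futures from that result (group ids are placeholders):

```scala
import java.util.concurrent.ExecutionException
import org.apache.kafka.clients.admin.{Admin, DescribeConsumerGroupsOptions}
import org.apache.kafka.common.errors.GroupIdNotFoundException

object DescribeGroupsSketch {
  def describe(admin: Admin): Unit = {
    val result = admin.describeConsumerGroups(
      java.util.List.of("test_group_id", "fake_group_id"),
      new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true))

    // describedGroups() exposes one future per requested group id, so a missing
    // group only fails its own future rather than the whole request.
    result.describedGroups().forEach { (groupId, future) =>
      try {
        val description = future.get()
        println(s"$groupId: state=${description.groupState()} acls=${description.authorizedOperations()}")
      } catch {
        case e: ExecutionException if e.getCause.isInstanceOf[GroupIdNotFoundException] =>
          println(s"$groupId does not exist")
      }
    }
  }
}
```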
- val list1 = client.listConsumerGroups() - assertEquals(0, list1.all().get().size()) - assertEquals(0, list1.errors().get().size()) - assertEquals(0, list1.valid().get().size()) + assertConsumerGroupsIsClean() val testTopicName = "test_topic" val testTopicName1 = testTopicName + "1" val testTopicName2 = testTopicName + "2" val testNumPartitions = 2 - client.createTopics(util.Arrays.asList( - new NewTopic(testTopicName, testNumPartitions, 1.toShort), - new NewTopic(testTopicName1, testNumPartitions, 1.toShort), - new NewTopic(testTopicName2, testNumPartitions, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) + prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) - val producer = createProducer() - try { - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() - } finally { - Utils.closeQuietly(producer, "producer") - } + prepareRecords(testTopicName) val testGroupId = "test_group_id" val testClientId = "test_client_id" @@ -2331,9 +2355,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - // We need to set internal.leave.group.on.close to validate dynamic member removal, but it only works for ClassicConsumer - // After KIP-1092, we can control dynamic member removal for both ClassicConsumer and AsyncConsumer - defaultConsumerConfig.setProperty("internal.leave.group.on.close", "false") val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => @@ -2352,7 +2373,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Test delete non-exist consumer instance val invalidInstanceId = "invalid-instance-id" var removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions( - Collections.singleton(new MemberToRemove(invalidInstanceId)) + util.Set.of(new MemberToRemove(invalidInstanceId)) )) assertFutureThrows(classOf[UnknownMemberIdException], removeMembersResult.all) @@ -2360,7 +2381,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertFutureThrows(classOf[UnknownMemberIdException], firstMemberFuture) // Test consumer group deletion - var deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava) + var deleteResult = client.deleteConsumerGroups(util.List.of(testGroupId, fakeGroupId)) assertEquals(2, deleteResult.deletedGroups().size()) // Deleting the fake group ID should get GroupIdNotFoundException. 
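The deletion above submits a real and a fake group id together and inspects the per-group futures, expecting GroupIdNotFoundException for the fake one. A minimal sketch of that pattern (ids are placeholders):

```scala
import java.util.concurrent.ExecutionException
import org.apache.kafka.clients.admin.Admin
import org.apache.kafka.common.errors.GroupIdNotFoundException

object DeleteGroupsSketch {
  def deleteGroups(admin: Admin): Unit = {
    val result = admin.deleteConsumerGroups(java.util.List.of("test_group_id", "fake_group_id"))
    // deletedGroups() contains one future per group id; a non-existent or
    // non-empty group fails only its own future.
    result.deletedGroups().forEach { (groupId, future) =>
      try {
        future.get()
        println(s"deleted $groupId")
      } catch {
        case e: ExecutionException if e.getCause.isInstanceOf[GroupIdNotFoundException] =>
          println(s"$groupId was not found")
      }
    }
  }
}
```

Checking deletedGroups() per id keeps one failing group from masking the others, whereas all() fails as soon as any requested group fails.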
@@ -2375,50 +2396,39 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { backgroundConsumerSet.stop() // Check the members in the group after consumers have stopped - var describeTestGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava, + var describeTestGroupResult = client.describeConsumerGroups(util.List.of(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) assertEquals(1, describeTestGroupResult.describedGroups().size()) var testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() assertEquals(testGroupId, testGroupDescription.groupId) assertFalse(testGroupDescription.isSimpleConsumerGroup) - - // Although we set `internal.leave.group.on.close` in the consumer, it only works for ClassicConsumer. - // After KIP-1092, we can control dynamic member removal in consumer.close() - if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) { - assertEquals(3, testGroupDescription.members().size()) - } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) { - assertEquals(2, testGroupDescription.members().size()) - } + assertEquals(2, testGroupDescription.members().size()) // Test delete one static member removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, - new RemoveMembersFromConsumerGroupOptions(Collections.singleton(new MemberToRemove(testInstanceId1)))) + new RemoveMembersFromConsumerGroupOptions(util.Set.of(new MemberToRemove(testInstanceId1)))) assertNull(removeMembersResult.all().get()) assertNull(removeMembersResult.memberResult(new MemberToRemove(testInstanceId1)).get()) - describeTestGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava, + describeTestGroupResult = client.describeConsumerGroups(util.List.of(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() - if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) { - assertEquals(2, testGroupDescription.members().size()) - } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) { - assertEquals(1, testGroupDescription.members().size()) - } + assertEquals(1, testGroupDescription.members().size()) // Delete all active members remaining removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions()) assertNull(removeMembersResult.all().get()) // The group should contain no members now. 
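Above, one static member is removed by its group.instance.id and the remaining members are then removed with an empty options object. A compact sketch of those two calls (group and instance ids are placeholders):

```scala
import org.apache.kafka.clients.admin.{Admin, MemberToRemove, RemoveMembersFromConsumerGroupOptions}

object RemoveMembersSketch {
  def removeMembers(admin: Admin): Unit = {
    val groupId = "test_group_id"

    // Remove one static member, identified by its group.instance.id.
    val one = new RemoveMembersFromConsumerGroupOptions(
      java.util.Set.of(new MemberToRemove("test_instance_id_1")))
    admin.removeMembersFromConsumerGroup(groupId, one).all().get()

    // With no members listed, the broker removes every active member of the group.
    admin.removeMembersFromConsumerGroup(groupId, new RemoveMembersFromConsumerGroupOptions())
      .all().get()
  }
}
```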
- testGroupDescription = client.describeConsumerGroups(Seq(testGroupId).asJava, + testGroupDescription = client.describeConsumerGroups(util.List.of(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)).describedGroups().get(testGroupId).get() assertTrue(testGroupDescription.members().isEmpty) // Consumer group deletion on empty group should succeed - deleteResult = client.deleteConsumerGroups(Seq(testGroupId).asJava) + deleteResult = client.deleteConsumerGroups(util.List.of(testGroupId)) assertEquals(1, deleteResult.deletedGroups().size()) assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) @@ -2427,7 +2437,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Test alterConsumerGroupOffsets when group is empty val testTopicPart0 = new TopicPartition(testTopicName, 0) val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(testGroupId, - Collections.singletonMap(testTopicPart0, new OffsetAndMetadata(0L))) + util.Map.of(testTopicPart0, new OffsetAndMetadata(0L))) assertNull(alterConsumerGroupOffsetsResult.all().get()) assertNull(alterConsumerGroupOffsetsResult.partitionResult(testTopicPart0).get()) @@ -2458,16 +2468,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val tp1 = new TopicPartition(testTopicName, 0) val tp2 = new TopicPartition("foo", 0) - client.createTopics(Collections.singleton( + client.createTopics(util.Set.of( new NewTopic(testTopicName, 1, 1.toShort))).all().get() waitForTopics(client, List(testTopicName), List()) - val producer = createProducer() - try { - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() - } finally { - Utils.closeQuietly(producer, "producer") - } + prepareRecords(testTopicName) val newConsumerConfig = new Properties(consumerConfig) newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) @@ -2479,13 +2484,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } Using.resource(createConsumer(configOverrides = newConsumerConfig)) { consumer => - consumer.subscribe(Collections.singletonList(testTopicName)) + consumer.subscribe(util.List.of(testTopicName)) val records = consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) assertNotEquals(0, records.count) consumer.commitSync() // Test offset deletion while consuming - val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) + val partitions = new util.LinkedHashSet[TopicPartition](util.List.of(tp1, tp2)) + val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, partitions) // Top level error will equal to the first partition level error assertFutureThrows(classOf[GroupSubscribedToTopicException], offsetDeleteResult.all()) @@ -2493,7 +2499,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.partitionResult(tp2)) // Test the fake group ID - val fakeDeleteResult = client.deleteConsumerGroupOffsets(fakeGroupId, Set(tp1, tp2).asJava) + val fakeDeleteResult = client.deleteConsumerGroupOffsets(fakeGroupId, util.Set.of(tp1, tp2)) assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.all()) assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.partitionResult(tp1)) @@ -2501,7 +2507,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } // Test offset deletion when group is empty - val offsetDeleteResult = 
client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) + val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, util.Set.of(tp1, tp2)) assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.all()) assertNull(offsetDeleteResult.partitionResult(tp1).get()) @@ -2511,15 +2517,61 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kip932")) - def testListGroups(unused: String): Unit = { + private def prepareTopics(topics: List[String], numberOfPartitions: Int): Unit = { + client.createTopics(topics.map(topic => new NewTopic(topic, numberOfPartitions, 1.toShort)).asJava).all().get() + waitForTopics(client, topics, List()) + } + + private def prepareRecords(testTopicName: String) = { + val producer = createProducer() + try { + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + } finally { + Utils.closeQuietly(producer, "producer") + } + } + + private def prepareConsumers(groupInstanceSet: Set[String], topicSet: Set[String], defaultConsumerConfig: Properties) = { + val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) + groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => + val configOverrides = new Properties() + if (groupInstanceId != "") { + // static member + configOverrides.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) + } + backgroundConsumerSet.addConsumer(topic, configOverrides) + } + backgroundConsumerSet + } + + /** + * Verify that initially there are no consumer groups to list. + */ + private def assertConsumerGroupsIsClean(): Unit = { + val listResult = client.listConsumerGroups() + assertEquals(0, listResult.all().get().size()) + assertEquals(0, listResult.errors().get().size()) + assertEquals(0, listResult.valid().get().size()) + } + + @Test + def testListGroups(): Unit = { val classicGroupId = "classic_group_id" val consumerGroupId = "consumer_group_id" val shareGroupId = "share_group_id" val simpleGroupId = "simple_group_id" + val streamsGroupId = "streams_group_id" val testTopicName = "test_topic" + val config = createConfig + client = Admin.create(config) + + client.createTopics(util.Set.of( + new NewTopic(testTopicName, 1, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName), List()) + val topicPartition = new TopicPartition(testTopicName, 0) + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) val classicGroupConfig = new Properties(consumerConfig) classicGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, classicGroupId) @@ -2534,60 +2586,76 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { shareGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroupId) val shareGroup = createShareConsumer(configOverrides = shareGroupConfig) - val config = createConfig - client = Admin.create(config) - try { - client.createTopics(Collections.singleton( - new NewTopic(testTopicName, 1, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName), List()) - val topicPartition = new TopicPartition(testTopicName, 0) + val streamsGroup = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId + ) - classicGroup.subscribe(Collections.singleton(testTopicName)) + try { + classicGroup.subscribe(util.Set.of(testTopicName)) classicGroup.poll(JDuration.ofMillis(1000)) - consumerGroup.subscribe(Collections.singleton(testTopicName)) + 
consumerGroup.subscribe(util.Set.of(testTopicName)) consumerGroup.poll(JDuration.ofMillis(1000)) - shareGroup.subscribe(Collections.singleton(testTopicName)) + shareGroup.subscribe(util.Set.of(testTopicName)) shareGroup.poll(JDuration.ofMillis(1000)) + streamsGroup.poll(JDuration.ofMillis(1000)) val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, - Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) + util.Map.of(topicPartition, new OffsetAndMetadata(0L))) assertNull(alterConsumerGroupOffsetsResult.all().get()) assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) TestUtils.waitUntilTrue(() => { val groups = client.listGroups().all().get() - groups.size() == 4 + groups.size() == 5 }, "Expected to find all groups") val classicGroupListing = new GroupListing(classicGroupId, Optional.of(GroupType.CLASSIC), "consumer", Optional.of(GroupState.STABLE)) val consumerGroupListing = new GroupListing(consumerGroupId, Optional.of(GroupType.CONSUMER), "consumer", Optional.of(GroupState.STABLE)) val shareGroupListing = new GroupListing(shareGroupId, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)) val simpleGroupListing = new GroupListing(simpleGroupId, Optional.of(GroupType.CLASSIC), "", Optional.of(GroupState.EMPTY)) + val streamsGroupListing = new GroupListing(streamsGroupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)) var listGroupsResult = client.listGroups() assertTrue(listGroupsResult.errors().get().isEmpty) - assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.all().get().asScala.toSet) - assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.valid().get().asScala.toSet) - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CLASSIC))) + TestUtils.waitUntilTrue(() => { + val listGroupResultScala = client.listGroups().all().get().asScala + val filteredStreamsGroups = listGroupResultScala.filter(_.groupId() == streamsGroupId) + val filteredClassicGroups = listGroupResultScala.filter(_.groupId() == classicGroupId) + val filteredConsumerGroups = listGroupResultScala.filter(_.groupId() == consumerGroupId) + val filteredShareGroups = listGroupResultScala.filter(_.groupId() == shareGroupId) + filteredClassicGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && + filteredConsumerGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && + filteredShareGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && + filteredStreamsGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) + }, "Groups not stable yet") + + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.CLASSIC))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.valid().get().asScala.toSet) - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CONSUMER))) + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.CONSUMER))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(consumerGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(consumerGroupListing), 
listGroupsResult.valid().get().asScala.toSet) - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.SHARE))) + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(shareGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(shareGroupListing), listGroupsResult.valid().get().asScala.toSet) + + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.STREAMS))) + assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(streamsGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(streamsGroupListing), listGroupsResult.valid().get().asScala.toSet) + } finally { Utils.closeQuietly(classicGroup, "classicGroup") Utils.closeQuietly(consumerGroup, "consumerGroup") Utils.closeQuietly(shareGroup, "shareGroup") + Utils.closeQuietly(streamsGroup, "streamsGroup") Utils.closeQuietly(client, "adminClient") } } @@ -2606,27 +2674,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val config = createConfig client = Admin.create(config) try { - client.createTopics(Collections.singleton( + client.createTopics(util.Set.of( new NewTopic(testTopicName, 1, 1.toShort) )).all().get() waitForTopics(client, List(testTopicName), List()) val topicPartition = new TopicPartition(testTopicName, 0) - classicGroup.subscribe(Collections.singleton(testTopicName)) + classicGroup.subscribe(util.Set.of(testTopicName)) classicGroup.poll(JDuration.ofMillis(1000)) val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, - Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) + util.Map.of(topicPartition, new OffsetAndMetadata(0L))) assertNull(alterConsumerGroupOffsetsResult.all().get()) assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) - val groupIds = Seq(simpleGroupId, classicGroupId) + val groupIds = util.List.of(simpleGroupId, classicGroupId) TestUtils.waitUntilTrue(() => { - val groups = client.describeClassicGroups(groupIds.asJavaCollection).all().get() + val groups = client.describeClassicGroups(groupIds).all().get() groups.size() == 2 }, "Expected to find all groups") - val classicConsumers = client.describeClassicGroups(groupIds.asJavaCollection).all().get() + val classicConsumers = client.describeClassicGroups(groupIds).all().get() val classicConsumer = classicConsumers.get(classicGroupId) assertNotNull(classicConsumer) assertEquals(classicGroupId, classicConsumer.groupId) @@ -2643,9 +2711,36 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kip932")) - def testShareGroups(unused: String): Unit = { + /** + * Verify that initially there are no share groups to list. 
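testListGroups now expects classic, consumer, share and streams groups and narrows listGroups by GroupType. A short sketch of that type-filtered listing (assumes an Admin client connected to a cluster that already hosts such groups; names are illustrative):

```scala
import org.apache.kafka.clients.admin.{Admin, ListGroupsOptions}
import org.apache.kafka.common.GroupType

object ListGroupsByTypeSketch {
  def listByType(admin: Admin): Unit = {
    // listGroups() with no options returns every group; withTypes() narrows the
    // listing to the requested coordinator group types.
    java.util.List.of(GroupType.CLASSIC, GroupType.CONSUMER, GroupType.SHARE, GroupType.STREAMS)
      .forEach { groupType =>
        val listings = admin
          .listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(groupType)))
          .all().get()
        listings.forEach(g => println(s"$groupType: ${g.groupId} (${g.groupState()})"))
      }
  }
}
```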
+ */ + private def assertNoShareGroupsExist(): Unit = { + val list = client.listGroups() + assertEquals(0, list.all().get().size()) + assertEquals(0, list.errors().get().size()) + assertEquals(0, list.valid().get().size()) + } + + private def createShareConsumerThread[K,V](consumer: ShareConsumer[K,V], topic: String, latch: CountDownLatch): Thread = { + new Thread { + override def run : Unit = { + consumer.subscribe(util.Set.of(topic)) + try { + while (true) { + consumer.poll(JDuration.ofSeconds(5)) + if (latch.getCount > 0L) + latch.countDown() + consumer.commitSync() + } + } catch { + case _: InterruptException => // Suppress the output to stderr + } + } + } + } + + @Test + def testShareGroups(): Unit = { val testGroupId = "test_group_id" val testClientId = "test_client_id" val fakeGroupId = "fake_group_id" @@ -2661,46 +2756,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val consumerSet = Set(createShareConsumer(configOverrides = createProperties())) val topicSet = Set(testTopicName) - val latch = new CountDownLatch(consumerSet.size) - def createShareConsumerThread[K,V](consumer: ShareConsumer[K,V], topic: String): Thread = { - new Thread { - override def run : Unit = { - consumer.subscribe(Collections.singleton(topic)) - try { - while (true) { - consumer.poll(JDuration.ofSeconds(5)) - if (latch.getCount > 0L) - latch.countDown() - consumer.commitSync() - } - } catch { - case _: InterruptException => // Suppress the output to stderr - } - } - } - } - val config = createConfig client = Admin.create(config) - val producer = createProducer() try { - // Verify that initially there are no share groups to list. - val list = client.listGroups() - assertEquals(0, list.all().get().size()) - assertEquals(0, list.errors().get().size()) - assertEquals(0, list.valid().get().size()) - - client.createTopics(Collections.singleton( - new NewTopic(testTopicName, testNumPartitions, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName), List()) - - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + assertNoShareGroupsExist() + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) // Start consumers in a thread that will subscribe to a new group. 
- val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2)) + val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2, latch)) try { consumerThreads.foreach(_.start()) @@ -2715,21 +2781,21 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId") TestUtils.waitUntilTrue(() => { - val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.STABLE)) + val options = new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE)).inGroupStates(util.Set.of(GroupState.STABLE)) client.listGroups(options).all.get.stream().filter(group => group.groupId == testGroupId && group.groupState.get == GroupState.STABLE).count() == 1 }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.EMPTY)) + val options = new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE)).inGroupStates(util.Set.of(GroupState.EMPTY)) client.listGroups(options).all.get.stream().filter(_.groupId == testGroupId).count() == 0 }, s"Expected to find zero groups") var describeWithFakeGroupResult: DescribeShareGroupsResult = null TestUtils.waitUntilTrue(() => { - describeWithFakeGroupResult = client.describeShareGroups(util.Arrays.asList(testGroupId, fakeGroupId), + describeWithFakeGroupResult = client.describeShareGroups(util.List.of(testGroupId, fakeGroupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) val members = describeWithFakeGroupResult.describedGroups().get(testGroupId).get().members() members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()).nonEmpty @@ -2766,7 +2832,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { describeWithFakeGroupResult.all(), s"Group $fakeGroupId not found.") - val describeTestGroupResult = client.describeShareGroups(Collections.singleton(testGroupId), + val describeTestGroupResult = client.describeShareGroups(util.Set.of(testGroupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) assertEquals(1, describeTestGroupResult.all().get().size()) assertEquals(1, describeTestGroupResult.describedGroups().size()) @@ -2778,7 +2844,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describing a share group using describeConsumerGroups reports it as a non-existent group // but the error message is different - val describeConsumerGroupResult = client.describeConsumerGroups(Collections.singleton(testGroupId), + val describeConsumerGroupResult = client.describeConsumerGroups(util.Set.of(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) assertFutureThrows(classOf[GroupIdNotFoundException], describeConsumerGroupResult.all(), @@ -2792,17 +2858,215 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } finally { consumerSet.foreach(consumer => Utils.closeQuietly(consumer, "consumer")) - Utils.closeQuietly(producer, "producer") Utils.closeQuietly(client, "adminClient") } } @Test - def testElectPreferredLeaders(): Unit = { - client = createAdminClient + def testDeleteShareGroupOffsets(): Unit = { + val config = createConfig + client = Admin.create(config) + val testTopicName = "test_topic" + val testGroupId = 
"test_group_id" + val testClientId = "test_client_id" + val fakeGroupId = "fake_group_id" + val fakeTopicName = "foo" - val prefer0 = Seq(0, 1, 2) - val prefer1 = Seq(1, 2, 0) + try { + prepareTopics(List(testTopicName), 1) + prepareRecords(testTopicName) + + val newShareConsumerConfig = new Properties(consumerConfig) + newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + + Using.resource(createShareConsumer(configOverrides = newShareConsumerConfig)) { consumer => + consumer.subscribe(util.List.of(testTopicName)) + consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) + consumer.commitSync() + + // listGroups is used to list share groups + // Test that we can list the new group. + TestUtils.waitUntilTrue(() => { + client.listGroups.all.get.stream().filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE).count() == 1 + }, s"Expected to be able to list $testGroupId") + + // Test offset deletion while consuming + val offsetDeleteResult = client.deleteShareGroupOffsets(testGroupId, util.Set.of(testTopicName, fakeTopicName)) + + // Deleting the offset with real group ID should get GroupNotEmptyException + assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.all()) + assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.topicResult(testTopicName)) + assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.topicResult(fakeTopicName)) + + // Test the fake group ID + val fakeDeleteResult = client.deleteShareGroupOffsets(fakeGroupId, util.Set.of(testTopicName, fakeTopicName)) + + assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.all()) + assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.topicResult(testTopicName)) + assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.topicResult(fakeTopicName)) + } + + // Test offset deletion when group is empty + val offsetDeleteResult = client.deleteShareGroupOffsets(testGroupId, util.Set.of(testTopicName, fakeTopicName)) + + assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.all()) + assertNull(offsetDeleteResult.topicResult(testTopicName).get()) + assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.topicResult(fakeTopicName)) + + val tp1 = new TopicPartition(testTopicName, 0) + val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1)))) + .partitionsToOffsetAndMetadata(testGroupId) + .get() + assertTrue(parts.containsKey(tp1)) + assertNull(parts.get(tp1)) + } finally { + Utils.closeQuietly(client, "adminClient") + } + } + + @Test + def testAlterShareGroupOffsets(): Unit = { + val config = createConfig + client = Admin.create(config) + val testTopicName = "test_topic" + val testGroupId = "test_group_id" + val testClientId = "test_client_id" + val fakeGroupId = "fake_group_id" + val fakeTopicName = "foo" + + val tp1 = new TopicPartition(testTopicName, 0) + val tp2 = new TopicPartition(fakeTopicName, 0) + try { + prepareTopics(List(testTopicName), 1) + prepareRecords(testTopicName) + + val newShareConsumerConfig = new Properties(consumerConfig) + newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + + Using.resource(createShareConsumer(configOverrides = 
newShareConsumerConfig)) { consumer => + consumer.subscribe(util.List.of(testTopicName)) + consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) + consumer.commitSync() + + // listGroups is used to list share groups + // Test that we can list the new group. + TestUtils.waitUntilTrue(() => { + client.listGroups.all.get.stream().filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE).count() == 1 + }, s"Expected to be able to list $testGroupId") + + // Test offset alter while consuming + val offsetAlterResult = client.alterShareGroupOffsets(testGroupId, util.Map.of(tp1, 0, tp2, 0)) + + // Altering the offset with real group ID should get GroupNotEmptyException + assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.all()) + assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.partitionResult(tp1)) + assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.partitionResult(tp2)) + + // Test the fake group ID + val fakeAlterResult = client.alterShareGroupOffsets(fakeGroupId, util.Map.of(tp1, 0, tp2, 0)) + + assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.all()) + assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.partitionResult(tp1)) + assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.partitionResult(tp2)) + } + + // Test offset alter when group is empty + val offsetAlterResult = client.alterShareGroupOffsets(testGroupId, util.Map.of(tp1, 0, tp2, 0)) + + assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetAlterResult.all()) + assertNull(offsetAlterResult.partitionResult(tp1).get()) + assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetAlterResult.partitionResult(tp2)) + + val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1)))) + .partitionsToOffsetAndMetadata(testGroupId) + .get() + assertTrue(parts.containsKey(tp1)) + assertEquals(0, parts.get(tp1).offset()) + } finally { + Utils.closeQuietly(client, "adminClient") + } + } + + @Test + def testListShareGroupOffsets(): Unit = { + val config = createConfig + client = Admin.create(config) + val testTopicName = "test_topic" + val testGroupId = "test_group_id" + val testClientId = "test_client_id" + + val newShareConsumerConfig = new Properties(consumerConfig) + newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + val consumerSet = Set(createShareConsumer(configOverrides = newShareConsumerConfig)) + val topicSet = Set(testTopicName) + val latch = new CountDownLatch(consumerSet.size) + + try { + assertNoShareGroupsExist() + prepareTopics(List(testTopicName), 2) + prepareRecords(testTopicName) + + // Start consumers in a thread that will subscribe to a new group. 
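testListShareGroupOffsets, continued below, queries listShareGroupOffsets both with an empty spec and with an explicit partition list. A condensed sketch of the two query shapes (ids are placeholders):

```scala
import org.apache.kafka.clients.admin.{Admin, ListShareGroupOffsetsSpec}
import org.apache.kafka.common.TopicPartition

object ListShareGroupOffsetsSketch {
  def listOffsets(admin: Admin): Unit = {
    val groupId = "test_group_id"

    // An empty spec returns the offset information the share group tracks for
    // every partition it has consumed.
    val allSpec = java.util.Map.of(groupId, new ListShareGroupOffsetsSpec())
    val all = admin.listShareGroupOffsets(allSpec).partitionsToOffsetAndMetadata(groupId).get()
    println(s"tracked partitions: ${all.keySet()}")

    // A spec with explicit topicPartitions restricts the answer to those partitions.
    val oneSpec = java.util.Map.of(groupId,
      new ListShareGroupOffsetsSpec().topicPartitions(java.util.List.of(new TopicPartition("test_topic", 0))))
    val one = admin.listShareGroupOffsets(oneSpec).partitionsToOffsetAndMetadata(groupId).get()
    println(s"partition 0 entry: ${one.get(new TopicPartition("test_topic", 0))}")
  }
}
```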
+ val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2, latch)) + try { + consumerThreads.foreach(_.start()) + assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) + val tp1 = new TopicPartition(testTopicName, 0) + val tp2 = new TopicPartition(testTopicName, 1) + + // Test listShareGroupOffsets + TestUtils.waitUntilTrue(() => { + val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec())) + .partitionsToOffsetAndMetadata(testGroupId) + .get() + parts.containsKey(tp1) && parts.containsKey(tp2) + }, "Expected the result contains all partitions.") + + // Test listShareGroupOffsets with listShareGroupOffsetsSpec + val groupSpecs = util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1))) + val parts = client.listShareGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata(testGroupId).get() + assertTrue(parts.containsKey(tp1)) + assertFalse(parts.containsKey(tp2)) + } finally { + consumerThreads.foreach { + case consumerThread => + consumerThread.interrupt() + consumerThread.join() + } + } + } finally { + consumerSet.foreach(consumer => Utils.closeQuietly(consumer, "consumer")) + Utils.closeQuietly(client, "adminClient") + } + } + + /** + * Waits until the metadata for the given partition has fully propagated and become consistent across all brokers. + * + * @param partition The partition whose leader metadata should be verified across all brokers. + */ + def waitForBrokerMetadataPropagation(partition: TopicPartition): Unit = { + while (brokers.exists(_.metadataCache.getPartitionLeaderEndpoint(partition.topic, partition.partition(), listenerName).isEmpty) || + brokers.map(_.metadataCache.getPartitionLeaderEndpoint(partition.topic, partition.partition(), listenerName)) + .filter(_.isPresent) + .map(_.get()) + .toSet.size != 1) + TimeUnit.MILLISECONDS.sleep(300) + } + + @Test + def testElectPreferredLeaders(): Unit = { + client = createAdminClient + + val prefer0 = Seq(0, 1, 2) + val prefer1 = Seq(1, 2, 0) val prefer2 = Seq(2, 0, 1) val partition1 = new TopicPartition("elect-preferred-leaders-topic-1", 0) @@ -2823,12 +3087,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val prior1 = brokers.head.metadataCache.getPartitionLeaderEndpoint(partition1.topic, partition1.partition(), listenerName).get.id() val prior2 = brokers.head.metadataCache.getPartitionLeaderEndpoint(partition2.topic, partition2.partition(), listenerName).get.id() - var m = Map.empty[TopicPartition, Optional[NewPartitionReassignment]] + var reassignmentMap = Map.empty[TopicPartition, Optional[NewPartitionReassignment]] if (prior1 != preferred) - m += partition1 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) + reassignmentMap += partition1 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) if (prior2 != preferred) - m += partition2 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) - client.alterPartitionReassignments(m.asJava).all().get() + reassignmentMap += partition2 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) + client.alterPartitionReassignments(reassignmentMap.asJava).all().get() TestUtils.waitUntilTrue( () => preferredLeader(partition1) == preferred && preferredLeader(partition2) == preferred, @@ -2844,7 +3108,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, 0) // Noop 
election - var electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava) + var electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1)) val exception = electResult.partitions.get.get(partition1).get assertEquals(classOf[ElectionNotNeededException], exception.getClass) TestUtils.assertLeader(client, partition1, 0) @@ -2856,11 +3120,13 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, 0) // Now change the preferred leader to 1 + waitForBrokerMetadataPropagation(partition1) + waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer1) // meaningful election - electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava) - assertEquals(Set(partition1).asJava, electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1)) + assertEquals(util.Set.of(partition1), electResult.partitions.get.keySet) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) TestUtils.assertLeader(client, partition1, 1) @@ -2887,32 +3153,38 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // unknown topic val unknownPartition = new TopicPartition("topic-does-not-exist", 0) - electResult = client.electLeaders(ElectionType.PREFERRED, Set(unknownPartition).asJava) - assertEquals(Set(unknownPartition).asJava, electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(unknownPartition)) + assertEquals(util.Set.of(unknownPartition), electResult.partitions.get.keySet) assertUnknownTopicOrPartition(unknownPartition, electResult) TestUtils.assertLeader(client, partition1, 1) TestUtils.assertLeader(client, partition2, 1) // Now change the preferred leader to 2 + waitForBrokerMetadataPropagation(partition1) + waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer2) // mixed results - electResult = client.electLeaders(ElectionType.PREFERRED, Set(unknownPartition, partition1).asJava) - assertEquals(Set(unknownPartition, partition1).asJava, electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(unknownPartition, partition1)) + assertEquals(util.Set.of(unknownPartition, partition1), electResult.partitions.get.keySet) TestUtils.assertLeader(client, partition1, 2) TestUtils.assertLeader(client, partition2, 1) assertUnknownTopicOrPartition(unknownPartition, electResult) // elect preferred leader for partition 2 - electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition2).asJava) - assertEquals(Set(partition2).asJava, electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition2)) + assertEquals(util.Set.of(partition2), electResult.partitions.get.keySet) assertFalse(electResult.partitions.get.get(partition2).isPresent) TestUtils.assertLeader(client, partition2, 2) // Now change the preferred leader to 1 + waitForBrokerMetadataPropagation(partition1) + waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer1) // but shut it down... 
killBroker(1) + waitForBrokerMetadataPropagation(partition1) + waitForBrokerMetadataPropagation(partition2) TestUtils.waitForBrokersOutOfIsr(client, Set(partition1, partition2), Set(1)) def assertPreferredLeaderNotAvailable( @@ -2928,8 +3200,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // ... now what happens if we try to elect the preferred leader and it's down? val shortTimeout = new ElectLeadersOptions().timeoutMs(10000) - electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava, shortTimeout) - assertEquals(Set(partition1).asJava, electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1), shortTimeout) + assertEquals(util.Set.of(partition1), electResult.partitions.get.keySet) assertPreferredLeaderNotAvailable(partition1, electResult) TestUtils.assertLeader(client, partition1, 2) @@ -2949,6 +3221,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersForOnePartition(): Unit = { // Case: unclean leader election with one topic partition client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -2966,7 +3239,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) + val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) TestUtils.assertLeader(client, partition1, broker2) @@ -2976,6 +3249,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersForManyPartitions(): Unit = { // Case: unclean leader election with many topic partitions client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3002,7 +3276,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1, partition2).asJava) + val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1, partition2)) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) electResult.partitions.get.get(partition2) @@ -3015,6 +3289,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersForAllPartitions(): Unit = { // Case: noop unclean leader election and valid unclean leader election for all partitions client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3054,6 +3329,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersForUnknownPartitions(): Unit = { // Case: unclean leader election for unknown topic client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3070,7 +3346,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, new TopicPartition(topic, 0), broker1) - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(unknownPartition, unknownTopic).asJava) + val electResult = 
client.electLeaders(ElectionType.UNCLEAN, util.Set.of(unknownPartition, unknownTopic)) assertTrue(electResult.partitions.get.get(unknownPartition).get.isInstanceOf[UnknownTopicOrPartitionException]) assertTrue(electResult.partitions.get.get(unknownTopic).get.isInstanceOf[UnknownTopicOrPartitionException]) } @@ -3079,6 +3355,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersWhenNoLiveBrokers(): Unit = { // Case: unclean leader election with no live brokers client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3099,7 +3376,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { killBroker(broker1) TestUtils.assertNoLeader(client, partition1) - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) + val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) assertTrue(electResult.partitions.get.get(partition1).get.isInstanceOf[EligibleLeadersNotAvailableException]) } @@ -3107,6 +3384,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersNoop(): Unit = { // Case: noop unclean leader election with explicit topic partitions client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3126,7 +3404,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition1, broker2) brokers(broker1).startup() - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) + val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) assertTrue(electResult.partitions.get.get(partition1).get.isInstanceOf[ElectionNotNeededException]) } @@ -3134,6 +3412,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testElectUncleanLeadersAndNoop(): Unit = { // Case: one noop unclean leader election and one valid unclean leader election client = createAdminClient + disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3161,7 +3440,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1, partition2).asJava) + val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1, partition2)) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) assertTrue(electResult.partitions.get.get(partition2).get.isInstanceOf[ElectionNotNeededException]) @@ -3178,7 +3457,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic, replicationFactor = 3) val tp = new TopicPartition(topic, 0) - val reassignmentsMap = client.listPartitionReassignments(Set(tp).asJava).reassignments().get() + val reassignmentsMap = client.listPartitionReassignments(util.Set.of(tp)).reassignments().get() assertEquals(0, reassignmentsMap.size()) val allReassignmentsMap = client.listPartitionReassignments().reassignments().get() @@ -3192,7 +3471,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val topic = "list-reassignments-no-reassignments" val tp = new TopicPartition(topic, 0) - val reassignmentsMap = client.listPartitionReassignments(Set(tp).asJava).reassignments().get() + val reassignmentsMap = 
client.listPartitionReassignments(util.Set.of(tp)).reassignments().get() assertEquals(0, reassignmentsMap.size()) val allReassignmentsMap = client.listPartitionReassignments().reassignments().get() @@ -3216,31 +3495,31 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic2) // Alter topic configs - var topic1AlterConfigs = Seq( + var topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE), AlterConfigOp.OpType.APPEND), new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ).asJavaCollection + ) // Test SET and APPEND on previously unset properties - var topic2AlterConfigs = Seq( + var topic2AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.APPEND) - ).asJavaCollection + ) - var alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs, - topic2Resource -> topic2AlterConfigs - ).asJava) + var alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs, + topic2Resource, topic2AlterConfigs + )) - assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) alterResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated correctly - var describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) + var describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) var configs = describeResult.all.get assertEquals(2, configs.size) @@ -3254,27 +3533,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals("delete,compact", configs.get(topic2Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value) // verify subtract operation, including from an empty property - topic1AlterConfigs = Seq( + topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.SUBTRACT), new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, "0"), AlterConfigOp.OpType.SUBTRACT) - ).asJava + ) // subtract all from this list property - topic2AlterConfigs = Seq( + topic2AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE), AlterConfigOp.OpType.SUBTRACT) - ).asJavaCollection + ) - alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs, - topic2Resource -> topic2AlterConfigs - ).asJava) - assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs, + topic2Resource, topic2AlterConfigs + )) + assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) alterResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated 
correctly - describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) + describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) configs = describeResult.all.get assertEquals(2, configs.size) @@ -3285,29 +3564,29 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals("", configs.get(topic2Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value ) // Alter topics with validateOnly=true - topic1AlterConfigs = Seq( + topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.APPEND) - ).asJava + ) - alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs + ), new AlterConfigsOptions().validateOnly(true)) alterResult.all.get // Verify that topics were not updated due to validateOnly = true - describeResult = client.describeConfigs(Seq(topic1Resource).asJava) + describeResult = client.describeConfigs(util.List.of(topic1Resource)) configs = describeResult.all.get assertEquals("delete", configs.get(topic1Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value) // Alter topics with validateOnly=true with invalid configs - topic1AlterConfigs = Seq( + topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "zip"), AlterConfigOp.OpType.SET) - ).asJava + ) - alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs + ), new AlterConfigsOptions().validateOnly(true)) assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values().get(topic1Resource), "Invalid value zip for configuration compression.type: String must be one of: uncompressed, zstd, lz4, snappy, gzip, producer") @@ -3330,24 +3609,24 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic, numPartitions = 1, replicationFactor = 1, topicCreateConfigs) // Append value that is already present - val topicAppendConfigs = Seq( + val topicAppendConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, appendValues), AlterConfigOp.OpType.APPEND), - ).asJavaCollection + ) - val appendResult = client.incrementalAlterConfigs(Map(topicResource -> topicAppendConfigs).asJava) + val appendResult = client.incrementalAlterConfigs(util.Map.of(topicResource, topicAppendConfigs)) appendResult.all.get // Subtract values that are not present - val topicSubtractConfigs = Seq( + val topicSubtractConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, subtractValues), AlterConfigOp.OpType.SUBTRACT) - ).asJavaCollection - val subtractResult = client.incrementalAlterConfigs(Map(topicResource -> topicSubtractConfigs).asJava) + ) + val subtractResult = client.incrementalAlterConfigs(util.Map.of(topicResource, topicSubtractConfigs)) subtractResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated correctly - val describeResult = client.describeConfigs(Seq(topicResource).asJava) + val describeResult = client.describeConfigs(util.List.of(topicResource)) val configs = 
describeResult.all.get assertEquals(appendValues, configs.get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG).value) @@ -3357,32 +3636,32 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testIncrementalAlterConfigsDeleteAndSetBrokerConfigs(): Unit = { client = createAdminClient val broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0") - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "456"), AlterConfigOp.OpType.SET) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). + val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - ("123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, ""))) + "123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) }, "Expected to see the broker properties we just set", pause=25) - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "654"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "987"), AlterConfigOp.OpType.SET) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). + val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). 
all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - ("".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + "".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "654".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "987".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) + "987".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) }, "Expected to see the broker properties we just modified", pause=25) } @@ -3390,35 +3669,35 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testIncrementalAlterConfigsDeleteBrokerConfigs(): Unit = { client = createAdminClient val broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0") - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "456"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "789"), AlterConfigOp.OpType.SET) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). + val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - ("123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + "123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "789".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) + "789".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) }, "Expected to see the broker properties we just set", pause=25) - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). + val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). 
all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - ("".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + "".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) + "".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) }, "Expected to see the broker properties we just removed to be deleted", pause=25) } @@ -3436,22 +3715,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic2) // Add duplicate Keys for topic1 - var topic1AlterConfigs = Seq( + var topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.75"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.65"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), AlterConfigOp.OpType.SET) // valid entry - ).asJavaCollection + ) // Add valid config for topic2 - var topic2AlterConfigs = Seq( + var topic2AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) - var alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs, - topic2Resource -> topic2AlterConfigs - ).asJava) - assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) + var alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs, + topic2Resource, topic2AlterConfigs + )) + assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) // InvalidRequestException error for topic1 assertFutureThrows(classOf[InvalidRequestException], alterResult.values().get(topic1Resource), @@ -3462,7 +3741,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ensureConsistentKRaftMetadata() // Verify that topic1 is not config not updated, and topic2 config is updated - val describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) + val describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) val configs = describeResult.all.get assertEquals(2, configs.size) @@ -3471,19 +3750,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals("0.9", configs.get(topic2Resource).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) // Check invalid use of append/subtract operation types - topic1AlterConfigs = Seq( + topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), AlterConfigOp.OpType.APPEND) - ).asJavaCollection + ) - topic2AlterConfigs = Seq( + topic2AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), AlterConfigOp.OpType.SUBTRACT) - ).asJavaCollection + ) - alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs, - topic2Resource -> topic2AlterConfigs - ).asJava) - assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(util.Map.of( + 
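The broker-level tests above SET dynamic throttle configs on a single broker and then DELETE them again. A minimal sketch of that pattern outside the test harness, assuming an illustrative broker id "0", the literal throttle key, and a placeholder bootstrap address (an empty value is passed for DELETE, mirroring the test):

import java.util
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, ConfigEntry}
import org.apache.kafka.common.config.ConfigResource

object BrokerThrottleSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      // A BROKER resource scoped to a single broker id ("0" here is illustrative).
      val broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0")
      // SET a dynamic throttle on that broker...
      admin.incrementalAlterConfigs(util.Map.of(broker0, util.List.of(
        new AlterConfigOp(new ConfigEntry("leader.replication.throttled.rate", "1048576"), AlterConfigOp.OpType.SET)
      ))).all().get()
      // ...then DELETE it so the broker falls back to its default.
      admin.incrementalAlterConfigs(util.Map.of(broker0, util.List.of(
        new AlterConfigOp(new ConfigEntry("leader.replication.throttled.rate", ""), AlterConfigOp.OpType.DELETE)
      ))).all().get()
    } finally admin.close()
  }
}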
topic1Resource, topic1AlterConfigs, + topic2Resource, topic2AlterConfigs + )) + assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) assertFutureThrows(classOf[InvalidConfigurationException],alterResult.values().get(topic1Resource), "Can't APPEND to key compression.type because its type is not LIST.") @@ -3492,14 +3771,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { "Can't SUBTRACT to key compression.type because its type is not LIST.") // Try to add invalid config - topic1AlterConfigs = Seq( + topic1AlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) - alterResult = client.incrementalAlterConfigs(Map( - topic1Resource -> topic1AlterConfigs - ).asJava) - assertEquals(Set(topic1Resource).asJava, alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(util.Map.of( + topic1Resource, topic1AlterConfigs + )) + assertEquals(util.Set.of(topic1Resource), alterResult.values.keySet) assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values().get(topic1Resource), "Invalid value 1.1 for configuration min.cleanable.dirty.ratio: Value must be no more than 1") @@ -3520,14 +3799,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val alterOptions = new AlterPartitionReassignmentsOptions alterOptions.allowReplicationFactorChange(false) - val alterReplicaNumberTo1 = Optional.of(new NewPartitionReassignment(List(1.asInstanceOf[Integer]).asJava)) + val alterReplicaNumberTo1 = Optional.of(new NewPartitionReassignment(util.List.of(1.asInstanceOf[Integer]))) val alterReplicaNumberTo2 = Optional.of(new NewPartitionReassignment((0 until brokerCount - 1).map(_.asInstanceOf[Integer]).asJava)) val alterReplicaNumberTo3 = Optional.of(new NewPartitionReassignment((0 until brokerCount).map(_.asInstanceOf[Integer]).asJava)) - val alterReplicaResults = client.alterPartitionReassignments(Map( - tp1 -> alterReplicaNumberTo1, - tp2 -> alterReplicaNumberTo2, - tp3 -> alterReplicaNumberTo3, - ).asJava, alterOptions).values() + val alterReplicaResults = client.alterPartitionReassignments(util.Map.of( + tp1, alterReplicaNumberTo1, + tp2, alterReplicaNumberTo2, + tp3, alterReplicaNumberTo3, + ), alterOptions).values() assertDoesNotThrow(() => alterReplicaResults.get(tp2).get()) assertEquals("The replication factor is changed from 2 to 1", assertFutureThrows(classOf[InvalidReplicationFactorException], alterReplicaResults.get(tp1)).getMessage) @@ -3536,24 +3815,24 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val nonExistentTp1 = new TopicPartition("topicA", 0) val nonExistentTp2 = new TopicPartition(topic, 4) - val nonExistentPartitionsResult = client.alterPartitionReassignments(Map( - tp1 -> validAssignment, - tp2 -> validAssignment, - tp3 -> validAssignment, - nonExistentTp1 -> validAssignment, - nonExistentTp2 -> validAssignment - ).asJava).values() + val nonExistentPartitionsResult = client.alterPartitionReassignments(util.Map.of( + tp1, validAssignment, + tp2, validAssignment, + tp3, validAssignment, + nonExistentTp1, validAssignment, + nonExistentTp2, validAssignment + )).values() assertFutureThrows(classOf[UnknownTopicOrPartitionException], nonExistentPartitionsResult.get(nonExistentTp1)) assertFutureThrows(classOf[UnknownTopicOrPartitionException], nonExistentPartitionsResult.get(nonExistentTp2)) val extraNonExistentReplica = Optional.of(new 
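The reassignment hunk above exercises alterPartitionReassignments with the allowReplicationFactorChange(false) option. A hypothetical standalone sketch of a same-replication-factor move, with topic, partition and broker ids as placeholders and the option used exactly as the test does:

import java.util
import java.util.Optional
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterPartitionReassignmentsOptions, NewPartitionReassignment}
import org.apache.kafka.common.TopicPartition

object ReassignmentSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      val tp = new TopicPartition("example-topic", 0) // placeholder topic-partition
      // Target replica list keeps the same size as the current assignment (brokers 0 and 1 here).
      val reassignment = Optional.of(new NewPartitionReassignment(util.List.of[Integer](0, 1)))
      // As in the test above, reject any reassignment that would change the replication factor.
      val options = new AlterPartitionReassignmentsOptions
      options.allowReplicationFactorChange(false)
      admin.alterPartitionReassignments(util.Map.of(tp, reassignment), options).all().get()
    } finally admin.close()
  }
}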
NewPartitionReassignment((0 until brokerCount + 1).map(_.asInstanceOf[Integer]).asJava)) val negativeIdReplica = Optional.of(new NewPartitionReassignment(Seq(-3, -2, -1).map(_.asInstanceOf[Integer]).asJava)) val duplicateReplica = Optional.of(new NewPartitionReassignment(Seq(0, 1, 1).map(_.asInstanceOf[Integer]).asJava)) - val invalidReplicaResult = client.alterPartitionReassignments(Map( - tp1 -> extraNonExistentReplica, - tp2 -> negativeIdReplica, - tp3 -> duplicateReplica - ).asJava).values() + val invalidReplicaResult = client.alterPartitionReassignments(util.Map.of( + tp1, extraNonExistentReplica, + tp2, negativeIdReplica, + tp3, duplicateReplica + )).values() assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp1)) assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp2)) assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp3)) @@ -3564,16 +3843,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val client = createAdminClient val longTopicName = String.join("", Collections.nCopies(249, "x")) val invalidTopicName = String.join("", Collections.nCopies(250, "x")) - val newTopics2 = Seq(new NewTopic(invalidTopicName, 3, 3.toShort), + val newTopics2 = util.List.of(new NewTopic(invalidTopicName, 3, 3.toShort), new NewTopic(longTopicName, 3, 3.toShort)) - val results = client.createTopics(newTopics2.asJava).values() + val results = client.createTopics(newTopics2).values() assertTrue(results.containsKey(longTopicName)) results.get(longTopicName).get() assertTrue(results.containsKey(invalidTopicName)) assertFutureThrows(classOf[InvalidTopicException], results.get(invalidTopicName)) assertFutureThrows(classOf[InvalidTopicException], client.alterReplicaLogDirs( - Map(new TopicPartitionReplica(longTopicName, 0, 0) -> brokers(0).config.logDirs(0)).asJava).all()) + util.Map.of(new TopicPartitionReplica(longTopicName, 0, 0), brokers(0).config.logDirs.get(0))).all()) client.close() } @@ -3584,7 +3863,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def validateLogConfig(compressionType: String): Unit = { ensureConsistentKRaftMetadata() val topicProps = brokers.head.metadataCache.topicConfig(topic) - val logConfig = LogConfig.fromProps(Collections.emptyMap[String, AnyRef], topicProps) + val logConfig = LogConfig.fromProps(util.Map.of[String, AnyRef], topicProps) assertEquals(compressionType, logConfig.originals.get(TopicConfig.COMPRESSION_TYPE_CONFIG)) assertNull(logConfig.originals.get(TopicConfig.RETENTION_BYTES_CONFIG)) @@ -3598,22 +3877,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ).asJava val newTopic = new NewTopic(topic, 2, brokerCount.toShort) assertFutureThrows(classOf[InvalidConfigurationException], - client.createTopics(Collections.singletonList(newTopic.configs(invalidConfigs))).all, + client.createTopics(util.List.of(newTopic.configs(invalidConfigs))).all, "Null value not supported for topic configs: retention.bytes" ) - val validConfigs = Map[String, String](TopicConfig.COMPRESSION_TYPE_CONFIG -> "producer").asJava - client.createTopics(Collections.singletonList(newTopic.configs(validConfigs))).all.get() + val validConfigs = util.Map.of[String, String](TopicConfig.COMPRESSION_TYPE_CONFIG, "producer") + client.createTopics(util.List.of(newTopic.configs(validConfigs))).all.get() waitForTopics(client, expectedPresent = Seq(topic), expectedMissing = List()) validateLogConfig(compressionType = 
"producer") val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val alterOps = Seq( + val alterOps = util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, null), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), AlterConfigOp.OpType.SET) ) assertFutureThrows(classOf[InvalidRequestException], - client.incrementalAlterConfigs(Map(topicResource -> alterOps.asJavaCollection).asJava).all, + client.incrementalAlterConfigs(util.Map.of(topicResource, alterOps)).all, "Null value not supported for : retention.bytes" ) validateLogConfig(compressionType = "producer") @@ -3647,9 +3926,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val initialReplicaManagerLogLevel = initialLoggerConfig.get("kafka.server.ReplicaManager").value() val newAncestorLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL - val alterAncestorLoggerEntry = Seq( + val alterAncestorLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry(ancestorLogger, newAncestorLogLevel), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) // Test validateOnly does not change anything alterBrokerLoggers(alterAncestorLoggerEntry, validateOnly = true) val validatedLoggerConfig = describeBrokerLoggers() @@ -3667,19 +3946,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(newAncestorLogLevel, changedAncestorLoggerConfig.get("kafka.server.ReplicaManager").value()) // alter the LogCleaner's logger so we can later test resetting it - val alterLogCleanerLoggerEntry = Seq( + val alterLogCleanerLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) alterBrokerLoggers(alterLogCleanerLoggerEntry) val changedBrokerLoggerConfig = describeBrokerLoggers() assertEquals(LogLevelConfig.ERROR_LOG_LEVEL, changedBrokerLoggerConfig.get("kafka.log.LogCleaner").value()) // properly test various set operations and one delete - val alterLogLevelsEntries = Seq( + val alterLogLevelsEntries = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", LogLevelConfig.TRACE_LOG_LEVEL), AlterConfigOp.OpType.SET), - ).asJavaCollection + ) alterBrokerLoggers(alterLogLevelsEntries) val alteredLoggerConfig = describeBrokerLoggers() assertEquals(newAncestorLogLevel, alteredLoggerConfig.get(ancestorLogger).value()) @@ -3701,36 +3980,36 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val ancestorLogger = "kafka" // step 1 - configure kafka logger val initialAncestorLogLevel = LogLevelConfig.TRACE_LOG_LEVEL - val alterAncestorLoggerEntry = Seq( + val alterAncestorLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry(ancestorLogger, initialAncestorLogLevel), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) alterBrokerLoggers(alterAncestorLoggerEntry) val initialLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, initialLoggerConfig.get(ancestorLogger).value()) assertEquals(initialAncestorLogLevel, initialLoggerConfig.get("kafka.server.ControllerServer").value()) // step 2 - change ControllerServer logger to INFO - val alterControllerLoggerEntry = Seq( + val alterControllerLoggerEntry = 
util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) alterBrokerLoggers(alterControllerLoggerEntry) val changedControllerLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, changedControllerLoggerConfig.get(ancestorLogger).value()) assertEquals(LogLevelConfig.INFO_LOG_LEVEL, changedControllerLoggerConfig.get("kafka.server.ControllerServer").value()) // step 3 - unset ControllerServer logger - val deleteControllerLoggerEntry = Seq( + val deleteControllerLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", ""), AlterConfigOp.OpType.DELETE) - ).asJavaCollection + ) alterBrokerLoggers(deleteControllerLoggerEntry) val deletedControllerLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, deletedControllerLoggerConfig.get(ancestorLogger).value()) assertEquals(initialAncestorLogLevel, deletedControllerLoggerConfig.get("kafka.server.ControllerServer").value()) val newAncestorLogLevel = LogLevelConfig.ERROR_LOG_LEVEL - val newAlterAncestorLoggerEntry = Seq( + val newAlterAncestorLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry(ancestorLogger, newAncestorLogLevel), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) alterBrokerLoggers(newAlterAncestorLoggerEntry) val newAncestorLoggerConfig = describeBrokerLoggers() assertEquals(newAncestorLogLevel, newAncestorLoggerConfig.get(ancestorLogger).value()) @@ -3744,9 +4023,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val initialRootLogLevel = initialLoggerConfig.get(LoggingController.ROOT_LOGGER).value() val newRootLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL - val alterRootLoggerEntry = Seq( + val alterRootLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry(LoggingController.ROOT_LOGGER, newRootLogLevel), AlterConfigOp.OpType.SET) - ).asJavaCollection + ) alterBrokerLoggers(alterRootLoggerEntry, validateOnly = true) val validatedRootLoggerConfig = describeBrokerLoggers() @@ -3760,9 +4039,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @Test def testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger(): Unit = { client = createAdminClient - val deleteRootLoggerEntry = Seq( + val deleteRootLoggerEntry = util.List.of( new AlterConfigOp(new ConfigEntry(LoggingController.ROOT_LOGGER, ""), AlterConfigOp.OpType.DELETE) - ).asJavaCollection + ) assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(deleteRootLoggerEntry)).getCause.isInstanceOf[InvalidRequestException]) } @@ -3776,42 +4055,42 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(expectedValidLoggerLogLevel, describeBrokerLoggers().get(validLoggerName)) } - val appendLogLevelEntries = Seq( + val appendLogLevelEntries = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.APPEND) // append is not supported - ).asJavaCollection + ) assertInstanceOf(classOf[InvalidRequestException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(appendLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val subtractLogLevelEntries = Seq( + val subtractLogLevelEntries = util.List.of( new AlterConfigOp(new 
ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SUBTRACT) // subtract is not supported - ).asJavaCollection + ) assertInstanceOf(classOf[InvalidRequestException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(subtractLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val invalidLogLevelLogLevelEntries = Seq( + val invalidLogLevelLogLevelEntries = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", "OFF"), AlterConfigOp.OpType.SET) // OFF is not a valid log level - ).asJavaCollection + ) assertInstanceOf(classOf[InvalidConfigurationException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLogLevelLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val invalidLoggerNameLogLevelEntries = Seq( + val invalidLoggerNameLogLevelEntries = util.List.of( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("Some Other LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET) // invalid logger name is not supported - ).asJavaCollection + ) assertInstanceOf(classOf[InvalidConfigurationException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLoggerNameLogLevelEntries)).getCause) assertLogLevelDidNotChange() } def alterBrokerLoggers(entries: util.Collection[AlterConfigOp], validateOnly: Boolean = false): Unit = { - client.incrementalAlterConfigs(Map(brokerLoggerConfigResource -> entries).asJava, new AlterConfigsOptions().validateOnly(validateOnly)) + client.incrementalAlterConfigs(util.Map.of(brokerLoggerConfigResource, entries), new AlterConfigsOptions().validateOnly(validateOnly)) .values.get(brokerLoggerConfigResource).get() } def describeBrokerLoggers(): Config = - client.describeConfigs(Collections.singletonList(brokerLoggerConfigResource)).values.get(brokerLoggerConfigResource).get() + client.describeConfigs(util.List.of(brokerLoggerConfigResource)).values.get(brokerLoggerConfigResource).get() @Test def testAppendConfigToEmptyDefaultValue(): Unit = { @@ -3825,34 +4104,42 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { testAppendConfig(props, "0:0", "1:1,0:0") } + private def disableEligibleLeaderReplicas(admin: Admin): Unit = { + if (metadataVersion.isAtLeast(MetadataVersion.IBP_4_1_IV0)) { + admin.updateFeatures( + util.Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, new FeatureUpdate(0, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE)), + new UpdateFeaturesOptions()).all().get() + } + } + private def testAppendConfig(props: Properties, append: String, expected: String): Unit = { client = createAdminClient createTopic(topic, topicConfig = props) val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val topicAlterConfigs = Seq( + val topicAlterConfigs = util.List.of( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, append), AlterConfigOp.OpType.APPEND), - ).asJavaCollection + ) - val alterResult = client.incrementalAlterConfigs(Map( - topicResource -> topicAlterConfigs - ).asJava) + val alterResult = client.incrementalAlterConfigs(util.Map.of( + 
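The log4j tests above drive per-broker logger levels through the BROKER_LOGGER config resource, where only SET and DELETE are accepted. A minimal sketch of that pattern, with the broker id, logger name and bootstrap address as placeholders:

import java.util
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, ConfigEntry}
import org.apache.kafka.common.config.ConfigResource

object BrokerLoggerSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      // BROKER_LOGGER addresses one broker's log4j levels; "0" is an illustrative broker id.
      val loggers = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0")
      // SET raises one logger to DEBUG; a DELETE would reset it to the level inherited from its ancestor.
      admin.incrementalAlterConfigs(util.Map.of(loggers, util.List.of(
        new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", "DEBUG"), AlterConfigOp.OpType.SET)
      ))).all().get()
      val current = admin.describeConfigs(util.List.of(loggers)).all().get().get(loggers)
      println(current.get("kafka.server.ReplicaManager").value())
    } finally admin.close()
  }
}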
topicResource, topicAlterConfigs + )) alterResult.all().get(15, TimeUnit.SECONDS) ensureConsistentKRaftMetadata() - val config = client.describeConfigs(List(topicResource).asJava).all().get().get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) + val config = client.describeConfigs(util.List.of(topicResource)).all().get().get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) assertEquals(expected, config.value()) } @Test def testListClientMetricsResources(): Unit = { client = createAdminClient - client.createTopics(Collections.singleton(new NewTopic(topic, partition, 0.toShort))) + client.createTopics(util.Set.of(new NewTopic(topic, partition, 0.toShort))) assertTrue(client.listClientMetricsResources().all().get().isEmpty) val name = "name" val configResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name) val configEntry = new ConfigEntry("interval.ms", "111") val configOp = new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET) - client.incrementalAlterConfigs(Collections.singletonMap(configResource, Collections.singletonList(configOp))).all().get() + client.incrementalAlterConfigs(util.Map.of(configResource, util.List.of(configOp))).all().get() TestUtils.waitUntilTrue(() => { val results = client.listClientMetricsResources().all().get() results.size() == 1 && results.iterator().next().equals(new ClientMetricsResourceListing(name)) @@ -3871,6 +4158,92 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally client.close(time.Duration.ZERO) } + @Test + def testListConfigResources(): Unit = { + client = createAdminClient + + // Alter group and client metric config to add group and client metric config resource + val clientMetric = "client-metrics" + val group = "group" + val clientMetricResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, clientMetric) + val groupResource = new ConfigResource(ConfigResource.Type.GROUP, group) + val alterResult = client.incrementalAlterConfigs(util.Map.of( + clientMetricResource, + util.Set.of(new AlterConfigOp(new ConfigEntry("interval.ms", "111"), AlterConfigOp.OpType.SET)), + groupResource, + util.Set.of(new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"), AlterConfigOp.OpType.SET)) + )) + assertEquals(util.Set.of(clientMetricResource, groupResource), alterResult.values.keySet) + alterResult.all.get(15, TimeUnit.SECONDS) + + ensureConsistentKRaftMetadata() + + // non-specified config resource type retrieves all config resources + var configResources = client.listConfigResources().all().get() + assertEquals(9, configResources.size()) + brokerServers.foreach(b => { + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, b.config.nodeId.toString))) + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) + }) + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) + assertTrue(configResources.contains(groupResource)) + assertTrue(configResources.contains(clientMetricResource)) + + // BROKER config resource type retrieves only broker config resources + configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.BROKER), new ListConfigResourcesOptions()).all().get() + assertEquals(3, configResources.size()) + brokerServers.foreach(b => { + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, 
b.config.nodeId.toString))) + assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) + }) + assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) + assertFalse(configResources.contains(groupResource)) + assertFalse(configResources.contains(clientMetricResource)) + + // BROKER_LOGGER config resource type retrieves only broker logger config resources + configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.BROKER_LOGGER), new ListConfigResourcesOptions()).all().get() + assertEquals(3, configResources.size()) + brokerServers.foreach(b => { + assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, b.config.nodeId.toString))) + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) + }) + assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) + assertFalse(configResources.contains(groupResource)) + assertFalse(configResources.contains(clientMetricResource)) + + // TOPIC config resource type retrieves only topic config resources + configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.TOPIC), new ListConfigResourcesOptions()).all().get() + assertEquals(1, configResources.size()) + assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) + + // GROUP config resource type retrieves only group config resources + configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions()).all().get() + assertEquals(1, configResources.size()) + assertTrue(configResources.contains(groupResource)) + + // CLIENT_METRICS config resource type retrieves only client metric config resources + configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions()).all().get() + assertEquals(1, configResources.size()) + assertTrue(configResources.contains(clientMetricResource)) + + // UNKNOWN config resource type gets UNSUPPORTED_VERSION error + assertThrows(classOf[ExecutionException], () => { + client.listConfigResources(util.Set.of(ConfigResource.Type.UNKNOWN), new ListConfigResourcesOptions()).all().get() + }) + } + + @Test + @Timeout(30) + def testListConfigResourcesTimeoutMs(): Unit = { + client = createInvalidAdminClient() + try { + val timeoutOption = new ListConfigResourcesOptions().timeoutMs(0) + val exception = assertThrows(classOf[ExecutionException], () => + client.listConfigResources(util.Set.of(), timeoutOption).all().get()) + assertInstanceOf(classOf[TimeoutException], exception.getCause) + } finally client.close(time.Duration.ZERO) + } + /** * Test that createTopics returns the dynamic configurations of the topics that were created. 
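The new testListConfigResources above filters config resources by ConfigResource.Type. A sketch of how a client might call the listConfigResources API exercised there; the API shape is taken from the calls in the test, and the bootstrap address is a placeholder:

import java.util
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ListConfigResourcesOptions}
import org.apache.kafka.common.config.ConfigResource

object ListConfigResourcesSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      // With no type filter, every known config resource is returned
      // (brokers, broker loggers, topics, groups, client metrics).
      admin.listConfigResources().all().get()
        .forEach(r => println(r.`type`().toString + " " + r.name()))
      // Filtered to a single resource type, mirroring the per-type assertions in the test.
      admin.listConfigResources(util.Set.of(ConfigResource.Type.TOPIC), new ListConfigResourcesOptions())
        .all().get()
        .forEach(r => println(r.name()))
    } finally admin.close()
  }
}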
* @@ -3896,8 +4269,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val controllerNodeResource = new ConfigResource(ConfigResource.Type.BROKER, controllerServer.config.nodeId.toString) controllerServer.controller.incrementalAlterConfigs(ANONYMOUS_CONTEXT, - Collections.singletonMap(controllerNodeResource, - Collections.singletonMap(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP, + util.Map.of(controllerNodeResource, + util.Map.of(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP, new SimpleImmutableEntry(AlterConfigOp.OpType.SET, "34"))), false).get() ensureConsistentKRaftMetadata() @@ -3911,11 +4284,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { s"Timed out waiting for change to ${ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG}", waitTimeMs = 60000L) - val newTopics = Seq(new NewTopic("foo", Map((0: Integer) -> Seq[Integer](1, 2).asJava, - (1: Integer) -> Seq[Integer](2, 0).asJava).asJava). - configs(Collections.singletonMap(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999")), + val newTopics = Seq(new NewTopic("foo", util.Map.of(0: Integer, util.List.of[Integer](1, 2), + 1: Integer, util.List.of[Integer](2, 0))). + configs(util.Map.of(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999")), new NewTopic("bar", 3, 3.toShort), - new NewTopic("baz", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) + new NewTopic("baz", Optional.empty[Integer], Optional.empty[java.lang.Short]) ) val result = client.createTopics(newTopics.asJava) result.all.get() @@ -3932,33 +4305,33 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // From the topic configuration defaults. assertEquals(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, "delete", - ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), null, null), + ConfigSource.DEFAULT_CONFIG, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG)) // From dynamic cluster config via the synonym LogRetentionTimeHoursProp. assertEquals(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "10800000", - ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), + ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.RETENTION_MS_CONFIG)) // From dynamic broker config via LogCleanerDeleteRetentionMsProp. assertEquals(new ConfigEntry(TopicConfig.DELETE_RETENTION_MS_CONFIG, "34", - ConfigSource.DYNAMIC_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), + ConfigSource.DYNAMIC_BROKER_CONFIG, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.DELETE_RETENTION_MS_CONFIG)) // From static broker config by SegmentJitterMsProp. assertEquals(new ConfigEntry(TopicConfig.SEGMENT_JITTER_MS_CONFIG, "123", - ConfigSource.STATIC_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), + ConfigSource.STATIC_BROKER_CONFIG, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.SEGMENT_JITTER_MS_CONFIG)) // From static broker config by the synonym LogRollTimeHoursProp. val segmentMsPropType = ConfigSource.STATIC_BROKER_CONFIG assertEquals(new ConfigEntry(TopicConfig.SEGMENT_MS_CONFIG, "7200000", - segmentMsPropType, false, false, Collections.emptyList(), null, null), + segmentMsPropType, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.SEGMENT_MS_CONFIG)) // From the dynamic topic config. 
assertEquals(new ConfigEntry(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999", - ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, Collections.emptyList(), null, null), + ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, util.List.of, null, null), topicConfigs.get(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG)) } @@ -4002,7 +4375,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { private def createConsumerThread[K,V](consumer: Consumer[K,V], topic: String): Thread = { new Thread { override def run : Unit = { - consumer.subscribe(Collections.singleton(topic)) + consumer.subscribe(util.Set.of(topic)) try { while (consumerThreadRunning.get()) { consumer.poll(JDuration.ofSeconds(5)) @@ -4025,6 +4398,391 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } } + + @Test + def testDescribeStreamsGroups(): Unit = { + val streamsGroupId = "stream_group_id" + val testTopicName = "test_topic" + val testNumPartitions = 1 + + val config = createConfig + client = Admin.create(config) + + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId + ) + streams.poll(JDuration.ofMillis(500L)) + + try { + TestUtils.waitUntilTrue(() => { + val firstGroup = client.listGroups().all().get().stream() + .filter(g => g.groupId() == streamsGroupId).findFirst().orElse(null) + firstGroup.groupState().orElse(null) == GroupState.STABLE && firstGroup.groupId() == streamsGroupId + }, "Stream group not stable yet") + + // Verify the describe call works correctly + val describedGroups = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() + val group = describedGroups.get(streamsGroupId) + assertNotNull(group) + assertEquals(streamsGroupId, group.groupId()) + assertFalse(group.members().isEmpty) + assertNotNull(group.subtopologies()) + assertFalse(group.subtopologies().isEmpty) + + // Verify the topology contains the expected source and sink topics + val subtopologies = group.subtopologies().asScala + assertTrue(subtopologies.exists(subtopology => + subtopology.sourceTopics().contains(testTopicName))) + + // Test describing a non-existing group + val nonExistingGroup = "non_existing_stream_group" + val describedNonExistingGroupResponse = client.describeStreamsGroups(util.List.of(nonExistingGroup)) + assertFutureThrows(classOf[GroupIdNotFoundException], describedNonExistingGroupResponse.all()) + + } finally { + Utils.closeQuietly(streams, "streams") + Utils.closeQuietly(client, "adminClient") + } + } + + @Test + def testDescribeStreamsGroupsNotReady(): Unit = { + val streamsGroupId = "stream_group_id" + val testTopicName = "test_topic" + + val config = createConfig + client = Admin.create(config) + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId + ) + streams.poll(JDuration.ofMillis(500L)) + + try { + TestUtils.waitUntilTrue(() => { + val firstGroup = client.listGroups().all().get().stream() + .filter(g => g.groupId() == streamsGroupId).findFirst().orElse(null) + firstGroup.groupState().orElse(null) == GroupState.NOT_READY && firstGroup.groupId() == streamsGroupId + }, "Stream group not NOT_READY yet") + + // Verify the describe call works correctly + val describedGroups = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() + val group = describedGroups.get(streamsGroupId) + assertNotNull(group) + assertEquals(streamsGroupId, group.groupId()) + 
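testDescribeStreamsGroups introduces the describeStreamsGroups admin call. A hypothetical minimal usage sketch, relying only on the accessors the test itself exercises; the group id and bootstrap address are placeholders:

import java.util
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig}

object DescribeStreamsGroupSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      val groupId = "stream_group_id" // placeholder group id
      val described = admin.describeStreamsGroups(util.List.of(groupId)).all().get()
      val group = described.get(groupId)
      // Accessors used by the test: state, members and the subtopologies of the deployed topology.
      println(group.groupState())
      println(group.members().size())
      group.subtopologies().forEach(st => println(st.sourceTopics()))
    } finally admin.close()
  }
}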
assertFalse(group.members().isEmpty) + assertNotNull(group.subtopologies()) + assertFalse(group.subtopologies().isEmpty) + + // Verify the topology contains the expected source and sink topics + val subtopologies = group.subtopologies().asScala + assertTrue(subtopologies.exists(subtopology => + subtopology.sourceTopics().contains(testTopicName))) + + } finally { + Utils.closeQuietly(streams, "streams") + Utils.closeQuietly(client, "adminClient") + } + } + + @Test + def testDeleteStreamsGroups(): Unit = { + val testTopicName = "test_topic" + val testNumPartitions = 3 + val testNumStreamsGroup = 3 + + val targetDeletedGroups = util.List.of("stream_group_id_2", "stream_group_id_3") + val targetRemainingGroups = util.List.of("stream_group_id_1") + + val config = createConfig + client = Admin.create(config) + + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) + + val streamsList = scala.collection.mutable.ListBuffer[(String, AsyncKafkaConsumer[_,_])]() + + try { + for (i <- 1 to testNumStreamsGroup) { + val streamsGroupId = s"stream_group_id_$i" + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId, + ) + streams.poll(JDuration.ofMillis(500L)) + streamsList += ((streamsGroupId, streams)) + } + + TestUtils.waitUntilTrue(() => { + val groups = client.listGroups().all().get() + groups.stream() + .anyMatch(g => g.groupId().startsWith("stream_group_id_")) && testNumStreamsGroup == groups.size() + }, "Streams groups not ready to delete yet") + + // Test deletion of non-empty existing groups + var deleteStreamsGroupResult = client.deleteStreamsGroups(targetDeletedGroups) + assertFutureThrows(classOf[GroupNotEmptyException], deleteStreamsGroupResult.all()) + assertEquals(2, deleteStreamsGroupResult.deletedGroups().size()) + + // Stop and clean up the streams for the groups that are going to be deleted + streamsList + .filter { case (groupId, _) => targetDeletedGroups.contains(groupId) } + .foreach { case (_, streams) => + streams.close() + } + + val listTopicResult = client.listTopics() + assertEquals(2, listTopicResult.names().get().size()) + + // Test deletion of emptied existing streams groups + deleteStreamsGroupResult = client.deleteStreamsGroups(targetDeletedGroups) + assertEquals(2, deleteStreamsGroupResult.deletedGroups().size()) + + // Wait for the deleted groups to be removed + TestUtils.waitUntilTrue(() => { + val groupIds = client.listGroups().all().get().asScala.map(_.groupId()).toSet + targetDeletedGroups.asScala.forall(id => !groupIds.contains(id)) + }, "Deleted groups not yet deleted") + + // Verify that the deleted groups are no longer present + val remainingGroups = client.listGroups().all().get() + assertEquals(targetRemainingGroups.size(), remainingGroups.size()) + remainingGroups.stream().forEach(g => { + assertTrue(targetRemainingGroups.contains(g.groupId())) + }) + + // Test deletion of a non-existing group + val nonExistingGroup = "non_existing_stream_group" + val deleteNonExistingGroupResult = client.deleteStreamsGroups(util.List.of(nonExistingGroup)) + assertFutureThrows(classOf[GroupIdNotFoundException], deleteNonExistingGroupResult.all()) + assertEquals(deleteNonExistingGroupResult.deletedGroups().size(), 1) + + } finally{ + streamsList.foreach { case (_, streams) => + streams.close() + } + Utils.closeQuietly(client, "adminClient") + } + } + + @Test + def testListStreamsGroupOffsets(): Unit = { + val streamsGroupId = "stream_group_id" + val testTopicName = "test_topic" + val 
testNumPartitions = 3 + + val config = createConfig + client = Admin.create(config) + val producer = createProducer(configOverrides = new Properties()) + + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) + + // Producer sends messages + for (i <- 1 to 20) { + TestUtils.waitUntilTrue(() => { + val producerRecord = producer.send( + new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) + .get() + producerRecord != null && producerRecord.topic() == testTopicName + }, "Fail to produce record to topic") + } + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId, + ) + + try { + TestUtils.waitUntilTrue(() => { + streams.poll(JDuration.ofMillis(100L)) + !streams.assignment().isEmpty + }, "Consumer not assigned to partitions") + + streams.poll(JDuration.ofMillis(1000L)) + streams.commitSync() + + TestUtils.waitUntilTrue(() => { + val firstGroup = client.listGroups().all().get().stream().findFirst().orElse(null) + firstGroup.groupState().orElse(null) == GroupState.STABLE && firstGroup.groupId() == streamsGroupId + }, "Stream group not stable yet") + + val allTopicPartitions = client.listStreamsGroupOffsets( + util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) + ).partitionsToOffsetAndMetadata(streamsGroupId).get() + assertNotNull(allTopicPartitions) + assertEquals(allTopicPartitions.size(), 3) + allTopicPartitions.forEach((topicPartition, offsetAndMetadata) => { + assertNotNull(topicPartition) + assertNotNull(offsetAndMetadata) + assertTrue(topicPartition.topic().startsWith(testTopicName)) + assertTrue(offsetAndMetadata.offset() >= 0) + }) + + } finally { + Utils.closeQuietly(streams, "streams") + Utils.closeQuietly(client, "adminClient") + Utils.closeQuietly(producer, "producer") + } + } + + @Test + def testDeleteStreamsGroupOffsets(): Unit = { + val streamsGroupId = "stream_group_id" + val testTopicName = "test_topic" + val testNumPartitions = 3 + + val config = createConfig + client = Admin.create(config) + val producer = createProducer(configOverrides = new Properties()) + + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) + // Producer sends messages + for (i <- 1 to 20) { + TestUtils.waitUntilTrue(() => { + val producerRecord = producer.send( + new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) + .get() + producerRecord != null && producerRecord.topic() == testTopicName + }, "Fail to produce record to topic") + } + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId, + ) + + try { + TestUtils.waitUntilTrue(() => { + streams.poll(JDuration.ofMillis(100L)) + !streams.assignment().isEmpty + }, "Consumer not assigned to partitions") + + streams.poll(JDuration.ofMillis(1000L)) + streams.commitSync() + + // List streams group offsets + TestUtils.waitUntilTrue(() => { + val allTopicPartitions = client.listStreamsGroupOffsets( + util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) + ).partitionsToOffsetAndMetadata(streamsGroupId).get() + allTopicPartitions!=null && allTopicPartitions.size() == testNumPartitions + },"Streams group offsets not ready to list yet") + + // Verify running Kstreams group cannot delete its own offsets + var deleteStreamsGroupOffsetsResult = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, 0))) + 
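The offset tests above rely on listStreamsGroupOffsets with a per-group ListStreamsGroupOffsetsSpec. A minimal sketch of that lookup, assuming the API and class location follow the usage in these tests; the group id and bootstrap address are placeholders:

import java.util
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ListStreamsGroupOffsetsSpec}

object StreamsGroupOffsetsSketch {
  def main(args: Array[String]): Unit = {
    val props = new util.Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
    val admin = Admin.create(props)
    try {
      val groupId = "stream_group_id" // placeholder group id
      // One spec per group id; the result maps each TopicPartition to its committed offset.
      val offsets = admin.listStreamsGroupOffsets(util.Map.of(groupId, new ListStreamsGroupOffsetsSpec()))
        .partitionsToOffsetAndMetadata(groupId).get()
      offsets.forEach((tp, om) => println(s"$tp -> ${om.offset()}"))
    } finally admin.close()
  }
}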
assertFutureThrows(classOf[GroupSubscribedToTopicException], deleteStreamsGroupOffsetsResult.all()) + + // Verify a stopped Kafka Streams group can delete its own offsets + streams.close() + TestUtils.waitUntilTrue(() => { + val groupDescription = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() + groupDescription.get(streamsGroupId).groupState() == GroupState.EMPTY + }, "Streams group not closed yet") + deleteStreamsGroupOffsetsResult = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, 0))) + val res = deleteStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 0)).get() + assertNull(res) + + // Verify the group offsets after deletion + val allTopicPartitions = client.listStreamsGroupOffsets( + util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) + ).partitionsToOffsetAndMetadata(streamsGroupId).get() + assertEquals(testNumPartitions - 1, allTopicPartitions.size()) + + // Verify non-existing topic partition couldn't be deleted + val deleteStreamsGroupOffsetsResultWithFakeTopic = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition("mock-topic", 1))) + assertFutureThrows(classOf[UnknownTopicOrPartitionException], deleteStreamsGroupOffsetsResultWithFakeTopic.all()) + val deleteStreamsGroupOffsetsResultWithFakePartition = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, testNumPartitions))) + assertFutureThrows(classOf[UnknownTopicOrPartitionException], deleteStreamsGroupOffsetsResultWithFakePartition.all()) + } finally { + Utils.closeQuietly(streams, "streams") + Utils.closeQuietly(client, "adminClient") + Utils.closeQuietly(producer, "producer") + } + } + + @Test + def testAlterStreamsGroupOffsets(): Unit = { + val streamsGroupId = "stream_group_id" + val testTopicName = "test_topic" + val testNumPartitions = 3 + + val config = createConfig + client = Admin.create(config) + val producer = createProducer(configOverrides = new Properties()) + + prepareTopics(List(testTopicName), testNumPartitions) + prepareRecords(testTopicName) + + // Producer sends messages + for (i <- 1 to 20) { + TestUtils.waitUntilTrue(() => { + val producerRecord = producer.send( + new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) + .get() + producerRecord != null && producerRecord.topic() == testTopicName + }, "Fail to produce record to topic") + } + + val streams = createStreamsGroup( + inputTopic = testTopicName, + streamsGroupId = streamsGroupId, + ) + + try { + TestUtils.waitUntilTrue(() => { + streams.poll(JDuration.ofMillis(100L)) + !streams.assignment().isEmpty + }, "Consumer not assigned to partitions") + + streams.poll(JDuration.ofMillis(1000L)) + streams.commitSync() + + // List streams group offsets + TestUtils.waitUntilTrue(() => { + val allTopicPartitions = client.listStreamsGroupOffsets( + util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) + ).partitionsToOffsetAndMetadata(streamsGroupId).get() + allTopicPartitions != null && allTopicPartitions.size() == testNumPartitions + }, "Streams group offsets not ready to list yet") + + // Stop the Kafka Streams group so that its offsets can be altered + streams.close() + TestUtils.waitUntilTrue(() => { + val groupDescription = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() + groupDescription.get(streamsGroupId).groupState() == GroupState.EMPTY + }, "Streams group not closed yet") + + val offsets = util.Map.of( + new
TopicPartition(testTopicName, 0), new OffsetAndMetadata(1L), + new TopicPartition(testTopicName, 1), new OffsetAndMetadata(10L) + ) + val alterStreamsGroupOffsetsResult = client.alterStreamsGroupOffsets(streamsGroupId, offsets) + val res0 = alterStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 0)).get() + val res1 = alterStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 1)).get() + assertTrue(res0 == null && res1 == null, "Alter streams group offsets should return null for each partition result") + + val allTopicPartitions = client.listStreamsGroupOffsets( + util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) + ).partitionsToOffsetAndMetadata(streamsGroupId).get() + assertNotNull(allTopicPartitions) + assertEquals(testNumPartitions, allTopicPartitions.size()) + assertEquals(1L, allTopicPartitions.get(new TopicPartition(testTopicName, 0)).offset()) + assertEquals(10L, allTopicPartitions.get(new TopicPartition(testTopicName, 1)).offset()) + + } finally { + Utils.closeQuietly(streams, "streams") + Utils.closeQuietly(client, "adminClient") + Utils.closeQuietly(producer, "producer") + } + } } object PlaintextAdminIntegrationTest { @@ -4038,20 +4796,20 @@ object PlaintextAdminIntegrationTest { retentionMs: String): Unit = { // Alter topics val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), OpType.SET))) - alterConfigs.put(topicResource2, util.Arrays.asList( + alterConfigs.put(topicResource1, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), OpType.SET))) + alterConfigs.put(topicResource2, util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) var alterResult = admin.incrementalAlterConfigs(alterConfigs) - assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(topicResource1, topicResource2), alterResult.values.keySet) alterResult.all.get // Verify that topics were updated correctly test.ensureConsistentKRaftMetadata() // Intentionally include duplicate resources to test if describeConfigs can handle them correctly. 
- var describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2, topicResource2).asJava) + var describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, topicResource2)) var configs = describeResult.all.get assertEquals(2, configs.size) @@ -4064,16 +4822,16 @@ object PlaintextAdminIntegrationTest { assertEquals("lz4", configs.get(topicResource2).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) // Alter topics with validateOnly=true - alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10"), OpType.SET))) - alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3"), OpType.SET))) + alterConfigs.put(topicResource1, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10"), OpType.SET))) + alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3"), OpType.SET))) alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) - assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(topicResource1, topicResource2), alterResult.values.keySet) alterResult.all.get // Verify that topics were not updated due to validateOnly = true test.ensureConsistentKRaftMetadata() - describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2).asJava) + describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2)) configs = describeResult.all.get assertEquals(2, configs.size) @@ -4099,22 +4857,22 @@ object PlaintextAdminIntegrationTest { // Alter configs: first and third are invalid, second is valid val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - alterConfigs.put(topicResource1, util.Arrays.asList( + alterConfigs.put(topicResource1, util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) - alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), OpType.SET))) - alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) + alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), OpType.SET))) + alterConfigs.put(brokerResource, util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) var alterResult = admin.incrementalAlterConfigs(alterConfigs) - assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(topicResource1, topicResource2, brokerResource), alterResult.values.keySet) assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource1)) alterResult.values.get(topicResource2).get assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) // Verify that first and third resources were not updated and second was updated test.ensureConsistentKRaftMetadata() - var describeResult = 
admin.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava) + var describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, brokerResource)) var configs = describeResult.all.get assertEquals(3, configs.size) @@ -4128,22 +4886,22 @@ object PlaintextAdminIntegrationTest { assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) // Alter configs with validateOnly = true: first and third are invalid, second is valid - alterConfigs.put(topicResource1, util.Arrays.asList( + alterConfigs.put(topicResource1, util.List.of( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) - alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), OpType.SET))) - alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) + alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), OpType.SET))) + alterConfigs.put(brokerResource, util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) - assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) + assertEquals(util.Set.of(topicResource1, topicResource2, brokerResource), alterResult.values.keySet) assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource1)) alterResult.values.get(topicResource2).get assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) // Verify that no resources are updated since validate_only = true test.ensureConsistentKRaftMetadata() - describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava) + describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, brokerResource)) configs = describeResult.all.get assertEquals(3, configs.size) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala deleted file mode 100644 index 474e10100d877..0000000000000 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations under the License. - */ -package kafka.api - -import kafka.utils.{TestInfoUtils, TestUtils} -import java.util.Properties -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.common.TopicPartition -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import scala.jdk.CollectionConverters._ - -/** - * Integration tests for the consumer that covers logic related to manual assignment. - */ -@Timeout(600) -class PlaintextConsumerAssignTest extends AbstractConsumerTest { - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndCommitAsyncNotCommitted(groupProtocol: String): Unit = { - val props = new Properties() - val consumer = createConsumer(configOverrides = props) - val producer = createProducer() - val numRecords = 10000 - val startingTimestamp = System.currentTimeMillis() - val cb = new CountConsumerCommitCallback - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - consumer.assign(List(tp).asJava) - consumer.commitAsync(cb) - TestUtils.pollUntilTrue(consumer, () => cb.successCount >= 1 || cb.lastError.isDefined, - "Failed to observe commit callback before timeout", waitTimeMs = 10000) - val committedOffset = consumer.committed(Set(tp).asJava) - assertNotNull(committedOffset) - // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to - // tp. The committed offset should be null. This is intentional. - assertNull(committedOffset.get(tp)) - assertTrue(consumer.assignment.contains(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndCommitSyncNotCommitted(groupProtocol: String): Unit = { - val props = new Properties() - val consumer = createConsumer(configOverrides = props) - val producer = createProducer() - val numRecords = 10000 - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - consumer.assign(List(tp).asJava) - consumer.commitSync() - val committedOffset = consumer.committed(Set(tp).asJava) - assertNotNull(committedOffset) - // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to - // tp. The committed offset should be null. This is intentional. 
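// A minimal standalone sketch of the contract described in the comment above and checked by the
// assertNull(...) that follows: with manual assignment and no prior poll(), commitSync() has no
// fetch position to commit, so committed(...) maps the partition to null. The broker address,
// topic name and group id below are placeholders assumed only for this sketch.
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

object ManualAssignCommitSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")   // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "sketch-group")              // assumed group id
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    val tp = new TopicPartition("sketch-topic", 0)                         // assumed topic
    consumer.assign(java.util.List.of(tp))
    consumer.commitSync()                                       // no position yet, nothing committed
    println(consumer.committed(java.util.Set.of(tp)).get(tp))   // prints null
    consumer.close()
  }
}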
- assertNull(committedOffset.get(tp)) - assertTrue(consumer.assignment.contains(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndCommitSyncAllConsumed(groupProtocol: String): Unit = { - val numRecords = 10000 - - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - val props = new Properties() - val consumer = createConsumer(configOverrides = props) - consumer.assign(List(tp).asJava) - consumer.seek(tp, 0) - consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) - - consumer.commitSync() - val committedOffset = consumer.committed(Set(tp).asJava) - assertNotNull(committedOffset) - assertNotNull(committedOffset.get(tp)) - assertEquals(numRecords, committedOffset.get(tp).offset()) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndConsume(groupProtocol: String): Unit = { - val numRecords = 10 - - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - val props = new Properties() - val consumer = createConsumer(configOverrides = props, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) - - assertEquals(numRecords, consumer.position(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndConsumeSkippingPosition(groupProtocol: String): Unit = { - val numRecords = 10 - - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - val props = new Properties() - val consumer = createConsumer(configOverrides = props, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(List(tp).asJava) - val offset = 1 - consumer.seek(tp, offset) - consumeAndVerifyRecords(consumer = consumer, numRecords - offset, startingOffset = offset, - startingKeyAndValueIndex = offset, startingTimestamp = startingTimestamp + offset) - - assertEquals(numRecords, consumer.position(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndFetchCommittedOffsets(groupProtocol: String): Unit = { - val numRecords = 100 - val startingTimestamp = System.currentTimeMillis() - val producer = createProducer() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - val props = new Properties() - val consumer = createConsumer(configOverrides = props) - consumer.assign(List(tp).asJava) - // First consumer consumes and commits offsets - consumer.seek(tp, 0) - consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, - startingTimestamp = startingTimestamp) - consumer.commitSync() - assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) - // We should see the committed offsets from another consumer - val anotherConsumer = 
createConsumer(configOverrides = props) - anotherConsumer.assign(List(tp).asJava) - assertEquals(numRecords, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndConsumeFromCommittedOffsets(groupProtocol: String): Unit = { - val producer = createProducer() - val numRecords = 100 - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = numRecords, tp, startingTimestamp = startingTimestamp) - - // Commit offset with first consumer - val props = new Properties() - props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group1") - val consumer = createConsumer(configOverrides = props) - consumer.assign(List(tp).asJava) - val offset = 10 - consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(offset))) - .asJava) - assertEquals(offset, consumer.committed(Set(tp).asJava).get(tp).offset) - consumer.close() - - // Consume from committed offsets with another consumer in same group - val anotherConsumer = createConsumer(configOverrides = props) - assertEquals(offset, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) - anotherConsumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = anotherConsumer, numRecords - offset, - startingOffset = offset, startingKeyAndValueIndex = offset, - startingTimestamp = startingTimestamp + offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAssignAndRetrievingCommittedOffsetsMultipleTimes(groupProtocol: String): Unit = { - val numRecords = 100 - val startingTimestamp = System.currentTimeMillis() - val producer = createProducer() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - val props = new Properties() - val consumer = createConsumer(configOverrides = props) - consumer.assign(List(tp).asJava) - - // Consume and commit offsets - consumer.seek(tp, 0) - consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, - startingTimestamp = startingTimestamp) - consumer.commitSync() - - // Check committed offsets twice with same consumer - assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) - } - -} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala index 6923199b93ddd..bd36c22127fdf 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala @@ -25,7 +25,6 @@ import java.util import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock import scala.collection.mutable -import scala.jdk.CollectionConverters._ /** * Integration tests for the consumer that covers assignors logic (client and server side assignors) @@ -52,7 +51,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) awaitAssignment(consumer, expectedAssignment) // add one more topic with 2 partitions @@ -60,11 +59,11 @@ class 
PlaintextConsumerAssignorsTest extends AbstractConsumerTest { createTopicAndSendRecords(producer, topic3, 2, 100) val newExpectedAssignment = expectedAssignment ++ Set(new TopicPartition(topic3, 0), new TopicPartition(topic3, 1)) - consumer.subscribe(List(topic1, topic2, topic3).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2, topic3)) awaitAssignment(consumer, newExpectedAssignment) // remove the topic we just added - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) awaitAssignment(consumer, expectedAssignment) consumer.unsubscribe() @@ -251,7 +250,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(List(topic1).asJava) + consumer.subscribe(java.util.List.of(topic1)) val e: UnsupportedAssignorException = assertThrows( classOf[UnsupportedAssignorException], @@ -282,7 +281,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) awaitAssignment(consumer, expectedAssignment) // add one more topic with 2 partitions @@ -290,11 +289,11 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { val additionalAssignment = createTopicAndSendRecords(producer, topic3, 2, 100) val newExpectedAssignment = expectedAssignment ++ additionalAssignment - consumer.subscribe(List(topic1, topic2, topic3).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2, topic3)) awaitAssignment(consumer, newExpectedAssignment) // remove the topic we just added - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) awaitAssignment(consumer, expectedAssignment) consumer.unsubscribe() diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala deleted file mode 100644 index 0445e81cac138..0000000000000 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala +++ /dev/null @@ -1,371 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package kafka.api - -import kafka.utils.{TestInfoUtils, TestUtils} -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.producer.ProducerRecord -import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer} -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.test.MockConsumerInterceptor -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import java.time.Duration -import java.util -import java.util.Optional -import scala.jdk.CollectionConverters._ - -/** - * Integration tests for the consumer that covers the logic related to committing offsets. - */ -@Timeout(600) -class PlaintextConsumerCommitTest extends AbstractConsumerTest { - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoCommitOnClose(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") - val consumer = createConsumer() - - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords, tp) - - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, Set(tp, tp2)) - - // should auto-commit sought positions before closing - consumer.seek(tp, 300) - consumer.seek(tp2, 500) - consumer.close() - - // now we should see the committed positions from another consumer - val anotherConsumer = createConsumer() - assertEquals(300, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(500, anotherConsumer.committed(Set(tp2).asJava).get(tp2).offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoCommitOnCloseAfterWakeup(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") - val consumer = createConsumer() - - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords, tp) - - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, Set(tp, tp2)) - - // should auto-commit sought positions before closing - consumer.seek(tp, 300) - consumer.seek(tp2, 500) - - // wakeup the consumer before closing to simulate trying to break a poll - // loop from another thread - consumer.wakeup() - consumer.close() - - // now we should see the committed positions from another consumer - val anotherConsumer = createConsumer() - assertEquals(300, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(500, anotherConsumer.committed(Set(tp2).asJava).get(tp2).offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitMetadata(groupProtocol: String): Unit = { - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - - // sync commit - val syncMetadata = new OffsetAndMetadata(5, Optional.of(15), "foo") - consumer.commitSync(Map((tp, syncMetadata)).asJava) - assertEquals(syncMetadata, consumer.committed(Set(tp).asJava).get(tp)) - - // async commit - val asyncMetadata = new OffsetAndMetadata(10, "bar") - sendAndAwaitAsyncCommit(consumer, Some(Map(tp -> asyncMetadata))) - assertEquals(asyncMetadata, consumer.committed(Set(tp).asJava).get(tp)) - - // handle null metadata - val 
nullMetadata = new OffsetAndMetadata(5, null) - consumer.commitSync(Map(tp -> nullMetadata).asJava) - assertEquals(nullMetadata, consumer.committed(Set(tp).asJava).get(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAsyncCommit(groupProtocol: String): Unit = { - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - - val callback = new CountConsumerCommitCallback - val count = 5 - - for (i <- 1 to count) - consumer.commitAsync(Map(tp -> new OffsetAndMetadata(i)).asJava, callback) - - TestUtils.pollUntilTrue(consumer, () => callback.successCount >= count || callback.lastError.isDefined, - "Failed to observe commit callback before timeout", waitTimeMs = 10000) - - assertEquals(None, callback.lastError) - assertEquals(count, callback.successCount) - assertEquals(new OffsetAndMetadata(count), consumer.committed(Set(tp).asJava).get(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoCommitIntercept(groupProtocol: String): Unit = { - val topic2 = "topic2" - createTopic(topic2, 2, brokerCount) - - // produce records - val numRecords = 100 - val testProducer = createProducer(keySerializer = new StringSerializer, valueSerializer = new StringSerializer) - (0 until numRecords).map { i => - testProducer.send(new ProducerRecord(tp.topic(), tp.partition(), s"key $i", s"value $i")) - }.foreach(_.get) - - // create consumer with interceptor - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") - this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") - val testConsumer = createConsumer(keyDeserializer = new StringDeserializer, valueDeserializer = new StringDeserializer) - val rebalanceListener = new ConsumerRebalanceListener { - override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - // keep partitions paused in this test so that we can verify the commits based on specific seeks - testConsumer.pause(partitions) - } - - override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {} - } - changeConsumerSubscriptionAndValidateAssignment(testConsumer, List(topic), Set(tp, tp2), rebalanceListener) - testConsumer.seek(tp, 10) - testConsumer.seek(tp2, 20) - - // change subscription to trigger rebalance - val commitCountBeforeRebalance = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() - changeConsumerSubscriptionAndValidateAssignment(testConsumer, - List(topic, topic2), - Set(tp, tp2, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)), - rebalanceListener) - - // after rebalancing, we should have reset to the committed positions - assertEquals(10, testConsumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(20, testConsumer.committed(Set(tp2).asJava).get(tp2).offset) - - // In both CLASSIC and CONSUMER protocols, interceptors are executed in poll and close. - // However, in the CONSUMER protocol, the assignment may be changed outside of a poll, so - // we need to poll once to ensure the interceptor is called. 
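// The comment above leans on how consumer interceptors are wired in: onCommit() only runs from
// poll() and close(), which is why the test polls once under the CONSUMER protocol before
// checking MockConsumerInterceptor.ON_COMMIT_COUNT. MockConsumerInterceptor is a test-only
// helper; a rough sketch of an equivalent interceptor and the two consumer settings the test
// relies on follows (the class name and counter are assumptions for illustration only).
import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerInterceptor, ConsumerRecords, OffsetAndMetadata}
import org.apache.kafka.common.TopicPartition

class CountingCommitInterceptor extends ConsumerInterceptor[String, String] {
  override def configure(configs: java.util.Map[String, _]): Unit = ()
  override def onConsume(records: ConsumerRecords[String, String]): ConsumerRecords[String, String] = records
  override def onCommit(offsets: java.util.Map[TopicPartition, OffsetAndMetadata]): Unit =
    CountingCommitInterceptor.commits.incrementAndGet()   // invoked from poll() and close()
  override def close(): Unit = ()
}

object CountingCommitInterceptor {
  val commits = new AtomicInteger(0)
}

object InterceptorConfigSketch {
  // Auto-commit plus the interceptor registered by class name, mirroring the deleted test setup.
  def consumerProps(): Properties = {
    val props = new Properties()
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
    props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[CountingCommitInterceptor].getName)
    props
  }
}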
- if (groupProtocol.toUpperCase == GroupProtocol.CONSUMER.name) { - testConsumer.poll(Duration.ZERO) - } - - assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeRebalance) - - // verify commits are intercepted on close - val commitCountBeforeClose = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() - testConsumer.close() - assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeClose) - testProducer.close() - - // cleanup - MockConsumerInterceptor.resetCounters() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitSpecifiedOffsets(groupProtocol: String): Unit = { - val producer = createProducer() - sendRecords(producer, numRecords = 5, tp) - sendRecords(producer, numRecords = 7, tp2) - - val consumer = createConsumer() - consumer.assign(List(tp, tp2).asJava) - - val pos1 = consumer.position(tp) - val pos2 = consumer.position(tp2) - consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(3L))).asJava) - assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) - assertNull(consumer.committed(Set(tp2).asJava).get(tp2)) - - // Positions should not change - assertEquals(pos1, consumer.position(tp)) - assertEquals(pos2, consumer.position(tp2)) - consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(5L))).asJava) - assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(5, consumer.committed(Set(tp2).asJava).get(tp2).offset) - - // Using async should pick up the committed changes after commit completes - sendAndAwaitAsyncCommit(consumer, Some(Map(tp2 -> new OffsetAndMetadata(7L)))) - assertEquals(7, consumer.committed(Set(tp2).asJava).get(tp2).offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoCommitOnRebalance(groupProtocol: String): Unit = { - val topic2 = "topic2" - createTopic(topic2, 2, brokerCount) - - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") - val consumer = createConsumer() - - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords, tp) - - val rebalanceListener = new ConsumerRebalanceListener { - override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - // keep partitions paused in this test so that we can verify the commits based on specific seeks - consumer.pause(partitions) - } - - override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {} - } - - consumer.subscribe(List(topic).asJava, rebalanceListener) - - awaitAssignment(consumer, Set(tp, tp2)) - - consumer.seek(tp, 300) - consumer.seek(tp2, 500) - - // change subscription to trigger rebalance - consumer.subscribe(List(topic, topic2).asJava, rebalanceListener) - - val newAssignment = Set(tp, tp2, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)) - awaitAssignment(consumer, newAssignment) - - // after rebalancing, we should have reset to the committed positions - assertEquals(300, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(500, consumer.committed(Set(tp2).asJava).get(tp2).offset) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSubscribeAndCommitSync(groupProtocol: 
String): Unit = { - // This test ensure that the member ID is propagated from the group coordinator when the - // assignment is received into a subsequent offset commit - val consumer = createConsumer() - assertEquals(0, consumer.assignment.size) - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, Set(tp, tp2)) - - consumer.seek(tp, 0) - - consumer.commitSync() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPositionAndCommit(groupProtocol: String): Unit = { - val producer = createProducer() - var startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) - - val topicPartition = new TopicPartition(topic, 15) - val consumer = createConsumer() - assertNull(consumer.committed(Set(topicPartition).asJava).get(topicPartition)) - - // position() on a partition that we aren't subscribed to throws an exception - assertThrows(classOf[IllegalStateException], () => consumer.position(topicPartition)) - - consumer.assign(List(tp).asJava) - - assertEquals(0L, consumer.position(tp), "position() on a partition that we are subscribed to should reset the offset") - consumer.commitSync() - assertEquals(0L, consumer.committed(Set(tp).asJava).get(tp).offset) - consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) - assertEquals(5L, consumer.position(tp), "After consuming 5 records, position should be 5") - consumer.commitSync() - assertEquals(5L, consumer.committed(Set(tp).asJava).get(tp).offset, "Committed offset should be returned") - - startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 1, tp, startingTimestamp = startingTimestamp) - - // another consumer in the same group should get the same position - val otherConsumer = createConsumer() - otherConsumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = otherConsumer, numRecords = 1, startingOffset = 5, startingTimestamp = startingTimestamp) - } - - // TODO: This only works in the new consumer, but should be fixed for the old consumer as well - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testCommitAsyncCompletedBeforeConsumerCloses(groupProtocol: String): Unit = { - // This is testing the contract that asynchronous offset commit are completed before the consumer - // is closed, even when no commit sync is performed as part of the close (due to auto-commit - // disabled, or simply because there are no consumed offsets). 
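// A minimal standalone sketch of that contract (the deleted tests exercise it only for the
// CONSUMER group protocol): close() completes in-flight asynchronous commits, so the callback
// has run by the time close() returns. Broker address, topic and group id are placeholders
// assumed for the sketch.
import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer, OffsetAndMetadata, OffsetCommitCallback}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

object AsyncCommitBeforeCloseSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")   // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "sketch-group")              // assumed group id
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    val tp = new TopicPartition("sketch-topic", 0)                         // assumed topic
    consumer.assign(java.util.List.of(tp))

    val completed = new AtomicInteger(0)
    consumer.commitAsync(java.util.Map.of(tp, new OffsetAndMetadata(1L)), new OffsetCommitCallback {
      override def onComplete(offsets: java.util.Map[TopicPartition, OffsetAndMetadata],
                              exception: Exception): Unit = completed.incrementAndGet()
    })
    consumer.close()           // completes the pending async commit before returning
    println(completed.get())   // expected: 1
  }
}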
- val producer = createProducer() - sendRecords(producer, numRecords = 3, tp) - sendRecords(producer, numRecords = 3, tp2) - - val consumer = createConsumer() - consumer.assign(List(tp, tp2).asJava) - - // Try without looking up the coordinator first - val cb = new CountConsumerCommitCallback - consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(1L))).asJava, cb) - consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(1L))).asJava, cb) - consumer.close() - assertEquals(2, cb.successCount) - } - - // TODO: This only works in the new consumer, but should be fixed for the old consumer as well - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testCommitAsyncCompletedBeforeCommitSyncReturns(groupProtocol: String): Unit = { - // This is testing the contract that asynchronous offset commits sent previously with the - // `commitAsync` are guaranteed to have their callbacks invoked prior to completion of - // `commitSync` (given that it does not time out). - val producer = createProducer() - sendRecords(producer, numRecords = 3, tp) - sendRecords(producer, numRecords = 3, tp2) - - val consumer = createConsumer() - consumer.assign(List(tp, tp2).asJava) - - // Try without looking up the coordinator first - val cb = new CountConsumerCommitCallback - consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(1L))).asJava, cb) - consumer.commitSync(Map.empty[TopicPartition, OffsetAndMetadata].asJava) - assertEquals(1, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(1, cb.successCount) - - // Try with coordinator known - consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(2L))).asJava, cb) - consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(2L))).asJava) - assertEquals(2, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(2, consumer.committed(Set(tp2).asJava).get(tp2).offset) - assertEquals(2, cb.successCount) - - // Try with empty sync commit - consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(3L))).asJava, cb) - consumer.commitSync(Map.empty[TopicPartition, OffsetAndMetadata].asJava) - assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(2, consumer.committed(Set(tp2).asJava).get(tp2).offset) - assertEquals(3, cb.successCount) - } - - def changeConsumerSubscriptionAndValidateAssignment[K, V](consumer: Consumer[K, V], - topicsToSubscribe: List[String], - expectedAssignment: Set[TopicPartition], - rebalanceListener: ConsumerRebalanceListener): Unit = { - consumer.subscribe(topicsToSubscribe.asJava, rebalanceListener) - awaitAssignment(consumer, expectedAssignment) - } -} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala deleted file mode 100644 index f1a91658669f5..0000000000000 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala +++ /dev/null @@ -1,283 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package kafka.api - -import kafka.utils.TestInfoUtils -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.producer.ProducerRecord -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource -import org.apache.kafka.common.TopicPartition - -import java.time.{Duration, Instant} -import scala.jdk.CollectionConverters._ - -/** - * Integration tests for the consumer that covers fetching logic - */ -@Timeout(600) -class PlaintextConsumerFetchTest extends AbstractConsumerTest { - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchInvalidOffset(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none") - val consumer = createConsumer(configOverrides = this.consumerConfig) - - // produce one record - val totalRecords = 2 - val producer = createProducer() - sendRecords(producer, totalRecords, tp) - consumer.assign(List(tp).asJava) - - // poll should fail because there is no offset reset strategy set. - // we fail only when resetting positions after coordinator is known, so using a long timeout. 
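// A minimal standalone sketch of the behaviour the following assertThrows checks: with
// auto.offset.reset set to "none" and no committed offset, the consumer cannot establish a
// position and poll() fails with NoOffsetForPartitionException. Broker address, topic and
// group id are placeholders assumed for the sketch.
import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer, NoOffsetForPartitionException}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

object OffsetResetNoneSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")   // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "sketch-group")              // assumed group id
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none")             // no reset strategy
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
    val consumer = new KafkaConsumer[String, String](props)
    consumer.assign(java.util.List.of(new TopicPartition("sketch-topic", 0))) // assumed topic
    try {
      consumer.poll(Duration.ofSeconds(15))   // fails once a position has to be resolved
    } catch {
      case e: NoOffsetForPartitionException => println(s"no reset strategy set: ${e.getMessage}")
    } finally {
      consumer.close()
    }
  }
}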
- assertThrows(classOf[NoOffsetForPartitionException], () => consumer.poll(Duration.ofMillis(15000))) - - // seek to out of range position - val outOfRangePos = totalRecords + 1 - consumer.seek(tp, outOfRangePos) - val e = assertThrows(classOf[OffsetOutOfRangeException], () => consumer.poll(Duration.ofMillis(20000))) - val outOfRangePartitions = e.offsetOutOfRangePartitions() - assertNotNull(outOfRangePartitions) - assertEquals(1, outOfRangePartitions.size) - assertEquals(outOfRangePos.toLong, outOfRangePartitions.get(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchOutOfRangeOffsetResetConfigEarliest(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") - // ensure no in-flight fetch request so that the offset can be reset immediately - this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") - val consumer = createConsumer(configOverrides = this.consumerConfig) - val totalRecords = 10L - - val producer = createProducer() - val startingTimestamp = 0 - sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) - consumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt, startingOffset = 0) - // seek to out of range position - val outOfRangePos = totalRecords + 1 - consumer.seek(tp, outOfRangePos) - // assert that poll resets to the beginning position - consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0) - } - - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchOutOfRangeOffsetResetConfigLatest(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest") - // ensure no in-flight fetch request so that the offset can be reset immediately - this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") - val consumer = createConsumer(configOverrides = this.consumerConfig) - val totalRecords = 10L - - val producer = createProducer() - val startingTimestamp = 0 - sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) - consumer.assign(List(tp).asJava) - consumer.seek(tp, 0) - // consume some, but not all of the records - consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt / 2, startingOffset = 0) - // seek to out of range position - val outOfRangePos = totalRecords + 17 // arbitrary, much higher offset - consumer.seek(tp, outOfRangePos) - // assert that poll resets to the ending position - assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) - sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = totalRecords) - val nextRecord = consumer.poll(Duration.ofMillis(50)).iterator().next() - // ensure the seek went to the last known record at the time of the previous poll - assertEquals(totalRecords, nextRecord.offset()) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchOutOfRangeOffsetResetConfigByDuration(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "by_duration:PT1H") - // ensure no in-flight fetch request so that the offset can be reset immediately - 
this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") - - // Test the scenario where the requested duration much earlier than the starting offset - val consumer1 = createConsumer(configOverrides = this.consumerConfig) - val producer1 = createProducer() - val totalRecords = 10L - var startingTimestamp = System.currentTimeMillis() - sendRecords(producer1, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) - consumer1.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = consumer1, numRecords = totalRecords.toInt, startingOffset = 0, startingTimestamp = startingTimestamp) - - // seek to out of range position - var outOfRangePos = totalRecords + 1 - consumer1.seek(tp, outOfRangePos) - // assert that poll resets to the beginning position - consumeAndVerifyRecords(consumer = consumer1, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) - - // Test the scenario where starting offset is earlier than the requested duration - val consumer2 = createConsumer(configOverrides = this.consumerConfig) - val producer2 = createProducer() - val totalRecords2 = 25L - startingTimestamp = Instant.now().minus(Duration.ofHours(24)).toEpochMilli - //generate records with 1 hour interval for 1 day - sendRecords(producer2, totalRecords2.toInt, tp2, startingTimestamp = startingTimestamp, Duration.ofHours(1).toMillis) - consumer2.assign(List(tp2).asJava) - //consumer should read one record from last one hour - consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, - startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, - tp = tp2, - timestampIncrement = Duration.ofHours(1).toMillis) - - // seek to out of range position - outOfRangePos = totalRecords2 + 1 - consumer2.seek(tp2, outOfRangePos) - // assert that poll resets to the duration offset. 
consumer should read one record from last one hour - consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, - startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, - tp = tp2, - timestampIncrement = Duration.ofHours(1).toMillis) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchRecordLargerThanFetchMaxBytes(groupProtocol: String): Unit = { - val maxFetchBytes = 10 * 1024 - this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, maxFetchBytes.toString) - checkLargeRecord(maxFetchBytes + 1) - } - - private def checkLargeRecord(producerRecordSize: Int): Unit = { - val consumer = createConsumer() - - // produce a record that is larger than the configured fetch size - val record = new ProducerRecord(tp.topic(), tp.partition(), "key".getBytes, - new Array[Byte](producerRecordSize)) - val producer = createProducer() - producer.send(record) - - // consuming a record that is too large should succeed since KIP-74 - consumer.assign(List(tp).asJava) - val records = consumer.poll(Duration.ofMillis(20000)) - assertEquals(1, records.count) - val consumerRecord = records.iterator().next() - assertEquals(0L, consumerRecord.offset) - assertEquals(tp.topic(), consumerRecord.topic()) - assertEquals(tp.partition(), consumerRecord.partition()) - assertArrayEquals(record.key(), consumerRecord.key()) - assertArrayEquals(record.value(), consumerRecord.value()) - } - - /** We should only return a large record if it's the first record in the first non-empty partition of the fetch request */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchHonoursFetchSizeIfLargeRecordNotFirst(groupProtocol: String): Unit = { - val maxFetchBytes = 10 * 1024 - this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, maxFetchBytes.toString) - checkFetchHonoursSizeIfLargeRecordNotFirst(maxFetchBytes) - } - - private def checkFetchHonoursSizeIfLargeRecordNotFirst(largeProducerRecordSize: Int): Unit = { - val consumer = createConsumer() - - val smallRecord = new ProducerRecord(tp.topic(), tp.partition(), "small".getBytes, - "value".getBytes) - val largeRecord = new ProducerRecord(tp.topic(), tp.partition(), "large".getBytes, - new Array[Byte](largeProducerRecordSize)) - - val producer = createProducer() - producer.send(smallRecord).get - producer.send(largeRecord).get - - // we should only get the small record in the first `poll` - consumer.assign(List(tp).asJava) - val records = consumer.poll(Duration.ofMillis(20000)) - assertEquals(1, records.count) - val consumerRecord = records.iterator().next() - assertEquals(0L, consumerRecord.offset) - assertEquals(tp.topic(), consumerRecord.topic()) - assertEquals(tp.partition(), consumerRecord.partition()) - assertArrayEquals(smallRecord.key(), consumerRecord.key()) - assertArrayEquals(smallRecord.value(), consumerRecord.value()) - } - - /** We should only return a large record if it's the first record in the first partition of the fetch request */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst(groupProtocol: String): Unit = { - val maxPartitionFetchBytes = 10 * 1024 - 
this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes.toString) - checkFetchHonoursSizeIfLargeRecordNotFirst(maxPartitionFetchBytes) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchRecordLargerThanMaxPartitionFetchBytes(groupProtocol: String): Unit = { - val maxPartitionFetchBytes = 10 * 1024 - this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes.toString) - checkLargeRecord(maxPartitionFetchBytes + 1) - } - - /** Test that we consume all partitions if fetch max bytes and max.partition.fetch.bytes are low */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testLowMaxFetchSizeForRequestAndPartition(groupProtocol: String): Unit = { - // one of the effects of this is that there will be some log reads where `0 > remaining limit bytes < message size` - // and we don't return the message because it's not the first message in the first non-empty partition of the fetch - // this behaves a little different than when remaining limit bytes is 0 and it's important to test it - this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "500") - this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, "100") - - // Avoid a rebalance while the records are being sent (the default is 6 seconds) - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 20000.toString) - val consumer = createConsumer() - - val topic1 = "topic1" - val topic2 = "topic2" - val topic3 = "topic3" - val partitionCount = 30 - val topics = Seq(topic1, topic2, topic3) - topics.foreach { topicName => - createTopic(topicName, partitionCount, brokerCount) - } - - val partitions = topics.flatMap { topic => - (0 until partitionCount).map(new TopicPartition(topic, _)) - } - - assertEquals(0, consumer.assignment().size) - - consumer.subscribe(List(topic1, topic2, topic3).asJava) - - awaitAssignment(consumer, partitions.toSet) - - val producer = createProducer() - - val producerRecords = partitions.flatMap(sendRecords(producer, numRecords = partitionCount, _)) - - val consumerRecords = consumeRecords(consumer, producerRecords.size) - - val expected = producerRecords.map { record => - (record.topic, record.partition, new String(record.key), new String(record.value), record.timestamp) - }.toSet - - val actual = consumerRecords.map { record => - (record.topic, record.partition, new String(record.key), new String(record.value), record.timestamp) - }.toSet - - assertEquals(expected, actual) - } - -} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala deleted file mode 100644 index 42e9c50fc4c7c..0000000000000 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala +++ /dev/null @@ -1,307 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package kafka.api - -import kafka.utils.{TestInfoUtils, TestUtils} -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.common.{MetricName, TopicPartition} -import org.apache.kafka.common.utils.Utils -import org.apache.kafka.coordinator.group.GroupCoordinatorConfig -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import java.time.Duration -import java.util -import java.util.Properties -import scala.collection.mutable -import scala.jdk.CollectionConverters._ - -/** - * Integration tests for the consumer that covers the poll logic - */ -@Timeout(600) -class PlaintextConsumerPollTest extends AbstractConsumerTest { - - override protected def brokerPropertyOverrides(properties: Properties): Unit = { - super.brokerPropertyOverrides(properties) - properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMaxPollRecords(groupProtocol: String): Unit = { - val maxPollRecords = 2 - val numRecords = 10000 - - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer, numRecords = numRecords, startingOffset = 0, maxPollRecords = maxPollRecords, - startingTimestamp = startingTimestamp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMaxPollIntervalMs(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 2000.toString) - } - - val consumer = createConsumer() - - val listener = new TestConsumerReassignmentListener() - consumer.subscribe(List(topic).asJava, listener) - - // rebalance to get the initial assignment - awaitRebalance(consumer, listener) - assertEquals(1, listener.callsToAssigned) - assertEquals(0, listener.callsToRevoked) - - // after we extend longer than max.poll a rebalance should be triggered - // NOTE we need to have a relatively much larger value than max.poll to let heartbeat expired for sure - Thread.sleep(3000) - - awaitRebalance(consumer, listener) - assertEquals(2, listener.callsToAssigned) - assertEquals(1, listener.callsToRevoked) - } - - @ParameterizedTest(name = 
TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMaxPollIntervalMsDelayInRevocation(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) - } - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) - - val consumer = createConsumer() - var commitCompleted = false - var committedPosition: Long = -1 - - val listener = new TestConsumerReassignmentListener { - override def onPartitionsLost(partitions: util.Collection[TopicPartition]): Unit = {} - - override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { - if (!partitions.isEmpty && partitions.contains(tp)) { - // on the second rebalance (after we have joined the group initially), sleep longer - // than session timeout and then try a commit. We should still be in the group, - // so the commit should succeed - Utils.sleep(1500) - committedPosition = consumer.position(tp) - consumer.commitSync(Map(tp -> new OffsetAndMetadata(committedPosition)).asJava) - commitCompleted = true - } - super.onPartitionsRevoked(partitions) - } - } - - consumer.subscribe(List(topic).asJava, listener) - - // rebalance to get the initial assignment - awaitRebalance(consumer, listener) - - // force a rebalance to trigger an invocation of the revocation callback while in the group - consumer.subscribe(List("otherTopic").asJava, listener) - awaitRebalance(consumer, listener) - - assertEquals(0, committedPosition) - assertTrue(commitCompleted) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMaxPollIntervalMsDelayInAssignment(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) - } - this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) - - val consumer = createConsumer() - val listener = new TestConsumerReassignmentListener { - override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - // sleep longer than the session timeout, we should still be in the group after invocation - Utils.sleep(1500) - super.onPartitionsAssigned(partitions) - } - } - consumer.subscribe(List(topic).asJava, listener) - - // rebalance to get the initial assignment - awaitRebalance(consumer, listener) - - // We should still be in the group after this invocation - ensureNoRebalance(consumer, listener) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMaxPollIntervalMsShorterThanPollTimeout(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 
500.toString) - } - - val consumer = createConsumer() - val listener = new TestConsumerReassignmentListener - consumer.subscribe(List(topic).asJava, listener) - - // rebalance to get the initial assignment - awaitRebalance(consumer, listener) - - val callsToAssignedAfterFirstRebalance = listener.callsToAssigned - - consumer.poll(Duration.ofMillis(2000)) - - // If the poll poll above times out, it would trigger a rebalance. - // Leave some time for the rebalance to happen and check for the rebalance event. - consumer.poll(Duration.ofMillis(500)) - consumer.poll(Duration.ofMillis(500)) - - assertEquals(callsToAssignedAfterFirstRebalance, listener.callsToAssigned) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLeadWithMaxPollRecords(groupProtocol: String): Unit = { - val numMessages = 1000 - val maxPollRecords = 10 - val producer = createProducer() - sendRecords(producer, numMessages, tp) - - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords") - consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - awaitNonEmptyRecords(consumer, tp) - - val tags = new util.HashMap[String, String]() - tags.put("client-id", "testPerPartitionLeadWithMaxPollRecords") - tags.put("topic", tp.topic()) - tags.put("partition", String.valueOf(tp.partition())) - val lead = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)) - assertEquals(maxPollRecords, lead.metricValue().asInstanceOf[Double], s"The lead should be $maxPollRecords") - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLagWithMaxPollRecords(groupProtocol: String): Unit = { - val numMessages = 1000 - val maxPollRecords = 10 - val producer = createProducer() - sendRecords(producer, numMessages, tp) - - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords") - consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - val records = awaitNonEmptyRecords(consumer, tp) - - val tags = new util.HashMap[String, String]() - tags.put("client-id", "testPerPartitionLagWithMaxPollRecords") - tags.put("topic", tp.topic()) - tags.put("partition", String.valueOf(tp.partition())) - val lag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) - - assertEquals(numMessages - records.count, lag.metricValue.asInstanceOf[Double], epsilon, s"The lag should be ${numMessages - records.count}") - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMultiConsumerSessionTimeoutOnStopPolling(groupProtocol: String): Unit = { - runMultiConsumerSessionTimeoutTest(false) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def 
testMultiConsumerSessionTimeoutOnClose(groupProtocol: String): Unit = { - runMultiConsumerSessionTimeoutTest(true) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPollEventuallyReturnsRecordsWithZeroTimeout(groupProtocol: String): Unit = { - val numMessages = 100 - val producer = createProducer() - sendRecords(producer, numMessages, tp) - - val consumer = createConsumer() - consumer.subscribe(Set(topic).asJava) - val records = awaitNonEmptyRecords(consumer, tp, 0L) - assertEquals(numMessages, records.count()) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoOffsetForPartitionExceptionOnPollZero(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none") - val consumer = createConsumer(configOverrides = this.consumerConfig) - - consumer.assign(List(tp).asJava) - - // continuous poll should eventually fail because there is no offset reset strategy set (fail only when resetting positions after coordinator is known) - TestUtils.tryUntilNoAssertionError() { - assertThrows(classOf[NoOffsetForPartitionException], () => consumer.poll(Duration.ZERO)) - } - } - - def runMultiConsumerSessionTimeoutTest(closeConsumer: Boolean): Unit = { - // use consumers defined in this class plus one additional consumer - // Use topic defined in this class + one additional topic - val producer = createProducer() - sendRecords(producer, numRecords = 100, tp) - sendRecords(producer, numRecords = 100, tp2) - val topic1 = "topic1" - val subscriptions = Set(tp, tp2) ++ createTopicAndSendRecords(producer, topic1, 6, 100) - - // first subscribe consumers that are defined in this class - val consumerPollers = mutable.Buffer[ConsumerAssignmentPoller]() - consumerPollers += subscribeConsumerAndStartPolling(createConsumer(), List(topic, topic1)) - consumerPollers += subscribeConsumerAndStartPolling(createConsumer(), List(topic, topic1)) - - // create one more consumer and add it to the group; we will timeout this consumer - val timeoutConsumer = createConsumer() - val timeoutPoller = subscribeConsumerAndStartPolling(timeoutConsumer, List(topic, topic1)) - consumerPollers += timeoutPoller - - // validate the initial assignment - validateGroupAssignment(consumerPollers, subscriptions) - - // stop polling and close one of the consumers, should trigger partition re-assignment among alive consumers - timeoutPoller.shutdown() - consumerPollers -= timeoutPoller - if (closeConsumer) - timeoutConsumer.close() - - validateGroupAssignment(consumerPollers, subscriptions, - Some(s"Did not get valid assignment for partitions ${subscriptions.asJava} after one consumer left"), 3 * groupMaxSessionTimeoutMs) - - // done with pollers and consumers - for (poller <- consumerPollers) - poller.shutdown() - } -} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala deleted file mode 100644 index e1e74321106ee..0000000000000 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala +++ /dev/null @@ -1,423 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package kafka.api - -import kafka.utils.{TestInfoUtils, TestUtils} -import org.apache.kafka.clients.consumer._ -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.{InvalidRegularExpression, InvalidTopicException} -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.api.function.Executable -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource - -import java.time.Duration -import java.util.regex.Pattern -import scala.jdk.CollectionConverters._ - -/** - * Integration tests for the consumer that covers the subscribe and unsubscribe logic. - */ -@Timeout(600) -class PlaintextConsumerSubscriptionTest extends AbstractConsumerTest { - - /** - * Verifies that pattern subscription performs as expected. - * The pattern matches the topics 'topic' and 'tblablac', but not 'tblablak' or 'tblab1'. - * It is expected that the consumer is subscribed to all partitions of 'topic' and - * 'tblablac' after the subscription when metadata is refreshed. - * When a new topic 'tsomec' is added afterwards, it is expected that upon the next - * metadata refresh the consumer becomes subscribed to this new topic and all partitions - * of that topic are assigned to it. 
- */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscription(groupProtocol: String): Unit = { - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords, tp) - - val topic1 = "tblablac" // matches subscribed pattern - createTopic(topic1, 2, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 0)) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 1)) - - val topic2 = "tblablak" // does not match subscribed pattern - createTopic(topic2, 2, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic2, 0)) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic2, 1)) - - val topic3 = "tblab1" // does not match subscribed pattern - createTopic(topic3, 2, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic3, 0)) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic3, 1)) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - val pattern = Pattern.compile("t.*c") - consumer.subscribe(pattern, new TestConsumerReassignmentListener) - - var assignment = Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1), - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1)) - awaitAssignment(consumer, assignment) - - val topic4 = "tsomec" // matches subscribed pattern - createTopic(topic4, 2, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic4, 0)) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic4, 1)) - - assignment ++= Set( - new TopicPartition(topic4, 0), - new TopicPartition(topic4, 1)) - awaitAssignment(consumer, assignment) - - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - } - - /** - * Verifies that a second call to pattern subscription succeeds and performs as expected. - * The initial subscription is to a pattern that matches two topics 'topic' and 'foo'. - * The second subscription is to a pattern that matches 'foo' and a new topic 'bar'. - * It is expected that the consumer is subscribed to all partitions of 'topic' and 'foo' after - * the first subscription, and to all partitions of 'foo' and 'bar' after the second. - * The metadata refresh interval is intentionally increased to a large enough value to guarantee - * that it is the subscription call that triggers a metadata refresh, and not the timeout. 
- */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSubsequentPatternSubscription(groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "30000") - val consumer = createConsumer() - - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords = numRecords, tp) - - // the first topic ('topic') matches first subscription pattern only - - val fooTopic = "foo" // matches both subscription patterns - createTopic(fooTopic, 1, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(fooTopic, 0)) - - assertEquals(0, consumer.assignment().size) - - val pattern1 = Pattern.compile(".*o.*") // only 'topic' and 'foo' match this - consumer.subscribe(pattern1, new TestConsumerReassignmentListener) - - var assignment = Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1), - new TopicPartition(fooTopic, 0)) - awaitAssignment(consumer, assignment) - - val barTopic = "bar" // matches the next subscription pattern - createTopic(barTopic, 1, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(barTopic, 0)) - - val pattern2 = Pattern.compile("...") // only 'foo' and 'bar' match this - consumer.subscribe(pattern2, new TestConsumerReassignmentListener) - assignment --= Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1)) - assignment ++= Set( - new TopicPartition(barTopic, 0)) - awaitAssignment(consumer, assignment) - - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - } - - /** - * Verifies that pattern unsubscription performs as expected. - * The pattern matches the topics 'topic' and 'tblablac'. - * It is expected that the consumer is subscribed to all partitions of 'topic' and - * 'tblablac' after the subscription when metadata is refreshed. - * When consumer unsubscribes from all its subscriptions, it is expected that its - * assignments are cleared right away. 
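// A minimal sketch (not from this patch) of the pattern-subscription flow the scaladoc
// above describes, assuming a broker at localhost:9092; group id and regex are placeholders.
// Topics that match the regex are picked up on metadata refresh, and unsubscribe() clears
// the assignment right away.
import java.time.Duration
import java.util.Properties
import java.util.regex.Pattern
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRebalanceListener, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArrayDeserializer

object PatternSubscribeSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "pattern-demo")
    // Keep metadata reasonably fresh so newly created matching topics are noticed quickly.
    props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000")
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    consumer.subscribe(Pattern.compile("t.*c"), new ConsumerRebalanceListener {
      override def onPartitionsAssigned(parts: java.util.Collection[TopicPartition]): Unit =
        println(s"assigned: $parts")
      override def onPartitionsRevoked(parts: java.util.Collection[TopicPartition]): Unit =
        println(s"revoked: $parts")
    })
    try {
      for (_ <- 1 to 10) consumer.poll(Duration.ofMillis(500))
      consumer.unsubscribe() // assignment is cleared immediately
      assert(consumer.assignment().isEmpty)
    } finally consumer.close()
  }
}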
- */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternUnsubscription(groupProtocol: String): Unit = { - val numRecords = 10000 - val producer = createProducer() - sendRecords(producer, numRecords, tp) - - val topic1 = "tblablac" // matches the subscription pattern - createTopic(topic1, 2, brokerCount) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 0)) - sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 1)) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - consumer.subscribe(Pattern.compile("t.*c"), new TestConsumerReassignmentListener) - val assignment = Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1), - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1)) - awaitAssignment(consumer, assignment) - - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRe2JPatternSubscription(groupProtocol: String): Unit = { - val topic1 = "tblablac" // matches subscribed pattern - createTopic(topic1, 2, brokerCount) - - val topic2 = "tblablak" // does not match subscribed pattern - createTopic(topic2, 2, brokerCount) - - val topic3 = "tblab1" // does not match subscribed pattern - createTopic(topic3, 2, brokerCount) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - var pattern = new SubscriptionPattern("t.*c") - consumer.subscribe(pattern) - - var assignment = Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1), - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1)) - awaitAssignment(consumer, assignment) - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - - // Subscribe to a different pattern to match topic2 (that did not match before) - pattern = new SubscriptionPattern(topic2 + ".*") - consumer.subscribe(pattern) - - assignment = Set( - new TopicPartition(topic2, 0), - new TopicPartition(topic2, 1)) - awaitAssignment(consumer, assignment) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRe2JPatternSubscriptionFetch(groupProtocol: String): Unit = { - val topic1 = "topic1" // matches subscribed pattern - createTopic(topic1, 2, brokerCount) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - val pattern = new SubscriptionPattern("topic.*") - consumer.subscribe(pattern) - - val assignment = Set( - new TopicPartition(topic, 0), - new TopicPartition(topic, 1), - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1)) - awaitAssignment(consumer, assignment) - - val producer = createProducer() - val totalRecords = 10L - val startingTimestamp = System.currentTimeMillis() - val tp = new TopicPartition(topic1, 0) - sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) - consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt, startingOffset = 0, startingTimestamp = startingTimestamp, tp = tp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def 
testRe2JPatternExpandSubscription(groupProtocol: String): Unit = { - val topic1 = "topic1" // matches first pattern - createTopic(topic1, 2, brokerCount) - - val topic2 = "topic2" // does not match first pattern - createTopic(topic2, 2, brokerCount) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - var pattern = new SubscriptionPattern("topic1.*") - consumer.subscribe(pattern) - val assignment = Set( - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1)) - awaitAssignment(consumer, assignment) - - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - - // Subscribe to a different pattern that should match - // the same topics the member already had plus new ones - pattern = new SubscriptionPattern("topic1|topic2") - consumer.subscribe(pattern) - - val expandedAssignment = assignment ++ Set(new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)) - awaitAssignment(consumer, expandedAssignment) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRe2JPatternSubscriptionAndTopicSubscription(groupProtocol: String): Unit = { - val topic1 = "topic1" // matches subscribed pattern - createTopic(topic1, 2, brokerCount) - - val topic11 = "topic11" // matches subscribed pattern - createTopic(topic11, 2, brokerCount) - - val topic2 = "topic2" // does not match subscribed pattern - createTopic(topic2, 2, brokerCount) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - // Subscribe to pattern - val pattern = new SubscriptionPattern("topic1.*") - consumer.subscribe(pattern) - val patternAssignment = Set( - new TopicPartition(topic1, 0), - new TopicPartition(topic1, 1), - new TopicPartition(topic11, 0), - new TopicPartition(topic11, 1)) - awaitAssignment(consumer, patternAssignment) - consumer.unsubscribe() - assertEquals(0, consumer.assignment().size) - - // Subscribe to explicit topic names - consumer.subscribe(List(topic2).asJava) - val assignment = Set( - new TopicPartition(topic2, 0), - new TopicPartition(topic2, 1)) - awaitAssignment(consumer, assignment) - consumer.unsubscribe() - - // Subscribe to pattern again - consumer.subscribe(pattern) - awaitAssignment(consumer, patternAssignment) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRe2JPatternSubscriptionInvalidRegex(groupProtocol: String): Unit = { - val consumer = createConsumer() - assertEquals(0, consumer.assignment().size) - - val pattern = new SubscriptionPattern("(t.*c") - consumer.subscribe(pattern) - - TestUtils.tryUntilNoAssertionError() { - assertThrows(classOf[InvalidRegularExpression], () => consumer.poll(Duration.ZERO)) - } - consumer.unsubscribe() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testExpandingTopicSubscriptions(groupProtocol: String): Unit = { - val otherTopic = "other" - val initialAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1)) - val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, initialAssignment) - - createTopic(otherTopic, 2, brokerCount) - val expandedAssignment = initialAssignment ++ Set(new TopicPartition(otherTopic, 0), new TopicPartition(otherTopic, 1)) - 
consumer.subscribe(List(topic, otherTopic).asJava) - awaitAssignment(consumer, expandedAssignment) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testShrinkingTopicSubscriptions(groupProtocol: String): Unit = { - val otherTopic = "other" - createTopic(otherTopic, 2, brokerCount) - val initialAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(otherTopic, 0), new TopicPartition(otherTopic, 1)) - val consumer = createConsumer() - consumer.subscribe(List(topic, otherTopic).asJava) - awaitAssignment(consumer, initialAssignment) - - val shrunkenAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1)) - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, shrunkenAssignment) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUnsubscribeTopic(groupProtocol: String): Unit = { - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") - } - val consumer = createConsumer() - - val listener = new TestConsumerReassignmentListener() - consumer.subscribe(List(topic).asJava, listener) - - // the initial subscription should cause a callback execution - awaitRebalance(consumer, listener) - - consumer.subscribe(List[String]().asJava) - assertEquals(0, consumer.assignment.size()) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSubscribeInvalidTopicCanUnsubscribe(groupProtocol: String): Unit = { - val consumer = createConsumer() - - setupSubscribeInvalidTopic(consumer) - if(groupProtocol == "consumer") { - // Must ensure memberId is not empty before sending leave group heartbeat. This is a temporary solution before KIP-1082. - TestUtils.waitUntilTrue(() => consumer.groupMetadata().memberId().nonEmpty, - waitTimeMs = 30000, msg = "Timeout waiting for first consumer group heartbeat response") - } - assertDoesNotThrow(new Executable { - override def execute(): Unit = consumer.unsubscribe() - }) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSubscribeInvalidTopicCanClose(groupProtocol: String): Unit = { - val consumer = createConsumer() - - setupSubscribeInvalidTopic(consumer) - assertDoesNotThrow(new Executable { - override def execute(): Unit = consumer.close() - }) - } - - def setupSubscribeInvalidTopic(consumer: Consumer[Array[Byte], Array[Byte]]): Unit = { - // Invalid topic name due to space - val invalidTopicName = "topic abc" - consumer.subscribe(List(invalidTopicName).asJava) - - var exception : InvalidTopicException = null - TestUtils.waitUntilTrue(() => { - try consumer.poll(Duration.ofMillis(500)) catch { - case e : InvalidTopicException => exception = e - case e : Throwable => fail(s"An InvalidTopicException should be thrown. 
But ${e.getClass} is thrown") - } - exception != null - }, waitTimeMs = 5000, msg = "An InvalidTopicException should be thrown.") - - assertEquals(s"Invalid topics: [${invalidTopicName}]", exception.getMessage) - } -} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala index f1c708acfdd3b..bbc4e6c350cbb 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala @@ -12,821 +12,20 @@ */ package kafka.api -import kafka.api.BaseConsumerTest.{DeserializerImpl, SerializerImpl} - -import java.time.Duration import java.util -import java.util.Arrays.asList -import java.util.{Collections, Locale, Optional, Properties} -import kafka.server.KafkaBroker import kafka.utils.{TestInfoUtils, TestUtils} -import org.apache.kafka.clients.admin.{NewPartitions, NewTopic} import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.errors.{InterruptException, InvalidGroupIdException, InvalidTopicException, TimeoutException, WakeupException} -import org.apache.kafka.common.record.{CompressionType, TimestampType} -import org.apache.kafka.common.serialization._ +import org.apache.kafka.common.errors.InterruptException import org.apache.kafka.common.test.api.Flaky -import org.apache.kafka.common.{MetricName, TopicPartition} -import org.apache.kafka.server.quota.QuotaType -import org.apache.kafka.test.{MockConsumerInterceptor, MockProducerInterceptor} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import java.util.concurrent.{CompletableFuture, ExecutionException, TimeUnit} -import scala.jdk.CollectionConverters._ - -@Timeout(600) -class PlaintextConsumerTest extends BaseConsumerTest { - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testHeaders(groupProtocol: String): Unit = { - val numRecords = 1 - val record = new ProducerRecord(tp.topic, tp.partition, null, "key".getBytes, "value".getBytes) - - record.headers().add("headerKey", "headerValue".getBytes) - - val producer = createProducer() - producer.send(record) - - val consumer = createConsumer() - assertEquals(0, consumer.assignment.size) - consumer.assign(List(tp).asJava) - assertEquals(1, consumer.assignment.size) - - consumer.seek(tp, 0) - val records = consumeRecords(consumer = consumer, numRecords = numRecords) - - assertEquals(numRecords, records.size) - - for (i <- 0 until numRecords) { - val record = records(i) - val header = record.headers().lastHeader("headerKey") - assertEquals("headerValue", if (header == null) null else new String(header.value())) - } - } - - private def testHeadersSerializeDeserialize(serializer: Serializer[Array[Byte]], deserializer: Deserializer[Array[Byte]]): Unit = { - val numRecords = 1 - val record = new ProducerRecord(tp.topic, tp.partition, null, "key".getBytes, "value".getBytes) - - val producer = createProducer( - keySerializer = new ByteArraySerializer, - valueSerializer = serializer) - producer.send(record) - - val consumer = createConsumer( - keyDeserializer = new ByteArrayDeserializer, - valueDeserializer = deserializer) - assertEquals(0, 
consumer.assignment.size) - consumer.assign(List(tp).asJava) - assertEquals(1, consumer.assignment.size) - - consumer.seek(tp, 0) - val records = consumeRecords(consumer = consumer, numRecords = numRecords) - - assertEquals(numRecords, records.size) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testHeadersSerializerDeserializer(groupProtocol: String): Unit = { - val extendedSerializer = new SerializerImpl - - val extendedDeserializer = new DeserializerImpl - - testHeadersSerializeDeserialize(extendedSerializer, extendedDeserializer) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoOffsetReset(groupProtocol: String): Unit = { - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 1, tp, startingTimestamp = startingTimestamp) - - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testGroupConsumption(groupProtocol: String): Unit = { - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 10, tp, startingTimestamp = startingTimestamp) - - val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPartitionsFor(groupProtocol: String): Unit = { - val numParts = 2 - createTopic("part-test", numParts) - val consumer = createConsumer() - val parts = consumer.partitionsFor("part-test") - assertNotNull(parts) - assertEquals(2, parts.size) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPartitionsForAutoCreate(groupProtocol: String): Unit = { - val consumer = createConsumer() - // First call would create the topic - consumer.partitionsFor("non-exist-topic") - TestUtils.waitUntilTrue(() => { - !consumer.partitionsFor("non-exist-topic").isEmpty - }, s"Timed out while awaiting non empty partitions.") - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPartitionsForInvalidTopic(groupProtocol: String): Unit = { - val consumer = createConsumer() - assertThrows(classOf[InvalidTopicException], () => consumer.partitionsFor(";3# ads,{234")) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSeek(groupProtocol: String): Unit = { - val consumer = createConsumer() - val totalRecords = 50L - val mid = totalRecords / 2 - - // Test seek non-compressed message - val producer = createProducer() - val startingTimestamp = 0 - sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) - consumer.assign(List(tp).asJava) - - 
consumer.seekToEnd(List(tp).asJava) - assertEquals(totalRecords, consumer.position(tp)) - assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) - - consumer.seekToBeginning(List(tp).asJava) - assertEquals(0L, consumer.position(tp)) - consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) - - consumer.seek(tp, mid) - assertEquals(mid, consumer.position(tp)) - - consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = mid.toInt, startingKeyAndValueIndex = mid.toInt, - startingTimestamp = mid) - - // Test seek compressed message - sendCompressedMessages(totalRecords.toInt, tp2) - consumer.assign(List(tp2).asJava) - - consumer.seekToEnd(List(tp2).asJava) - assertEquals(totalRecords, consumer.position(tp2)) - assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) - - consumer.seekToBeginning(List(tp2).asJava) - assertEquals(0L, consumer.position(tp2)) - consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = 0, tp = tp2) - - consumer.seek(tp2, mid) - assertEquals(mid, consumer.position(tp2)) - consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = mid.toInt, startingKeyAndValueIndex = mid.toInt, - startingTimestamp = mid, tp = tp2) - } - - private def sendCompressedMessages(numRecords: Int, tp: TopicPartition): Unit = { - val producerProps = new Properties() - producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, CompressionType.GZIP.name) - producerProps.setProperty(ProducerConfig.LINGER_MS_CONFIG, Int.MaxValue.toString) - val producer = createProducer(configOverrides = producerProps) - (0 until numRecords).foreach { i => - producer.send(new ProducerRecord(tp.topic, tp.partition, i.toLong, s"key $i".getBytes, s"value $i".getBytes)) - } - producer.close() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPartitionPauseAndResume(groupProtocol: String): Unit = { - val partitions = List(tp).asJava - val producer = createProducer() - var startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) - - val consumer = createConsumer() - consumer.assign(partitions) - consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) - consumer.pause(partitions) - startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) - assertTrue(consumer.poll(Duration.ofMillis(100)).isEmpty) - consumer.resume(partitions) - consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 5, startingTimestamp = startingTimestamp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testInterceptors(groupProtocol: String): Unit = { - val appendStr = "mock" - MockConsumerInterceptor.resetCounters() - MockProducerInterceptor.resetCounters() - - // create producer with interceptor - val producerProps = new Properties() - producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockProducerInterceptor].getName) - producerProps.put("mock.interceptor.append", appendStr) - val testProducer = createProducer(keySerializer = new StringSerializer, - valueSerializer = new StringSerializer, - configOverrides = producerProps) - - // produce records - val numRecords = 10 - (0 until numRecords).map { i => - 
testProducer.send(new ProducerRecord(tp.topic, tp.partition, s"key $i", s"value $i")) - }.foreach(_.get) - assertEquals(numRecords, MockProducerInterceptor.ONSEND_COUNT.intValue) - assertEquals(numRecords, MockProducerInterceptor.ON_SUCCESS_COUNT.intValue) - // send invalid record - assertThrows(classOf[Throwable], () => testProducer.send(null), () => "Should not allow sending a null record") - assertEquals(1, MockProducerInterceptor.ON_ERROR_COUNT.intValue, "Interceptor should be notified about exception") - assertEquals(0, MockProducerInterceptor.ON_ERROR_WITH_METADATA_COUNT.intValue(), "Interceptor should not receive metadata with an exception when record is null") - - // create consumer with interceptor - this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") - val testConsumer = createConsumer(keyDeserializer = new StringDeserializer, valueDeserializer = new StringDeserializer) - testConsumer.assign(List(tp).asJava) - testConsumer.seek(tp, 0) - - // consume and verify that values are modified by interceptors - val records = consumeRecords(testConsumer, numRecords) - for (i <- 0 until numRecords) { - val record = records(i) - assertEquals(s"key $i", new String(record.key)) - assertEquals(s"value $i$appendStr".toUpperCase(Locale.ROOT), new String(record.value)) - } - - // commit sync and verify onCommit is called - val commitCountBefore = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue - testConsumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(2L))).asJava) - assertEquals(2, testConsumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(commitCountBefore + 1, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue) - - // commit async and verify onCommit is called - sendAndAwaitAsyncCommit(testConsumer, Some(Map(tp -> new OffsetAndMetadata(5L)))) - assertEquals(5, testConsumer.committed(Set(tp).asJava).get(tp).offset) - assertEquals(commitCountBefore + 2, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue) - - testConsumer.close() - testProducer.close() - - // cleanup - MockConsumerInterceptor.resetCounters() - MockProducerInterceptor.resetCounters() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testInterceptorsWithWrongKeyValue(groupProtocol: String): Unit = { - val appendStr = "mock" - // create producer with interceptor that has different key and value types from the producer - val producerProps = new Properties() - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockProducerInterceptor") - producerProps.put("mock.interceptor.append", appendStr) - val testProducer = createProducer() - - // producing records should succeed - testProducer.send(new ProducerRecord(tp.topic(), tp.partition(), s"key".getBytes, s"value will not be modified".getBytes)) - - // create consumer with interceptor that has different key and value types from the consumer - this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") - val testConsumer = createConsumer() - - testConsumer.assign(List(tp).asJava) - testConsumer.seek(tp, 0) - - // consume and verify that values are not modified by interceptors -- their exceptions are caught and logged, but not propagated - val records = consumeRecords(testConsumer, 1) - val record = 
records.head - assertEquals(s"value will not be modified", new String(record.value())) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeMessagesWithCreateTime(groupProtocol: String): Unit = { - val numRecords = 50 - // Test non-compressed messages - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) - - // Test compressed messages - sendCompressedMessages(numRecords, tp2) - consumer.assign(List(tp2).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp2, startingOffset = 0) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeMessagesWithLogAppendTime(groupProtocol: String): Unit = { - val topicName = "testConsumeMessagesWithLogAppendTime" - val topicProps = new Properties() - topicProps.setProperty(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "LogAppendTime") - createTopic(topicName, 2, 2, topicProps) - - val startTime = System.currentTimeMillis() - val numRecords = 50 - - // Test non-compressed messages - val tp1 = new TopicPartition(topicName, 0) - val producer = createProducer() - sendRecords(producer, numRecords, tp1) - - val consumer = createConsumer() - consumer.assign(List(tp1).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp1, startingOffset = 0, - startingTimestamp = startTime, timestampType = TimestampType.LOG_APPEND_TIME) - - // Test compressed messages - val tp2 = new TopicPartition(topicName, 1) - sendCompressedMessages(numRecords, tp2) - consumer.assign(List(tp2).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp2, startingOffset = 0, - startingTimestamp = startTime, timestampType = TimestampType.LOG_APPEND_TIME) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListTopics(groupProtocol: String): Unit = { - val numParts = 2 - val topic1 = "part-test-topic-1" - val topic2 = "part-test-topic-2" - val topic3 = "part-test-topic-3" - createTopic(topic1, numParts) - createTopic(topic2, numParts) - createTopic(topic3, numParts) - - val consumer = createConsumer() - val topics = consumer.listTopics() - assertNotNull(topics) - assertEquals(5, topics.size()) - assertEquals(5, topics.keySet().size()) - assertEquals(2, topics.get(topic1).size) - assertEquals(2, topics.get(topic2).size) - assertEquals(2, topics.get(topic3).size) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPauseStateNotPreservedByRebalance(groupProtocol: String): Unit = { - if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") - } - val consumer = createConsumer() - - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - 
sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) - consumer.subscribe(List(topic).asJava) - consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) - consumer.pause(List(tp).asJava) - - // subscribe to a new topic to trigger a rebalance - consumer.subscribe(List("topic2").asJava) - - // after rebalance, our position should be reset and our pause state lost, - // so we should be able to consume from the beginning - consumeAndVerifyRecords(consumer = consumer, numRecords = 0, startingOffset = 5, startingTimestamp = startingTimestamp) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLeadMetricsCleanUpWithSubscribe(groupProtocol: String): Unit = { - val numMessages = 1000 - val topic2 = "topic2" - createTopic(topic2, 2, brokerCount) - // send some messages. - val producer = createProducer() - sendRecords(producer, numMessages, tp) - // Test subscribe - // Create a consumer and consumer some messages. - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithSubscribe") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithSubscribe") - val consumer = createConsumer() - val listener = new TestConsumerReassignmentListener - consumer.subscribe(List(topic, topic2).asJava, listener) - val records = awaitNonEmptyRecords(consumer, tp) - assertEquals(1, listener.callsToAssigned, "should be assigned once") - // Verify the metric exist. - val tags1 = new util.HashMap[String, String]() - tags1.put("client-id", "testPerPartitionLeadMetricsCleanUpWithSubscribe") - tags1.put("topic", tp.topic()) - tags1.put("partition", String.valueOf(tp.partition())) - - val tags2 = new util.HashMap[String, String]() - tags2.put("client-id", "testPerPartitionLeadMetricsCleanUpWithSubscribe") - tags2.put("topic", tp2.topic()) - tags2.put("partition", String.valueOf(tp2.partition())) - val fetchLead0 = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1)) - assertNotNull(fetchLead0) - assertEquals(records.count.toDouble, fetchLead0.metricValue(), s"The lead should be ${records.count}") - - // Remove topic from subscription - consumer.subscribe(List(topic2).asJava, listener) - awaitRebalance(consumer, listener) - // Verify the metric has gone - assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1))) - assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags2))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLagMetricsCleanUpWithSubscribe(groupProtocol: String): Unit = { - val numMessages = 1000 - val topic2 = "topic2" - createTopic(topic2, 2, brokerCount) - // send some messages. - val producer = createProducer() - sendRecords(producer, numMessages, tp) - // Test subscribe - // Create a consumer and consumer some messages. 
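// A minimal, self-contained sketch (not part of the test harness) of how an application
// could read the same per-partition "records-lag" metric these tests assert on; the
// bootstrap address, group/client ids and topic name are placeholders.
import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters._
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.{MetricName, TopicPartition}
import org.apache.kafka.common.serialization.ByteArrayDeserializer

object RecordsLagMetricSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "lag-metric-demo")
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "lag-metric-demo")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    try {
      val tp = new TopicPartition("topic", 0)
      consumer.assign(java.util.List.of(tp))
      consumer.poll(Duration.ofSeconds(1))
      // The fetch manager registers "records-lag" per assigned partition, tagged with
      // client-id, topic and partition; it goes away once the partition is unassigned.
      val tags = Map(
        "client-id" -> "lag-metric-demo",
        "topic" -> tp.topic,
        "partition" -> tp.partition.toString).asJava
      val lag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags))
      if (lag != null) println(s"records-lag for $tp: ${lag.metricValue}")
    } finally consumer.close()
  }
}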
- consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithSubscribe") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithSubscribe") - val consumer = createConsumer() - val listener = new TestConsumerReassignmentListener - consumer.subscribe(List(topic, topic2).asJava, listener) - val records = awaitNonEmptyRecords(consumer, tp) - assertEquals(1, listener.callsToAssigned, "should be assigned once") - // Verify the metric exist. - val tags1 = new util.HashMap[String, String]() - tags1.put("client-id", "testPerPartitionLagMetricsCleanUpWithSubscribe") - tags1.put("topic", tp.topic()) - tags1.put("partition", String.valueOf(tp.partition())) - - val tags2 = new util.HashMap[String, String]() - tags2.put("client-id", "testPerPartitionLagMetricsCleanUpWithSubscribe") - tags2.put("topic", tp2.topic()) - tags2.put("partition", String.valueOf(tp2.partition())) - val fetchLag0 = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1)) - assertNotNull(fetchLag0) - val expectedLag = numMessages - records.count - assertEquals(expectedLag, fetchLag0.metricValue.asInstanceOf[Double], epsilon, s"The lag should be $expectedLag") - - // Remove topic from subscription - consumer.subscribe(List(topic2).asJava, listener) - awaitRebalance(consumer, listener) - // Verify the metric has gone - assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1))) - assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags2))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLeadMetricsCleanUpWithAssign(groupProtocol: String): Unit = { - val numMessages = 1000 - // Test assign - // send some messages. - val producer = createProducer() - sendRecords(producer, numMessages, tp) - sendRecords(producer, numMessages, tp2) - - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithAssign") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithAssign") - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - val records = awaitNonEmptyRecords(consumer, tp) - // Verify the metric exist. - val tags = new util.HashMap[String, String]() - tags.put("client-id", "testPerPartitionLeadMetricsCleanUpWithAssign") - tags.put("topic", tp.topic()) - tags.put("partition", String.valueOf(tp.partition())) - val fetchLead = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)) - assertNotNull(fetchLead) - - assertEquals(records.count.toDouble, fetchLead.metricValue(), s"The lead should be ${records.count}") - - consumer.assign(List(tp2).asJava) - awaitNonEmptyRecords(consumer ,tp2) - assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLagMetricsCleanUpWithAssign(groupProtocol: String): Unit = { - val numMessages = 1000 - // Test assign - // send some messages. 
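// A minimal sketch (not from this patch) of consuming via manual assignment rather than
// group subscription; broker address and topic name are placeholders. When group.id is
// omitted entirely the consumer can still assign() and seek(), but committing offsets
// throws InvalidGroupIdException, which is what testConsumingWithNullGroupId further down
// exercises.
import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.InvalidGroupIdException
import org.apache.kafka.common.serialization.ByteArrayDeserializer

object ManualAssignSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // no group.id on purpose
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    val tp = new TopicPartition("topic", 0)
    try {
      consumer.assign(java.util.List.of(tp))
      consumer.seekToBeginning(java.util.List.of(tp))
      val records = consumer.poll(Duration.ofSeconds(1))
      println(s"fetched ${records.count()} records from $tp")
      try consumer.commitSync()
      catch { case _: InvalidGroupIdException => () } // expected: no group id configured
    } finally consumer.close()
  }
}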
- val producer = createProducer() - sendRecords(producer, numMessages, tp) - sendRecords(producer, numMessages, tp2) - - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - val records = awaitNonEmptyRecords(consumer, tp) - // Verify the metric exist. - val tags = new util.HashMap[String, String]() - tags.put("client-id", "testPerPartitionLagMetricsCleanUpWithAssign") - tags.put("topic", tp.topic()) - tags.put("partition", String.valueOf(tp.partition())) - val fetchLag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) - assertNotNull(fetchLag) - - val expectedLag = numMessages - records.count - assertEquals(expectedLag, fetchLag.metricValue.asInstanceOf[Double], epsilon, s"The lag should be $expectedLag") - - consumer.assign(List(tp2).asJava) - awaitNonEmptyRecords(consumer, tp2) - assertNull(consumer.metrics.get(new MetricName(tp.toString + ".records-lag", "consumer-fetch-manager-metrics", "", tags))) - assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPerPartitionLagMetricsWhenReadCommitted(groupProtocol: String): Unit = { - val numMessages = 1000 - // send some messages. - val producer = createProducer() - sendRecords(producer, numMessages, tp) - sendRecords(producer, numMessages, tp2) - - consumerConfig.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed") - consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") - consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - awaitNonEmptyRecords(consumer, tp) - // Verify the metric exist. 
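// A minimal sketch (not from this patch) of a read_committed consumer, with the same
// placeholder broker, group and topic names as the sketches above. Under read_committed
// the consumer only returns records below the last stable offset, which is also what the
// per-partition lag is measured against in that mode.
import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.ByteArrayDeserializer

object ReadCommittedSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "read-committed-demo")
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    try {
      consumer.subscribe(java.util.List.of("topic"))
      val records = consumer.poll(Duration.ofSeconds(1))
      println(s"read ${records.count()} committed records")
    } finally consumer.close()
  }
}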
- val tags = new util.HashMap[String, String]() - tags.put("client-id", "testPerPartitionLagMetricsCleanUpWithAssign") - tags.put("topic", tp.topic()) - tags.put("partition", String.valueOf(tp.partition())) - val fetchLag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) - assertNotNull(fetchLag) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testQuotaMetricsNotCreatedIfNoQuotasConfigured(groupProtocol: String): Unit = { - val numRecords = 1000 - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) - - val consumer = createConsumer() - consumer.assign(List(tp).asJava) - consumer.seek(tp, 0) - consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) - - def assertNoMetric(broker: KafkaBroker, name: String, quotaType: QuotaType, clientId: String): Unit = { - val metricName = broker.metrics.metricName("throttle-time", - quotaType.toString, - "", - "user", "", - "client-id", clientId) - assertNull(broker.metrics.metric(metricName), "Metric should not have been created " + metricName) - } - brokers.foreach(assertNoMetric(_, "byte-rate", QuotaType.PRODUCE, producerClientId)) - brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.PRODUCE, producerClientId)) - brokers.foreach(assertNoMetric(_, "byte-rate", QuotaType.FETCH, consumerClientId)) - brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.FETCH, consumerClientId)) - - brokers.foreach(assertNoMetric(_, "request-time", QuotaType.REQUEST, producerClientId)) - brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.REQUEST, producerClientId)) - brokers.foreach(assertNoMetric(_, "request-time", QuotaType.REQUEST, consumerClientId)) - brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.REQUEST, consumerClientId)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumingWithNullGroupId(groupProtocol: String): Unit = { - val topic = "test_topic" - val partition = 0 - val tp = new TopicPartition(topic, partition) - createTopic(topic) - - val producer = createProducer() - producer.send(new ProducerRecord(topic, partition, "k1".getBytes, "v1".getBytes)).get() - producer.send(new ProducerRecord(topic, partition, "k2".getBytes, "v2".getBytes)).get() - producer.send(new ProducerRecord(topic, partition, "k3".getBytes, "v3".getBytes)).get() - producer.close() - - // consumer 1 uses the default group id and consumes from earliest offset - val consumer1Config = new Properties(consumerConfig) - consumer1Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") - consumer1Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer1") - val consumer1 = createConsumer( - configOverrides = consumer1Config, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - - // consumer 2 uses the default group id and consumes from latest offset - val consumer2Config = new Properties(consumerConfig) - consumer2Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest") - consumer2Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer2") - val consumer2 = createConsumer( - configOverrides = consumer2Config, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - - // consumer 3 uses the default 
group id and starts from an explicit offset - val consumer3Config = new Properties(consumerConfig) - consumer3Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer3") - val consumer3 = createConsumer( - configOverrides = consumer3Config, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - - consumer1.assign(asList(tp)) - consumer2.assign(asList(tp)) - consumer3.assign(asList(tp)) - consumer3.seek(tp, 1) - - val numRecords1 = consumer1.poll(Duration.ofMillis(5000)).count() - assertThrows(classOf[InvalidGroupIdException], () => consumer1.commitSync()) - assertThrows(classOf[InvalidGroupIdException], () => consumer2.committed(Set(tp).asJava)) - - val numRecords2 = consumer2.poll(Duration.ofMillis(5000)).count() - val numRecords3 = consumer3.poll(Duration.ofMillis(5000)).count() - - consumer1.unsubscribe() - consumer2.unsubscribe() - consumer3.unsubscribe() +import java.util.concurrent.ExecutionException - assertTrue(consumer1.assignment().isEmpty) - assertTrue(consumer2.assignment().isEmpty) - assertTrue(consumer3.assignment().isEmpty) - - consumer1.close() - consumer2.close() - consumer3.close() - - assertEquals(3, numRecords1, "Expected consumer1 to consume from earliest offset") - assertEquals(0, numRecords2, "Expected consumer2 to consume from latest offset") - assertEquals(2, numRecords3, "Expected consumer3 to consume from offset 1") - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNullGroupIdNotSupportedIfCommitting(groupProtocol: String): Unit = { - val consumer1Config = new Properties(consumerConfig) - consumer1Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") - consumer1Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer1") - val consumer1 = createConsumer( - configOverrides = consumer1Config, - configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - - consumer1.assign(List(tp).asJava) - assertThrows(classOf[InvalidGroupIdException], () => consumer1.commitSync()) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testStaticConsumerDetectsNewPartitionCreatedAfterRestart(groupProtocol: String): Unit = { - val foo = "foo" - val foo0 = new TopicPartition(foo, 0) - val foo1 = new TopicPartition(foo, 1) - - val admin = createAdminClient() - admin.createTopics(Seq(new NewTopic(foo, 1, 1.toShort)).asJava).all.get - - val consumerConfig = new Properties - consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group-id") - consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "my-instance-id") - - val consumer1 = createConsumer(configOverrides = consumerConfig) - consumer1.subscribe(Seq(foo).asJava) - awaitAssignment(consumer1, Set(foo0)) - consumer1.close() - - val consumer2 = createConsumer(configOverrides = consumerConfig) - consumer2.subscribe(Seq(foo).asJava) - awaitAssignment(consumer2, Set(foo0)) - - admin.createPartitions(Map(foo -> NewPartitions.increaseTo(2)).asJava).all.get - - awaitAssignment(consumer2, Set(foo0, foo1)) - - consumer2.close() - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testEndOffsets(groupProtocol: String): Unit = { - val producer = createProducer() - val startingTimestamp = System.currentTimeMillis() - val numRecords = 10000 - (0 until numRecords).map { i => - val timestamp = startingTimestamp + i.toLong - val 
record = new ProducerRecord(tp.topic(), tp.partition(), timestamp, s"key $i".getBytes, s"value $i".getBytes) - producer.send(record) - record - } - producer.flush() - - val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) - awaitAssignment(consumer, Set(tp, tp2)) - - val endOffsets = consumer.endOffsets(Set(tp).asJava) - assertEquals(numRecords, endOffsets.get(tp)) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSeekThrowsIllegalStateIfPartitionsNotAssigned(groupProtocol: String): Unit = { - val tp = new TopicPartition(topic, 0) - val consumer = createConsumer(configOverrides = consumerConfig) - val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.seekToEnd(Collections.singletonList(tp))) - assertEquals("No current assignment for partition " + tp, e.getMessage) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchOffsetsForTime(groupProtocol: String): Unit = { - val numPartitions = 2 - val producer = createProducer() - val timestampsToSearch = new util.HashMap[TopicPartition, java.lang.Long]() - var i = 0 - for (part <- 0 until numPartitions) { - val tp = new TopicPartition(topic, part) - // key, val, and timestamp equal to the sequence number. - sendRecords(producer, numRecords = 100, tp, startingTimestamp = 0) - timestampsToSearch.put(tp, (i * 20).toLong) - i += 1 - } - - val consumer = createConsumer() - // Test negative target time - assertThrows(classOf[IllegalArgumentException], - () => consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(topic, 0), -1))) - val timestampOffsets = consumer.offsetsForTimes(timestampsToSearch) - - val timestampTp0 = timestampOffsets.get(new TopicPartition(topic, 0)) - assertEquals(0, timestampTp0.offset) - assertEquals(0, timestampTp0.timestamp) - assertEquals(Optional.of(0), timestampTp0.leaderEpoch) - - val timestampTp1 = timestampOffsets.get(new TopicPartition(topic, 1)) - assertEquals(20, timestampTp1.offset) - assertEquals(20, timestampTp1.timestamp) - assertEquals(Optional.of(0), timestampTp1.leaderEpoch) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - @Timeout(15) - def testPositionRespectsTimeout(groupProtocol: String): Unit = { - val topicPartition = new TopicPartition(topic, 15) - val consumer = createConsumer() - consumer.assign(List(topicPartition).asJava) - - // When position() is called for a topic/partition that doesn't exist, the consumer will repeatedly update the - // local metadata. However, it should give up after the user-supplied timeout has past. 
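// A minimal sketch (not from this patch) of the position() timeout behaviour described in
// the comment above, assuming a broker at localhost:9092; the partition index is chosen so
// that it does not exist, so the call keeps refreshing metadata until the supplied timeout
// elapses and a TimeoutException is thrown.
import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.TimeoutException
import org.apache.kafka.common.serialization.ByteArrayDeserializer

object PositionTimeoutSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "position-timeout-demo")
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    val missing = new TopicPartition("topic", 15) // partition index beyond what exists
    consumer.assign(java.util.List.of(missing))
    try {
      consumer.position(missing, Duration.ofSeconds(3))
    } catch {
      case _: TimeoutException => println("gave up after the user-supplied timeout")
    } finally {
      consumer.close()
    }
  }
}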
- assertThrows(classOf[TimeoutException], () => consumer.position(topicPartition, Duration.ofSeconds(3))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - @Timeout(15) - def testPositionRespectsWakeup(groupProtocol: String): Unit = { - val topicPartition = new TopicPartition(topic, 15) - val consumer = createConsumer() - consumer.assign(List(topicPartition).asJava) - - CompletableFuture.runAsync { () => - TimeUnit.SECONDS.sleep(1) - consumer.wakeup() - } - - assertThrows(classOf[WakeupException], () => consumer.position(topicPartition, Duration.ofSeconds(3))) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - @Timeout(15) - def testPositionWithErrorConnectionRespectsWakeup(groupProtocol: String): Unit = { - val topicPartition = new TopicPartition(topic, 15) - val properties = new Properties() - properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:12345") // make sure the connection fails - val consumer = createConsumer(configOverrides = properties) - consumer.assign(List(topicPartition).asJava) - - CompletableFuture.runAsync { () => - TimeUnit.SECONDS.sleep(1) - consumer.wakeup() - } - - assertThrows(classOf[WakeupException], () => consumer.position(topicPartition, Duration.ofSeconds(100))) - } +@Timeout(60) +class PlaintextConsumerTest extends AbstractConsumerTest { @Flaky("KAFKA-18031") @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -835,7 +34,7 @@ class PlaintextConsumerTest extends BaseConsumerTest { val adminClient = createAdminClient() val consumer = createConsumer() val listener = new TestConsumerReassignmentListener() - consumer.subscribe(List(topic).asJava, listener) + consumer.subscribe(java.util.List.of(topic), listener) awaitRebalance(consumer, listener) assertEquals(1, listener.callsToAssigned) @@ -862,7 +61,7 @@ class PlaintextConsumerTest extends BaseConsumerTest { () => { try { val groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG) - val groupDescription = adminClient.describeConsumerGroups (Collections.singletonList (groupId) ).describedGroups.get (groupId).get + val groupDescription = adminClient.describeConsumerGroups(util.List.of(groupId)).describedGroups.get(groupId).get groupDescription.members.isEmpty } catch { case _: ExecutionException | _: InterruptedException => diff --git a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala index 1acd22dc3fa4b..18d34ad05e9da 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala @@ -21,10 +21,8 @@ import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth._ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder import org.apache.kafka.clients.admin.AdminClientConfig -import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import org.apache.kafka.common.errors.TopicAuthorizationException // This test case uses a separate listener for client and inter-broker 
communication, from @@ -88,9 +86,8 @@ class PlaintextEndToEndAuthorizationTest extends EndToEndAuthorizationTest { superuserClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers(interBrokerListenerName)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testListenerName(quorum: String): Unit = { + @Test + def testListenerName(): Unit = { // To check the client listener name, establish a session on the server by sending any request eg sendRecords val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords = 1, tp)) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala index 6e3bbf4aed701..dc8b9423304ef 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala @@ -28,7 +28,7 @@ import org.apache.kafka.common.record.{DefaultRecord, DefaultRecordBatch, Record import org.apache.kafka.common.serialization.ByteArraySerializer import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout +import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} @@ -37,6 +37,22 @@ import java.nio.charset.StandardCharsets class PlaintextProducerSendTest extends BaseProducerSendTest { + // topic auto creation is enabled by default, only some tests disable it + var disableAutoTopicCreation = false + + override def brokerOverrides: Properties = { + val props = super.brokerOverrides + if (disableAutoTopicCreation) { + props.put("auto.create.topics.enable", "false") + } + props + } + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + disableAutoTopicCreation = testInfo.getDisplayName.contains("autoCreateTopicsEnabled=false") + super.setUp(testInfo) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("getTestGroupProtocolParametersAll")) def testWrongSerializer(groupProtocol: String): Unit = { @@ -121,6 +137,39 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { } } + /** + * Test error message received when send fails waiting on metadata for a topic that does not exist. + * No need to run this for both rebalance protocols. + */ + @ParameterizedTest(name = "groupProtocol={0}.autoCreateTopicsEnabled={1}") + @MethodSource(Array("protocolAndAutoCreateTopicProviders")) + def testSendTimeoutErrorMessageWhenTopicDoesNotExist(groupProtocol: String, autoCreateTopicsEnabled: String): Unit = { + val producer = createProducer(maxBlockMs = 500) + val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes) + val exception = assertThrows(classOf[ExecutionException], () => producer.send(record).get) + assertInstanceOf(classOf[TimeoutException], exception.getCause) + assertEquals("Topic topic not present in metadata after 500 ms.", exception.getCause.getMessage) + } + + /** + * Test error message received when send fails waiting on metadata for a partition that does not exist (topic exists). + * No need to run this for both rebalance protocols. 
+ */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) + @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) + def testSendTimeoutErrorWhenPartitionDoesNotExist(groupProtocol: String): Unit = { + val producer = createProducer(maxBlockMs = 500) + // Send a message to auto-create the topic + var record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes) + assertEquals(0L, producer.send(record).get.offset, "Should have offset 0") + + // Send another message to the topic that exists but to a partition that does not + record = new ProducerRecord(topic, 10, "key".getBytes, "value".getBytes) + val exception = assertThrows(classOf[ExecutionException], () => producer.send(record).get) + assertInstanceOf(classOf[TimeoutException], exception.getCause) + assertEquals("Partition 10 of topic topic with partition count 4 is not present in metadata after 500 ms.", exception.getCause.getMessage) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("timestampConfigProvider")) def testSendWithInvalidBeforeAndAfterTimestamp(groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { @@ -285,4 +334,10 @@ object PlaintextProducerSendTest { } data.stream() } + + def protocolAndAutoCreateTopicProviders: java.util.stream.Stream[Arguments] = { + val data = new java.util.ArrayList[Arguments]() + data.add(Arguments.of("classic", "false")) + data.stream() + } } \ No newline at end of file diff --git a/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala b/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala deleted file mode 100644 index 0ee52530e57ff..0000000000000 --- a/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.api - -import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.NewPartitionReassignment -import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} -import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -import java.nio.charset.StandardCharsets -import java.util -import java.util.Optional -import scala.jdk.CollectionConverters._ - - -class ProducerSendWhileDeletionTest extends IntegrationTestHarness { - val producerCount: Int = 1 - val brokerCount: Int = 2 - val defaultLingerMs: Int = 5; - - serverConfig.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 2.toString) - serverConfig.put(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, 2.toString) - serverConfig.put(ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG, false.toString) - - producerConfig.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5000L.toString) - producerConfig.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10000.toString) - producerConfig.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, (10000 + defaultLingerMs).toString) - - /** - * Tests that Producer gets self-recovered when a topic is deleted mid-way of produce. - * - * Producer will attempt to send messages to the partition specified in each record, and should - * succeed as long as the partition is included in the metadata. - */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testSendWithTopicDeletionMidWay(quorum: String): Unit = { - val numRecords = 10 - val topic = "topic" - - // Create topic with leader as 0 for the 2 partitions. - createTopicWithAssignment(topic, Map(0 -> Seq(0, 1), 1 -> Seq(0, 1))) - - val reassignment = Map( - new TopicPartition(topic, 0) -> Optional.of(new NewPartitionReassignment(util.Arrays.asList(1, 0))), - new TopicPartition(topic, 1) -> Optional.of(new NewPartitionReassignment(util.Arrays.asList(1, 0))) - ) - - // Change leader to 1 for both the partitions to increase leader epoch from 0 -> 1 - val admin = createAdminClient() - admin.alterPartitionReassignments(reassignment.asJava).all().get() - - val producer = createProducer() - - (1 to numRecords).foreach { i => - val resp = producer.send(new ProducerRecord(topic, null, ("value" + i).getBytes(StandardCharsets.UTF_8))).get - assertEquals(topic, resp.topic()) - } - - // Start topic deletion - deleteTopic(topic, listenerName) - - // Verify that the topic is deleted when no metadata request comes in - TestUtils.verifyTopicDeletion(topic, 2, brokers) - - // Producer should be able to send messages even after topic gets deleted and auto-created - assertEquals(topic, producer.send(new ProducerRecord(topic, null, "value".getBytes(StandardCharsets.UTF_8))).get.topic()) - } - -} diff --git a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala index cf44bffbdc4c1..c08c43081e6a2 100644 --- a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala @@ -15,15 +15,14 @@ package kafka.api import kafka.security.JaasTestUtils import java.time.Duration -import java.util.{Collections, Properties} +import java.util.Properties import java.util.concurrent.{ExecutionException, 
TimeUnit} -import scala.jdk.CollectionConverters._ import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.errors.SaslAuthenticationException -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.common.config.SaslConfigs @@ -37,7 +36,6 @@ import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import scala.jdk.javaapi.OptionConverters import scala.util.Using - class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { private val kafkaClientSaslMechanism = "SCRAM-SHA-256" private val kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) @@ -63,7 +61,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setScramArguments( - List(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) + java.util.List.of(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) } override def createPrivilegedAdminClient() = { @@ -91,7 +89,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { closeSasl() } - @ParameterizedTest(name="{displayName}.quorum=kraft.isIdempotenceEnabled={0}") + @ParameterizedTest(name="{displayName}.isIdempotenceEnabled={0}") @ValueSource(booleans = Array(true, false)) def testProducerWithAuthenticationFailure(isIdempotenceEnabled: Boolean): Unit = { val prop = new Properties() @@ -111,9 +109,8 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(sendOneRecord(producer2))() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTransactionalProducerWithAuthenticationFailure(quorum: String): Unit = { + @Test + def testTransactionalProducerWithAuthenticationFailure(): Unit = { val txProducer = createTransactionalProducer() verifyAuthenticationException(txProducer.initTransactions()) @@ -125,7 +122,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testConsumerWithAuthenticationFailure(groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.subscribe(List(topic).asJava) + consumer.subscribe(java.util.List.of(topic)) verifyConsumerWithAuthenticationFailure(consumer) } @@ -133,7 +130,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testManualAssignmentConsumerWithAuthenticationFailure(groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) verifyConsumerWithAuthenticationFailure(consumer) } @@ -142,7 +139,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { def testManualAssignmentConsumerWithAutoCommitDisabledWithAuthenticationFailure(groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) val consumer = createConsumer() - consumer.assign(List(tp).asJava) + 
consumer.assign(java.util.List.of(tp)) consumer.seek(tp, 0) verifyConsumerWithAuthenticationFailure(consumer) } @@ -157,16 +154,15 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(consumer.poll(Duration.ofMillis(1000)))(_.count == 1) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testKafkaAdminClientWithAuthenticationFailure(quorum: String): Unit = { + @Test + def testKafkaAdminClientWithAuthenticationFailure(): Unit = { val props = JaasTestUtils.adminClientSecurityConfigs(securityProtocol, OptionConverters.toJava(trustStoreFile), OptionConverters.toJava(clientSaslProperties)) props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) val adminClient = Admin.create(props) def describeTopic(): Unit = { try { - val response = adminClient.describeTopics(Collections.singleton(topic)).allTopicNames.get + val response = adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames.get assertEquals(1, response.size) response.forEach { (_, description) => assertEquals(numPartitions, description.partitions.size) diff --git a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala index c2ff04d449700..ceff3d4d6b0a7 100644 --- a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala @@ -26,8 +26,6 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue, fail} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import scala.jdk.CollectionConverters._ - abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { override protected def securityProtocol = SecurityProtocol.SASL_SSL override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) @@ -70,8 +68,8 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { consumerConfig.remove(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS) val consumer2 = createConsumer() - consumer1.assign(List(tp).asJava) - consumer2.assign(List(tp).asJava) + consumer1.assign(java.util.List.of(tp)) + consumer2.assign(java.util.List.of(tp)) consumeRecords(consumer1, numRecords) diff --git a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala index d98c2b00c04c7..920dc109ea99c 100644 --- a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala @@ -19,8 +19,6 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import scala.jdk.CollectionConverters._ - @Timeout(600) class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { private val kafkaClientSaslMechanism = "PLAIN" @@ -58,7 +56,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/PLAIN producer and consumer var startingTimestamp = System.currentTimeMillis() sendRecords(plainSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - plainSaslConsumer.assign(List(tp).asJava) + plainSaslConsumer.assign(java.util.List.of(tp)) plainSaslConsumer.seek(tp, 
0) consumeAndVerifyRecords(consumer = plainSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -68,7 +66,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/GSSAPI producer and consumer startingTimestamp = System.currentTimeMillis() sendRecords(gssapiSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - gssapiSaslConsumer.assign(List(tp).asJava) + gssapiSaslConsumer.assign(java.util.List.of(tp)) gssapiSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = gssapiSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -78,7 +76,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/PLAIN producer and SASL/GSSAPI consumer startingTimestamp = System.currentTimeMillis() sendRecords(plainSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - gssapiSaslConsumer.assign(List(tp).asJava) + gssapiSaslConsumer.assign(java.util.List.of(tp)) gssapiSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = gssapiSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -87,7 +85,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/GSSAPI producer and SASL/PLAIN consumer startingTimestamp = System.currentTimeMillis() sendRecords(gssapiSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - plainSaslConsumer.assign(List(tp).asJava) + plainSaslConsumer.assign(java.util.List.of(tp)) plainSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = plainSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) diff --git a/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala deleted file mode 100644 index 09f1f5119b134..0000000000000 --- a/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package kafka.api - -import kafka.security.JaasTestUtils -import kafka.utils.TestUtils -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.junit.jupiter.api._ - -import java.util.Locale - -@Timeout(600) -class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { - override protected def listenerName = new ListenerName("CLIENT") - private val kafkaClientSaslMechanism = "PLAIN" - private val kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) - private val kafkaServerJaasEntryName = - s"${listenerName.value.toLowerCase(Locale.ROOT)}.${JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME}" - override protected def securityProtocol = SecurityProtocol.SASL_PLAINTEXT - override protected lazy val trustStoreFile = Some(TestUtils.tempFile("truststore", ".jks")) - override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) - override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism)) - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), kafkaServerJaasEntryName)) - super.setUp(testInfo) - } - - @AfterEach - override def tearDown(): Unit = { - super.tearDown() - closeSasl() - } -} diff --git a/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala index 473c4a86e3039..8bff9b25e1734 100644 --- a/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala @@ -47,8 +47,8 @@ class SaslScramSslEndToEndAuthorizationTest extends SaslEndToEndAuthorizationTes // Create the admin credentials for KRaft as part of controller initialization override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setClusterId("XcZZOzUqS4yHOjhMQB6JLQ") - formatter.setScramArguments(List( - s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) + formatter.setScramArguments(java.util.List.of( + s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) } override def configureListeners(props: collection.Seq[Properties]): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/SaslSetup.scala b/core/src/test/scala/integration/kafka/api/SaslSetup.scala index b7d2d920fd931..caef826127c96 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSetup.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSetup.scala @@ -64,9 +64,9 @@ trait SaslSetup { val (serverKeytabFile, clientKeytabFile) = maybeCreateEmptyKeytabFiles() kdc = new MiniKdc(kdcConf, workDir) kdc.start() - kdc.createPrincipal(serverKeytabFile, List(JaasTestUtils.KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME + "/localhost").asJava) + kdc.createPrincipal(serverKeytabFile, java.util.List.of(JaasTestUtils.KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME + "/localhost")) kdc.createPrincipal(clientKeytabFile, - List(JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME, JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME_2).asJava) + java.util.List.of(JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME, JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME_2)) } /** Return a tuple with the path to the 
server keytab file and client keytab file */ @@ -166,7 +166,7 @@ trait SaslSetup { def createScramCredentials(adminClient: Admin, userName: String, password: String): Unit = { PublicScramMechanism.values().filter(_ != PublicScramMechanism.UNKNOWN).map(mechanism => { - val results = adminClient.alterUserScramCredentials(util.Arrays.asList( + val results = adminClient.alterUserScramCredentials(util.List.of( new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(mechanism, 4096), password))) results.all.get }) diff --git a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala index 66fca0db9badc..fe5f1643d39c6 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala @@ -39,11 +39,10 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo, Timeout} import java.util -import java.util.Collections +import java.util.Optional import scala.collection.Seq import scala.concurrent.ExecutionException import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption import scala.util.{Failure, Success, Try} @Timeout(120) @@ -84,8 +83,8 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def setInitialAcls(): Unit = { superUserAdmin = createSuperuserAdminClient() val ace = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, ALL, ALLOW) - superUserAdmin.createAcls(List(new AclBinding(new ResourcePattern(TOPIC, "*", LITERAL), ace)).asJava) - superUserAdmin.createAcls(List(new AclBinding(new ResourcePattern(GROUP, "*", LITERAL), ace)).asJava) + superUserAdmin.createAcls(java.util.List.of(new AclBinding(new ResourcePattern(TOPIC, "*", LITERAL), ace))) + superUserAdmin.createAcls(java.util.List.of(new AclBinding(new ResourcePattern(GROUP, "*", LITERAL), ace))) val clusterAcls = List(clusterAcl(ALLOW, CREATE), clusterAcl(ALLOW, DELETE), @@ -141,7 +140,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.createAcls(Collections.singleton(acl), new CreateAclsOptions().timeoutMs(0)).all().get() + brokenClient.createAcls(util.Set.of(acl), new CreateAclsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) @@ -157,7 +156,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu try { val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.deleteAcls(Collections.singleton(AclBindingFilter.ANY), new DeleteAclsOptions().timeoutMs(0)).all().get() + brokenClient.deleteAcls(util.Set.of(AclBindingFilter.ANY), new DeleteAclsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) @@ -166,10 +165,10 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu @Test def testExpireDelegationTokenWithOptionExpireTimePeriodMs(): Unit = { client = createAdminClient - val renewer = 
List(SecurityUtils.parseKafkaPrincipal("User:renewer")) + val renewer = java.util.List.of(SecurityUtils.parseKafkaPrincipal("User:renewer")) def generateTokenResult(maxLifeTimeMs: Int, expiryTimePeriodMs: Int, expectedTokenNum: Int): (CreateDelegationTokenResult, ExpireDelegationTokenResult) = { - val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer.asJava).maxLifetimeMs(maxLifeTimeMs)) + val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer).maxLifetimeMs(maxLifeTimeMs)) val tokenCreated = createResult.delegationToken.get TestUtils.waitUntilTrue(() => brokers.forall(server => server.tokenCache.tokens().size() == expectedTokenNum), "Timed out waiting for token to propagate to all servers") @@ -204,15 +203,15 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) assertEquals(8, getAcls(AclBindingFilter.ANY).size) - val results = client.createAcls(List(acl2, acl3).asJava) + val results = client.createAcls(java.util.List.of(acl2, acl3)) assertEquals(Set(acl2, acl3), results.values.keySet().asScala) results.values.values.forEach(value => value.get) val aclUnknown = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.UNKNOWN, AclPermissionType.ALLOW)) - val results2 = client.createAcls(List(aclUnknown).asJava) + val results2 = client.createAcls(java.util.List.of(aclUnknown)) assertEquals(Set(aclUnknown), results2.values.keySet().asScala) assertFutureThrows(classOf[InvalidRequestException], results2.all) - val results3 = client.deleteAcls(List(acl.toFilter, acl2.toFilter, acl3.toFilter).asJava).values + val results3 = client.deleteAcls(java.util.List.of(acl.toFilter, acl2.toFilter, acl3.toFilter)).values assertEquals(Set(acl.toFilter, acl2.toFilter, acl3.toFilter), results3.keySet.asScala) assertEquals(0, results3.get(acl.toFilter).get.values.size()) assertEquals(Set(acl2), results3.get(acl2.toFilter).get.values.asScala.map(_.binding).toSet) @@ -222,7 +221,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu @Test def testAclOperations2(): Unit = { client = createAdminClient - val results = client.createAcls(List(acl2, acl2, transactionalIdAcl).asJava) + val results = client.createAcls(java.util.List.of(acl2, acl2, transactionalIdAcl)) assertEquals(Set(acl2, acl2, transactionalIdAcl), results.values.keySet.asScala) results.all.get() waitForDescribeAcls(client, acl2.toFilter, Set(acl2)) @@ -235,7 +234,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu waitForDescribeAcls(client, filterA, Set(groupAcl)) waitForDescribeAcls(client, filterC, Set(transactionalIdAcl)) - val results2 = client.deleteAcls(List(filterA, filterB, filterC).asJava, new DeleteAclsOptions()) + val results2 = client.deleteAcls(java.util.List.of(filterA, filterB, filterC), new DeleteAclsOptions()) assertEquals(Set(filterA, filterB, filterC), results2.values.keySet.asScala) assertEquals(Set(groupAcl), results2.values.get(filterA).get.values.asScala.map(_.binding).toSet) assertEquals(Set(transactionalIdAcl), results2.values.get(filterC).get.values.asScala.map(_.binding).toSet) @@ -282,7 +281,7 @@ class SaslSslAdminIntegrationTest extends 
BaseAdminIntegrationTest with SaslSetu val allPrefixedTopicAcls = new AclBindingFilter(new ResourcePatternFilter(ResourceType.TOPIC, null, PatternType.PREFIXED), AccessControlEntryFilter.ANY) // Delete only ACLs on literal 'mytopic2' topic - var deleted = client.deleteAcls(List(acl2.toFilter).asJava).all().get().asScala.toSet + var deleted = client.deleteAcls(java.util.List.of(acl2.toFilter)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl2.pattern()) } @@ -291,7 +290,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete only ACLs on literal '*' topic - deleted = client.deleteAcls(List(anyAcl.toFilter).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(anyAcl.toFilter)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, anyAcl.pattern()) } @@ -300,7 +299,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete only ACLs on specific prefixed 'mytopic' topics: - deleted = client.deleteAcls(List(prefixAcl.toFilter).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(prefixAcl.toFilter)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, prefixAcl.pattern()) } @@ -309,7 +308,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete all literal ACLs: - deleted = client.deleteAcls(List(allLiteralTopicAcls).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(allLiteralTopicAcls)).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl).foreach(acl => waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) @@ -320,7 +319,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete all prefixed ACLs: - deleted = client.deleteAcls(List(allPrefixedTopicAcls).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(allPrefixedTopicAcls)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, prefixAcl.pattern()) } @@ -329,7 +328,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete all topic ACLs: - deleted = client.deleteAcls(List(allTopicAcls).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(allTopicAcls)).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl, prefixAcl).foreach(acl => waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) @@ -356,7 +355,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu assertEquals(Set(fooAcl), getAcls(legacyFooTopicAcls)) // Delete only (legacy) ACLs on 'mytopic2' topic - var deleted = client.deleteAcls(List(legacyMyTopic2Acls).asJava).all().get().asScala.toSet + var deleted = client.deleteAcls(java.util.List.of(legacyMyTopic2Acls)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl2.pattern()) } @@ -365,7 +364,7 @@ class 
SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete only (legacy) ACLs on '*' topic - deleted = client.deleteAcls(List(legacyAnyTopicAcls).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(legacyAnyTopicAcls)).all().get().asScala.toSet brokers.foreach { b => waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, anyAcl.pattern()) } @@ -374,7 +373,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete all (legacy) topic ACLs: - deleted = client.deleteAcls(List(legacyAllTopicAcls).asJava).all().get().asScala.toSet + deleted = client.deleteAcls(java.util.List.of(legacyAllTopicAcls)).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl).foreach(acl => waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) @@ -391,7 +390,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.READ, AclPermissionType.ALLOW)) val emptyResourceNameAcl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.READ, AclPermissionType.ALLOW)) - val results = client.createAcls(List(clusterAcl, emptyResourceNameAcl).asJava, new CreateAclsOptions()) + val results = client.createAcls(java.util.List.of(clusterAcl, emptyResourceNameAcl), new CreateAclsOptions()) assertEquals(Set(clusterAcl, emptyResourceNameAcl), results.values.keySet().asScala) assertFutureThrows(classOf[InvalidRequestException], results.values.get(clusterAcl)) assertFutureThrows(classOf[InvalidRequestException], results.values.get(emptyResourceNameAcl)) @@ -434,7 +433,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def testAclCreateGetDelete(expectAuth: Boolean): Unit = { TestUtils.waitUntilTrue(() => { - val result = client.createAcls(List(fooAcl, transactionalIdAcl).asJava, new CreateAclsOptions) + val result = client.createAcls(java.util.List.of(fooAcl, transactionalIdAcl), new CreateAclsOptions) if (expectAuth) { Try(result.all.get) match { case Failure(e) => @@ -456,7 +455,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu waitForDescribeAcls(client, transactionalIdAcl.toFilter, Set(transactionalIdAcl)) } TestUtils.waitUntilTrue(() => { - val result = client.deleteAcls(List(fooAcl.toFilter, transactionalIdAcl.toFilter).asJava, new DeleteAclsOptions) + val result = client.deleteAcls(java.util.List.of(fooAcl.toFilter, transactionalIdAcl.toFilter), new DeleteAclsOptions) if (expectAuth) { Try(result.all.get) match { case Failure(e) => @@ -541,7 +540,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def addClusterAcl(permissionType: AclPermissionType, operation: AclOperation): Unit = { val ace = clusterAcl(permissionType, operation) - superUserAdmin.createAcls(List(new AclBinding(clusterResourcePattern, ace)).asJava) + superUserAdmin.createAcls(java.util.List.of(new AclBinding(clusterResourcePattern, ace))) brokers.foreach { b => waitAndVerifyAcl(ace, b.dataPlaneRequestProcessor.authorizerPlugin.get, clusterResourcePattern) } @@ -549,7 +548,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def removeClusterAcl(permissionType: AclPermissionType, operation: AclOperation): 
Unit = { val ace = clusterAcl(permissionType, operation) - superUserAdmin.deleteAcls(List(new AclBinding(clusterResourcePattern, ace).toFilter).asJava).values + superUserAdmin.deleteAcls(java.util.List.of(new AclBinding(clusterResourcePattern, ace).toFilter)).values brokers.foreach { b => waitAndVerifyRemovedAcl(ace, b.dataPlaneRequestProcessor.authorizerPlugin.get, clusterResourcePattern) @@ -564,14 +563,14 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu new AccessControlEntry("User:*", "*", AclOperation.DESCRIBE_CONFIGS, AclPermissionType.DENY)) client = createAdminClient - client.createAcls(List(denyAcl).asJava, new CreateAclsOptions()).all().get() + client.createAcls(java.util.List.of(denyAcl), new CreateAclsOptions()).all().get() val topics = Seq(topic1, topic2) - val configsOverride = Map(TopicConfig.SEGMENT_BYTES_CONFIG -> "100000").asJava - val newTopics = Seq( + val configsOverride = java.util.Map.of(TopicConfig.SEGMENT_BYTES_CONFIG, "3000000") + val newTopics = java.util.List.of( new NewTopic(topic1, 2, 3.toShort).configs(configsOverride), - new NewTopic(topic2, Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava).configs(configsOverride)) - val validateResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)) + new NewTopic(topic2, Optional.empty[Integer], Optional.empty[java.lang.Short]).configs(configsOverride)) + val validateResult = client.createTopics(newTopics, new CreateTopicsOptions().validateOnly(true)) validateResult.all.get() waitForTopics(client, List(), topics) @@ -581,7 +580,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val topicConfigs = result.config(topic1).get().entries.asScala assertTrue(topicConfigs.nonEmpty) val segmentBytesConfig = topicConfigs.find(_.name == TopicConfig.SEGMENT_BYTES_CONFIG).get - assertEquals(100000, segmentBytesConfig.value.toLong) + assertEquals(3000000, segmentBytesConfig.value.toLong) assertEquals(ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, segmentBytesConfig.source) val compressionConfig = topicConfigs.find(_.name == TopicConfig.COMPRESSION_TYPE_CONFIG).get assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, compressionConfig.value) @@ -593,7 +592,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } validateMetadataAndConfigs(validateResult) - val createResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions()) + val createResult = client.createTopics(newTopics, new CreateTopicsOptions()) createResult.all.get() waitForTopics(client, topics, List()) validateMetadataAndConfigs(createResult) @@ -680,7 +679,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu TestUtils.waitUntilTrue(() => { try { - val topicResponse = client.describeConfigs(List(topicResource).asJava).all.get.get(topicResource) + val topicResponse = client.describeConfigs(java.util.List.of(topicResource)).all.get.get(topicResource) configEntries = topicResponse.entries.asScala true } catch { diff --git a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala index 4b1b94336f799..2150db202e1e9 100644 --- a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala @@ -202,7 +202,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { // Therefore, only the number 
of controller I/O threads is relevant in this context. val numReqThreads = controllerServers.head.config.numIoThreads * controllerServers.size while (blockedRequestThreads.size < numReqThreads) { - aclFutures += createAdminClient.createAcls(List(acl2).asJava) + aclFutures += createAdminClient.createAcls(java.util.List.of(acl2)) assertTrue(aclFutures.size < numReqThreads * 10, s"Request threads not blocked numRequestThreads=$numReqThreads blocked=$blockedRequestThreads aclFutures=${aclFutures.size}") } @@ -231,7 +231,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { } } (0 until numTimedOut) - .map(_ => createAdminClient.createAcls(List(acl2).asJava)) + .map(_ => createAdminClient.createAcls(java.util.List.of(acl2))) .foreach(_.all().get(30, TimeUnit.SECONDS)) } @@ -251,7 +251,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { // In KRaft mode, ACL creation is handled exclusively by controller servers, not brokers. // Therefore, only the number of controller I/O threads is relevant in this context. val numReqThreads = controllerServers.head.config.numIoThreads * controllerServers.size - val aclFutures = (0 until numReqThreads).map(_ => createAdminClient.createAcls(List(acl2).asJava)) + val aclFutures = (0 until numReqThreads).map(_ => createAdminClient.createAcls(java.util.List.of(acl2))) waitForNoBlockedRequestThreads() assertTrue(aclFutures.forall(future => !future.all.isDone)) @@ -287,7 +287,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { useBoostrapControllers() client = createAdminClient - val results = client.createAcls(List(acl2, acl3).asJava).values + val results = client.createAcls(java.util.List.of(acl2, acl3)).values assertEquals(Set(acl2, acl3), results.keySet().asScala) assertFalse(results.values.asScala.exists(_.isDone)) TestUtils.waitUntilTrue(() => testSemaphore.hasQueuedThreads, "Authorizer not blocked in createAcls") @@ -296,7 +296,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { validateRequestContext(SslAdminIntegrationTest.lastUpdateRequestContext.get, ApiKeys.CREATE_ACLS) testSemaphore.acquire() - val results2 = client.deleteAcls(List(acl.toFilter, acl2.toFilter, acl3.toFilter).asJava).values + val results2 = client.deleteAcls(java.util.List.of(acl.toFilter, acl2.toFilter, acl3.toFilter)).values assertEquals(Set(acl.toFilter, acl2.toFilter, acl3.toFilter), results2.keySet.asScala) assertFalse(results2.values.asScala.exists(_.isDone)) TestUtils.waitUntilTrue(() => testSemaphore.hasQueuedThreads, "Authorizer not blocked in deleteAcls") @@ -353,7 +353,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { val controllerListenerName = ListenerName.forSecurityProtocol(extraControllerSecurityProtocol) val config = controllerServers.map { s => val listener = s.config.effectiveAdvertisedControllerListeners - .find(_.listenerName == controllerListenerName) + .find(_.listener == controllerListenerName.value) .getOrElse(throw new IllegalArgumentException(s"Could not find listener with name $controllerListenerName")) Utils.formatAddress(listener.host, s.socketServer.boundPort(controllerListenerName)) }.mkString(",") diff --git a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala index 3e0ba00d3f924..49ff3d7acacaf 100644 --- a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala +++ 
b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala @@ -68,7 +68,7 @@ class SslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { // - a space character occurring at the end of the string // - one of the characters ",", "+", """, "\", "<", ">" or ";" // - // Leading and trailing spaces in Kafka principal dont work with ACLs, but we can workaround by using + // Leading and trailing spaces in Kafka principal don't work with ACLs, but we can workaround by using // a PrincipalBuilder that removes/replaces them. private val clientCn = """\#A client with special chars in CN : (\, \+ \" \\ \< \> \; ')""" override val clientPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, s"O=A client,CN=$clientCn") diff --git a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala index 3be1f4a2eb3f6..e37af5441c275 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala @@ -156,8 +156,8 @@ class TransactionsTest extends IntegrationTestHarness { verifyLogStartOffsets(Map((tp11, 0), (tp22, 0))) maybeVerifyLocalLogStartOffsets(Map((tp11, 3L), (tp22, 3L))) - consumer.subscribe(List(topic1, topic2).asJava) - unCommittedConsumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) + unCommittedConsumer.subscribe(java.util.List.of(topic1, topic2)) val records = consumeRecords(consumer, 2) records.foreach { record => @@ -204,19 +204,19 @@ class TransactionsTest extends IntegrationTestHarness { // ensure the records are visible to the read uncommitted consumer val tp1 = new TopicPartition(topic1, 0) val tp2 = new TopicPartition(topic2, 0) - readUncommittedConsumer.assign(Set(tp1, tp2).asJava) + readUncommittedConsumer.assign(java.util.Set.of(tp1, tp2)) consumeRecords(readUncommittedConsumer, 8) - val readUncommittedOffsetsForTimes = readUncommittedConsumer.offsetsForTimes(Map( - tp1 -> (latestWrittenTimestamp: JLong), - tp2 -> (latestWrittenTimestamp: JLong) - ).asJava) + val readUncommittedOffsetsForTimes = readUncommittedConsumer.offsetsForTimes(java.util.Map.of( + tp1, latestWrittenTimestamp: JLong, + tp2, latestWrittenTimestamp: JLong + )) assertEquals(2, readUncommittedOffsetsForTimes.size) assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp1).timestamp) assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp2).timestamp) readUncommittedConsumer.unsubscribe() // we should only see the first two records which come before the undecided second transaction - readCommittedConsumer.assign(Set(tp1, tp2).asJava) + readCommittedConsumer.assign(java.util.Set.of(tp1, tp2)) val records = consumeRecords(readCommittedConsumer, 2) records.foreach { record => assertEquals("x", new String(record.key)) @@ -231,10 +231,10 @@ class TransactionsTest extends IntegrationTestHarness { } // undecided timestamps should not be searchable either - val readCommittedOffsetsForTimes = readCommittedConsumer.offsetsForTimes(Map( - tp1 -> (latestWrittenTimestamp: JLong), - tp2 -> (latestWrittenTimestamp: JLong) - ).asJava) + val readCommittedOffsetsForTimes = readCommittedConsumer.offsetsForTimes(java.util.Map.of( + tp1, latestWrittenTimestamp: JLong, + tp2, latestWrittenTimestamp: JLong + )) assertNull(readCommittedOffsetsForTimes.get(tp1)) assertNull(readCommittedOffsetsForTimes.get(tp2)) } @@ -282,7 +282,7 @@ class TransactionsTest extends 
IntegrationTestHarness { consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100") val readCommittedConsumer = createReadCommittedConsumer(props = consumerProps) - readCommittedConsumer.assign(Set(tp10).asJava) + readCommittedConsumer.assign(java.util.Set.of(tp10)) val records = consumeRecords(readCommittedConsumer, numRecords = 2) assertEquals(2, records.size) @@ -324,7 +324,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer = transactionalProducers.head val consumer = createReadCommittedConsumer(consumerGroupId, maxPollRecords = numSeedMessages / 4) - consumer.subscribe(List(topic1).asJava) + consumer.subscribe(java.util.List.of(topic1)) producer.initTransactions() var shouldCommit = false @@ -368,7 +368,7 @@ class TransactionsTest extends IntegrationTestHarness { // In spite of random aborts, we should still have exactly 500 messages in topic2. I.e. we should not // re-copy or miss any messages from topic1, since the consumed offsets were committed transactionally. val verifyingConsumer = transactionalConsumers(0) - verifyingConsumer.subscribe(List(topic2).asJava) + verifyingConsumer.subscribe(java.util.List.of(topic2)) val valueSeq = TestUtils.pollUntilAtLeastNumRecords(verifyingConsumer, numSeedMessages).map { record => TestUtils.assertCommittedAndGetValue(record).toInt } @@ -384,7 +384,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) producer1.initTransactions() @@ -415,7 +415,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) producer1.initTransactions() @@ -429,8 +429,8 @@ class TransactionsTest extends IntegrationTestHarness { producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "4", willBeCommitted = true)) producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "4", willBeCommitted = true)) - assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(Map(new TopicPartition(topic1, 0) - -> new OffsetAndMetadata(110L)).asJava, new ConsumerGroupMetadata("foobarGroup"))) + assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(java.util.Map.of(new TopicPartition(topic1, 0), + new OffsetAndMetadata(110L)), new ConsumerGroupMetadata("foobarGroup"))) producer2.commitTransaction() // ok @@ -449,13 +449,13 @@ class TransactionsTest extends IntegrationTestHarness { val producer = transactionalProducers.head val consumer = createReadCommittedConsumer(groupId) - consumer.subscribe(List(topic1).asJava) + consumer.subscribe(java.util.List.of(topic1)) producer.initTransactions() producer.beginTransaction() val offsetAndMetadata = new OffsetAndMetadata(110L, Optional.of(15), "some metadata") - producer.sendOffsetsToTransaction(Map(tp -> offsetAndMetadata).asJava, new ConsumerGroupMetadata(groupId)) + producer.sendOffsetsToTransaction(java.util.Map.of(tp, offsetAndMetadata), new ConsumerGroupMetadata(groupId)) producer.commitTransaction() // ok // The call to commit the transaction may return before all markers are visible, so we initialize a second @@ -463,7 +463,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = 
transactionalProducers(1) producer2.initTransactions() - TestUtils.waitUntilTrue(() => offsetAndMetadata.equals(consumer.committed(Set(tp).asJava).get(tp)), "cannot read committed offset") + TestUtils.waitUntilTrue(() => offsetAndMetadata.equals(consumer.committed(java.util.Set.of(tp)).get(tp)), "cannot read committed offset") } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -476,7 +476,7 @@ class TransactionsTest extends IntegrationTestHarness { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testSendOffsetsToTransactionTimeout(groupProtocol: String): Unit = { testTimeout(needInitAndSendMsg = true, producer => producer.sendOffsetsToTransaction( - Map(new TopicPartition(topic1, 0) -> new OffsetAndMetadata(0)).asJava, new ConsumerGroupMetadata("test-group"))) + java.util.Map.of(new TopicPartition(topic1, 0), new OffsetAndMetadata(0)), new ConsumerGroupMetadata("test-group"))) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @@ -513,7 +513,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) producer1.initTransactions() @@ -558,7 +558,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(List(topic1, topic2).asJava) + consumer.subscribe(java.util.List.of(topic1, topic2)) TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic1, 0)) TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic2, 0)) @@ -629,7 +629,7 @@ class TransactionsTest extends IntegrationTestHarness { // Verify that the first message was aborted and the second one was never written at all. val nonTransactionalConsumer = nonTransactionalConsumers.head - nonTransactionalConsumer.subscribe(List(topic1).asJava) + nonTransactionalConsumer.subscribe(java.util.List.of(topic1)) // Attempt to consume the one written record. We should not see the second. 
The
    // assertion does not strictly guarantee that the record wasn't written, but the
@@ -639,7 +639,7 @@ class TransactionsTest extends IntegrationTestHarness {
    assertEquals("1", TestUtils.recordValueAsString(records.head))

    val transactionalConsumer = transactionalConsumers.head
-    transactionalConsumer.subscribe(List(topic1).asJava)
+    transactionalConsumer.subscribe(java.util.List.of(topic1))

    val transactionalRecords = consumeRecordsFor(transactionalConsumer)
    assertTrue(transactionalRecords.isEmpty)
@@ -668,8 +668,8 @@ class TransactionsTest extends IntegrationTestHarness {
    sendTransactionalMessagesWithValueRange(firstProducer, topicWith10Partitions, 10000, 11000, willBeCommitted = true)
    firstProducer.commitTransaction()

-    consumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava)
-    unCommittedConsumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava)
+    consumer.subscribe(java.util.List.of(topicWith10PartitionsAndOneReplica, topicWith10Partitions))
+    unCommittedConsumer.subscribe(java.util.List.of(topicWith10PartitionsAndOneReplica, topicWith10Partitions))

    val records = consumeRecords(consumer, 1000)
    records.foreach { record =>
@@ -698,7 +698,7 @@ class TransactionsTest extends IntegrationTestHarness {
    "consumer,false",
  ))
  def testBumpTransactionalEpochWithTV2Disabled(groupProtocol: String, isTV2Enabled: Boolean): Unit = {
-    val defaultLinger = 5;
+    val defaultLinger = 5
    val producer = createTransactionalProducer("transactionalProducer", deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000)
    val consumer = transactionalConsumers.head
@@ -753,7 +753,7 @@ class TransactionsTest extends IntegrationTestHarness {
    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = true))
    producer.commitTransaction()

-    consumer.subscribe(List(topic1, topic2, testTopic).asJava)
+    consumer.subscribe(java.util.List.of(topic1, topic2, testTopic))
    val records = consumeRecords(consumer, 5)
    records.foreach { record =>
@@ -778,7 +778,7 @@ class TransactionsTest extends IntegrationTestHarness {
    "consumer, true"
  ))
  def testBumpTransactionalEpochWithTV2Enabled(groupProtocol: String, isTV2Enabled: Boolean): Unit = {
-    val defaultLinger = 5;
+    val defaultLinger = 5
    val producer = createTransactionalProducer("transactionalProducer", deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000)
    val consumer = transactionalConsumers.head
@@ -834,7 +834,7 @@ class TransactionsTest extends IntegrationTestHarness {
    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = true))
    producer.commitTransaction()

-    consumer.subscribe(List(topic1, topic2, testTopic).asJava)
+    consumer.subscribe(java.util.List.of(topic1, topic2, testTopic))
    val records = consumeRecords(consumer, 5)
    records.foreach { record =>
@@ -991,11 +991,10 @@ class TransactionsTest extends IntegrationTestHarness {
    waitUntilTrue(() => {
      brokers.forall(broker => {
        partitionStartOffsets.forall {
-          case (partition, offset) => {
+          case (partition, offset) =>
            val lso = broker.replicaManager.localLog(partition).get.logStartOffset
            offsets.put(broker.config.brokerId, lso)
            offset == lso
-          }
        }
      })
    }, s"log start offset doesn't change to the expected position: $partitionStartOffsets, current position: $offsets")
diff --git a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala
index 674a379cfeaac..772af45733324 100644
--- a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala
+++ b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala
@@ -17,8 +17,7 @@ package kafka.coordinator.transaction

-import kafka.network.SocketServer
-import kafka.server.IntegrationTestUtils
+import org.apache.kafka.server.IntegrationTestUtils
import org.apache.kafka.clients.admin.{Admin, NewTopic, TransactionState}
import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerRecords, OffsetAndMetadata}
import org.apache.kafka.clients.producer.{Producer, ProducerConfig, ProducerRecord}
@@ -27,19 +26,18 @@ import org.apache.kafka.common.errors.RecordTooLargeException
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, ClusterTests, Type}
import org.apache.kafka.common.message.InitProducerIdRequestData
-import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.common.requests.{InitProducerIdRequest, InitProducerIdResponse}
-import org.apache.kafka.common.test.{ClusterInstance, TestUtils}
+import org.apache.kafka.common.test.ClusterInstance
import org.apache.kafka.coordinator.group.GroupCoordinatorConfig
import org.apache.kafka.coordinator.transaction.TransactionLogConfig
import org.apache.kafka.server.common.{Feature, MetadataVersion}
+import org.apache.kafka.test.TestUtils
import org.junit.jupiter.api.Assertions.{assertEquals, assertInstanceOf, assertThrows, assertTrue}

import java.time.Duration
import java.util
-import java.util.Collections
import java.util.concurrent.ExecutionException
import java.util.stream.{Collectors, IntStream, StreamSupport}
import scala.concurrent.duration.DurationInt
@@ -95,7 +93,7 @@ class ProducerIntegrationTest {
      new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 2))),
  ))
  def testTransactionWithInvalidSendAndEndTxnRequestSent(cluster: ClusterInstance): Unit = {
-    val topic = new NewTopic("foobar", 1, 1.toShort).configs(Collections.singletonMap(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "100"))
+    val topic = new NewTopic("foobar", 1, 1.toShort).configs(util.Map.of(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "100"))
    val txnId = "test-txn"
    val properties = new util.HashMap[String, Object]
    properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txnId)
@@ -105,7 +103,7 @@ class ProducerIntegrationTest {
    val admin = cluster.admin()
    val producer: Producer[Array[Byte], Array[Byte]] = cluster.producer(properties)
    try {
-      admin.createTopics(List(topic).asJava)
+      admin.createTopics(util.List.of(topic))
      producer.initTransactions()
      producer.beginTransaction()
@@ -162,7 +160,7 @@ class ProducerIntegrationTest {
      records.count == 5
    }, "poll records size not match")
    val lastRecord = StreamSupport.stream(records.spliterator, false).reduce((_, second) => second).orElse(null)
-    val offsets = Collections.singletonMap(
+    val offsets = util.Map.of(
      new TopicPartition(lastRecord.topic, lastRecord.partition), new OffsetAndMetadata(lastRecord.offset + 1))
    producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata)
    producer.commitTransaction()
@@ -183,9 +181,9 @@ class ProducerIntegrationTest {

  private def verifyUniqueIds(clusterInstance: ClusterInstance): Unit = {
    // Request enough PIDs from each broker to ensure each broker generates two blocks
-    val ids = clusterInstance.brokerSocketServers().stream().flatMap( broker => {
-      IntStream.range(0, 1001).parallel().mapToObj( _ =>
-        nextProducerId(broker, clusterInstance.clientListener())
+    val ids = clusterInstance.brokers().values().stream().flatMap(broker => {
+      IntStream.range(0, 1001).parallel().mapToObj(_ =>
+        nextProducerId(broker.boundPort(clusterInstance.clientListener()))
      )}).collect(Collectors.toList[Long]).asScala.toSeq

    val brokerCount = clusterInstance.brokerIds.size
@@ -194,7 +192,7 @@ class ProducerIntegrationTest {
    assertEquals(expectedTotalCount, ids.distinct.size, "Found duplicate producer IDs")
  }

-  private def nextProducerId(broker: SocketServer, listener: ListenerName): Long = {
+  private def nextProducerId(port: Int): Long = {
    // Generating producer ids may fail while waiting for the initial block and also
    // when the current block is full and waiting for the prefetched block.
    val deadline = 5.seconds.fromNow
@@ -207,11 +205,7 @@ class ProducerIntegrationTest {
        .setTransactionalId(null)
        .setTransactionTimeoutMs(10)
      val request = new InitProducerIdRequest.Builder(data).build()
-
-      response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request,
-        destination = broker,
-        listenerName = listener)
-
+      response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request, port)
      shouldRetry = response.data.errorCode == Errors.COORDINATOR_LOAD_IN_PROGRESS.code
    }
    assertTrue(deadline.hasTimeLeft())
diff --git a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala
index 4f5cd7f4a2803..e2db135124459 100644
--- a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala
+++ b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala
@@ -30,18 +30,16 @@ import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.{ProduceRequest, ProduceResponse}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.test.api.Flaky
-import org.apache.kafka.common.{KafkaException, requests}
+import org.apache.kafka.common.{KafkaException, Uuid, requests}
import org.apache.kafka.network.SocketServerConfigs
import org.apache.kafka.server.config.QuotaConfig
import org.junit.jupiter.api.Assertions._
-import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
-import org.junit.jupiter.params.ParameterizedTest
-import org.junit.jupiter.params.provider.ValueSource
+import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo}

import java.io.IOException
import java.net.{InetAddress, Socket}
import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
-import java.util.{Collections, Properties}
+import java.util.Properties
import scala.collection.Map
import scala.jdk.CollectionConverters._
@@ -56,7 +54,7 @@ class DynamicConnectionQuotaTest extends BaseRequestTest {
  val plaintextListenerDefaultQuota = 30
  var executor: ExecutorService = _
  var admin: Admin = _
-
+  var topicId: Uuid = _
  override def brokerPropertyOverrides(properties: Properties): Unit = {
    properties.put(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, "2")
    properties.put("listener.name.plaintext.max.connection.creation.rate", plaintextListenerDefaultQuota.toString)
@@ -67,6 +65,7 @@ class DynamicConnectionQuotaTest extends BaseRequestTest {
    super.setUp(testInfo)
    admin = createAdminClient(listener)
TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers) + topicId = TestUtils.describeTopic(admin, topic).topicId() } @AfterEach @@ -83,9 +82,8 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } @Flaky("KAFKA-17999") - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicConnectionQuota(quorum: String): Unit = { + @Test + def testDynamicConnectionQuota(): Unit = { val maxConnectionsPerIP = 5 def connectAndVerify(): Unit = { @@ -111,9 +109,8 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { verifyMaxConnections(maxConnectionsPerIPOverride, connectAndVerify) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicListenerConnectionQuota(quorum: String): Unit = { + @Test + def testDynamicListenerConnectionQuota(): Unit = { val initialConnectionCount = connectionCount def connectAndVerify(): Unit = { @@ -184,9 +181,8 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicListenerConnectionCreationRateQuota(quorum: String): Unit = { + @Test + def testDynamicListenerConnectionCreationRateQuota(): Unit = { // Create another listener. PLAINTEXT is an inter-broker listener // keep default limits val newListenerNames = Seq("PLAINTEXT", "EXTERNAL") @@ -246,9 +242,8 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { waitForConnectionCount(initialConnectionCount) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicIpConnectionRateQuota(quorum: String): Unit = { + @Test + def testDynamicIpConnectionRateQuota(): Unit = { val connRateLimit = 10 val initialConnectionCount = connectionCount // before setting connection rate to 10, verify we can do at least double that by default (no limit) @@ -307,9 +302,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { private def produceRequest: ProduceRequest = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(topic) - .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() + java.util.List.of(new ProduceRequestData.TopicProduceData() + .setTopicId(topicId) + .setPartitionData(java.util.List.of(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord(System.currentTimeMillis(), "key".getBytes, "value".getBytes)))))) diff --git a/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala b/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala index 91bf8c0378364..2273a69cf99ed 100644 --- a/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala +++ b/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala @@ -24,9 +24,7 @@ import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.network.SocketServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.util.Properties import scala.jdk.CollectionConverters._ @@ -66,9 +64,8 @@ class DynamicNumNetworkThreadsTest extends 
BaseRequestTest { .count(listener == _.tags().get("listener")) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicNumNetworkThreads(quorum: String): Unit = { + @Test + def testDynamicNumNetworkThreads(): Unit = { // Increase the base network thread count val newBaseNetworkThreadsCount = SocketServerConfigs.NUM_NETWORK_THREADS_DEFAULT + 1 var props = new Properties diff --git a/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala b/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala index 9c5fcf90779ee..f10beb0086fa8 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala @@ -16,7 +16,7 @@ */ package kafka.server -import java.util.Optional +import java.util.{Optional, OptionalLong} import scala.collection.Seq import kafka.cluster.Partition import org.apache.kafka.common.{TopicIdPartition, Uuid} @@ -25,6 +25,7 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEnd import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest +import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogOffsetMetadata, LogOffsetSnapshot} import org.junit.jupiter.api.Test @@ -255,16 +256,16 @@ class DelayedFetchTest { } private def buildReadResult(error: Errors): LogReadResult = { - LogReadResult( - exception = if (error != Errors.NONE) Some(error.exception) else None, - info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, - highWatermark = -1L, - leaderLogStartOffset = -1L, - leaderLogEndOffset = -1L, - followerLogStartOffset = -1L, - fetchTimeMs = -1L, - lastStableOffset = None) + new LogReadResult( + new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + Optional.empty(), + -1L, + -1L, + -1L, + -1L, + -1L, + OptionalLong.empty(), + if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) } } diff --git a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala index 21c59ab89a130..23b4b32b0d744 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala @@ -16,20 +16,23 @@ */ package kafka.server +import com.yammer.metrics.core.Meter import kafka.cluster.Partition import org.apache.kafka.common.errors.NotLeaderOrFollowerException import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.{TopicIdPartition, Uuid} +import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log._ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test -import org.mockito.Mockito.{mock, verify, when} +import org.mockito.ArgumentMatchers.anyBoolean +import org.mockito.Mockito.{mock, never, verify, when} -import java.util.Optional +import java.util.{Collections, Optional, 
OptionalLong} import java.util.concurrent.{CompletableFuture, Future} import scala.collection._ import scala.jdk.CollectionConverters._ @@ -38,6 +41,7 @@ class DelayedRemoteFetchTest { private val maxBytes = 1024 private val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) private val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + private val topicIdPartition2 = new TopicIdPartition(Uuid.randomUuid(), 0, "topic2") private val fetchOffset = 500L private val logStartOffset = 0L private val currentLeaderEpoch = Optional.of[Integer](10) @@ -60,14 +64,22 @@ class DelayedRemoteFetchTest { } val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(null) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null) + future.complete(buildRemoteReadResult(Errors.NONE)) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) val highWatermark = 100 val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch( + java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), + java.util.Collections.singletonMap(topicIdPartition, future), + java.util.Collections.singletonMap(topicIdPartition, fetchInfo), + remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), + fetchParams, + Seq(topicIdPartition -> logReadInfo), + replicaManager, + callback) when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) .thenReturn(mock(classOf[Partition])) @@ -96,14 +108,23 @@ class DelayedRemoteFetchTest { } val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(null) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null) + future.complete(buildRemoteReadResult(Errors.NONE)) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) val highWatermark = 100 val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) val fetchParams = buildFetchParams(replicaId = 1, maxWaitMs = 500) - assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback)) + + assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch( + java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), + java.util.Collections.singletonMap(topicIdPartition, future), + java.util.Collections.singletonMap(topicIdPartition, fetchInfo), + remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), + fetchParams, + Seq(topicIdPartition -> logReadInfo), + replicaManager, + callback)) } @Test @@ -122,12 +143,20 @@ class DelayedRemoteFetchTest { .thenThrow(new NotLeaderOrFollowerException(s"Replica for $topicIdPartition not available")) val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - val fetchInfo: RemoteStorageFetchInfo = new 
RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) val logReadInfo = buildReadResult(Errors.NONE) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch( + java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), + java.util.Collections.singletonMap(topicIdPartition, future), + java.util.Collections.singletonMap(topicIdPartition, fetchInfo), + remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), + fetchParams, + Seq(topicIdPartition -> logReadInfo), + replicaManager, + callback) // delayed remote fetch should still be able to complete assertTrue(delayedRemoteFetch.tryComplete()) @@ -151,14 +180,22 @@ class DelayedRemoteFetchTest { .thenReturn(mock(classOf[Partition])) val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(null) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null) + future.complete(buildRemoteReadResult(Errors.NONE)) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) // build a read result with error val logReadInfo = buildReadResult(Errors.FENCED_LEADER_EPOCH) - val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) + val delayedRemoteFetch = new DelayedRemoteFetch( + java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), + java.util.Collections.singletonMap(topicIdPartition, future), + java.util.Collections.singletonMap(topicIdPartition, fetchInfo), + remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), + fetchParams, + Seq(topicIdPartition -> logReadInfo), + replicaManager, + callback) assertTrue(delayedRemoteFetch.tryComplete()) assertTrue(delayedRemoteFetch.isCompleted) @@ -169,52 +206,262 @@ class DelayedRemoteFetchTest { @Test def testRequestExpiry(): Unit = { - var actualTopicPartition: Option[TopicIdPartition] = None - var fetchResultOpt: Option[FetchPartitionData] = None + val responses = mutable.Map[TopicIdPartition, FetchPartitionData]() - def callback(responses: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { - assertEquals(1, responses.size) - actualTopicPartition = Some(responses.head._1) - fetchResultOpt = Some(responses.head._2) + def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { + responseSeq.foreach { case (tp, data) => + responses.put(tp, data) + } } + def expiresPerSecValue(): Double = { + val allMetrics = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala + val metric = allMetrics.find { case (n, _) => n.getMBeanName.endsWith("kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec") } + + if (metric.isEmpty) + 0 + else + metric.get._2.asInstanceOf[Meter].count + } + + val remoteFetchTaskExpired = mock(classOf[Future[Void]]) + val remoteFetchTask2 = mock(classOf[Future[Void]]) + // complete the 2nd task, and keep the 1st one expired + when(remoteFetchTask2.isDone).thenReturn(true) + + // Create futures - one completed, one not + val future1: CompletableFuture[RemoteLogReadResult] = new 
CompletableFuture[RemoteLogReadResult]()
+    val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]()
+    // Only complete one remote fetch
+    future2.complete(buildRemoteReadResult(Errors.NONE))
+
+    val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null)
+    val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null)
+
    val highWatermark = 100
    val leaderLogStartOffset = 10
-    val remoteFetchTask = mock(classOf[Future[Void]])
-    val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]()
-    val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null)
-    val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset)
-
-    val delayedRemoteFetch = new DelayedRemoteFetch(remoteFetchTask, future, fetchInfo, remoteFetchMaxWaitMs,
-      Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback)
+    val logReadInfo1 = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset)
+    val logReadInfo2 = buildReadResult(Errors.NONE)
+
+    val fetchStatus1 = FetchPartitionStatus(
+      startOffsetMetadata = new LogOffsetMetadata(fetchOffset),
+      fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch))
+    val fetchStatus2 = FetchPartitionStatus(
+      startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100),
+      fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch))
+
+    // Set up maps for multiple partitions
+    val remoteFetchTasks = new java.util.HashMap[TopicIdPartition, Future[Void]]()
+    val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]()
+    val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]()
+
+    remoteFetchTasks.put(topicIdPartition, remoteFetchTaskExpired)
+    remoteFetchTasks.put(topicIdPartition2, remoteFetchTask2)
+    remoteFetchResults.put(topicIdPartition, future1)
+    remoteFetchResults.put(topicIdPartition2, future2)
+    remoteFetchInfos.put(topicIdPartition, fetchInfo1)
+    remoteFetchInfos.put(topicIdPartition2, fetchInfo2)
+
+    val delayedRemoteFetch = new DelayedRemoteFetch(
+      remoteFetchTasks,
+      remoteFetchResults,
+      remoteFetchInfos,
+      remoteFetchMaxWaitMs,
+      Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2),
+      fetchParams,
+      Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2),
+      replicaManager,
+      callback)

    when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition))
      .thenReturn(mock(classOf[Partition]))
+    when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition))
+      .thenReturn(mock(classOf[Partition]))

    // Verify that the ExpiresPerSec metric is zero before fetching
-    val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics
-    assertEquals(0, metrics.keySet.asScala.count(_.getMBeanName == "kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec"))
+    val existingMetricVal = expiresPerSecValue()
+    // Verify the delayedRemoteFetch is not completed yet
+    assertFalse(delayedRemoteFetch.isCompleted)

    // Force the delayed remote fetch to expire
    delayedRemoteFetch.run()

-    // Check that the task was cancelled and force-completed
-    verify(remoteFetchTask).cancel(false)
+    // Check that the expired task was cancelled and force-completed
+    verify(remoteFetchTaskExpired).cancel(anyBoolean())
+    verify(remoteFetchTask2, never()).cancel(anyBoolean())
    assertTrue(delayedRemoteFetch.isCompleted)

    // Check that the ExpiresPerSec metric was incremented
-    assertEquals(1, metrics.keySet.asScala.count(_.getMBeanName == "kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec"))
+    assertTrue(expiresPerSecValue() > existingMetricVal)

-    // Fetch results should still include local read results
-    assertTrue(actualTopicPartition.isDefined)
-    assertEquals(topicIdPartition, actualTopicPartition.get)
-    assertTrue(fetchResultOpt.isDefined)
+    // Fetch results should include 2 results and the expired one should return local read results
+    assertEquals(2, responses.size)
+    assertTrue(responses.contains(topicIdPartition))
+    assertTrue(responses.contains(topicIdPartition2))

-    val fetchResult = fetchResultOpt.get
-    assertEquals(Errors.NONE, fetchResult.error)
-    assertEquals(highWatermark, fetchResult.highWatermark)
-    assertEquals(leaderLogStartOffset, fetchResult.logStartOffset)
+    assertEquals(Errors.NONE, responses(topicIdPartition).error)
+    assertEquals(highWatermark, responses(topicIdPartition).highWatermark)
+    assertEquals(leaderLogStartOffset, responses(topicIdPartition).logStartOffset)
+
+    assertEquals(Errors.NONE, responses(topicIdPartition2).error)
+  }
+
+  @Test
+  def testMultiplePartitions(): Unit = {
+    val responses = mutable.Map[TopicIdPartition, FetchPartitionData]()
+
+    def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = {
+      responseSeq.foreach { case (tp, data) =>
+        responses.put(tp, data)
+      }
+    }
+
+    // Create futures - one completed, one not
+    val future1: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]()
+    val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]()
+    // Only complete one remote fetch
+    future1.complete(buildRemoteReadResult(Errors.NONE))
+
+    val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null)
+    val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null)
+
+    val highWatermark1 = 100
+    val leaderLogStartOffset1 = 10
+    val highWatermark2 = 200
+    val leaderLogStartOffset2 = 20
+
+    val logReadInfo1 = buildReadResult(Errors.NONE, 100, 10)
+    val logReadInfo2 = buildReadResult(Errors.NONE, 200, 20)
+
+    val fetchStatus1 = FetchPartitionStatus(
+      startOffsetMetadata = new LogOffsetMetadata(fetchOffset),
+      fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch))
+    val fetchStatus2 = FetchPartitionStatus(
+      startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100),
+      fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch))
+
+    // Set up maps for multiple partitions
+    val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]()
+    val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]()
+
+    remoteFetchResults.put(topicIdPartition, future1)
+    remoteFetchResults.put(topicIdPartition2, future2)
+    remoteFetchInfos.put(topicIdPartition, fetchInfo1)
+    remoteFetchInfos.put(topicIdPartition2, fetchInfo2)
+
+    val delayedRemoteFetch = new DelayedRemoteFetch(
+      Collections.emptyMap[TopicIdPartition, Future[Void]](),
+      remoteFetchResults,
+      remoteFetchInfos,
+      remoteFetchMaxWaitMs,
+      Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2),
+      fetchParams,
+
Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2), + replicaManager, + callback) + + when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) + .thenReturn(mock(classOf[Partition])) + when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition)) + .thenReturn(mock(classOf[Partition])) + + // Should not complete since future2 is not done + assertFalse(delayedRemoteFetch.tryComplete()) + assertFalse(delayedRemoteFetch.isCompleted) + + // Complete future2 + future2.complete(buildRemoteReadResult(Errors.NONE)) + + // Now it should complete + assertTrue(delayedRemoteFetch.tryComplete()) + assertTrue(delayedRemoteFetch.isCompleted) + + // Verify both partitions were processed without error + assertEquals(2, responses.size) + assertTrue(responses.contains(topicIdPartition)) + assertTrue(responses.contains(topicIdPartition2)) + + assertEquals(Errors.NONE, responses(topicIdPartition).error) + assertEquals(highWatermark1, responses(topicIdPartition).highWatermark) + assertEquals(leaderLogStartOffset1, responses(topicIdPartition).logStartOffset) + + assertEquals(Errors.NONE, responses(topicIdPartition2).error) + assertEquals(highWatermark2, responses(topicIdPartition2).highWatermark) + assertEquals(leaderLogStartOffset2, responses(topicIdPartition2).logStartOffset) + } + + @Test + def testMultiplePartitionsWithFailedResults(): Unit = { + val responses = mutable.Map[TopicIdPartition, FetchPartitionData]() + + def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { + responseSeq.foreach { case (tp, data) => + responses.put(tp, data) + } + } + + // Create futures - one successful, one with error + val future1: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() + val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() + + // Created 1 successful result and 1 failed result + future1.complete(buildRemoteReadResult(Errors.NONE)) + future2.complete(buildRemoteReadResult(Errors.UNKNOWN_SERVER_ERROR)) + + val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) + val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null) + + val logReadInfo1 = buildReadResult(Errors.NONE, 100, 10) + val logReadInfo2 = buildReadResult(Errors.NONE, 200, 20) + + val fetchStatus1 = FetchPartitionStatus( + startOffsetMetadata = new LogOffsetMetadata(fetchOffset), + fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch)) + val fetchStatus2 = FetchPartitionStatus( + startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100), + fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch)) + + // Set up maps for multiple partitions + val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]() + val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() + + remoteFetchResults.put(topicIdPartition, future1) + remoteFetchResults.put(topicIdPartition2, future2) + remoteFetchInfos.put(topicIdPartition, fetchInfo1) + remoteFetchInfos.put(topicIdPartition2, fetchInfo2) + + val delayedRemoteFetch = new DelayedRemoteFetch( + Collections.emptyMap[TopicIdPartition, Future[Void]](), + remoteFetchResults, + remoteFetchInfos, + remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2), + 
fetchParams, + Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2), + replicaManager, + callback) + + when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) + .thenReturn(mock(classOf[Partition])) + when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition)) + .thenReturn(mock(classOf[Partition])) + + assertTrue(delayedRemoteFetch.tryComplete()) + assertTrue(delayedRemoteFetch.isCompleted) + + // Verify both partitions were processed + assertEquals(2, responses.size) + assertTrue(responses.contains(topicIdPartition)) + assertTrue(responses.contains(topicIdPartition2)) + + // First partition should be successful + val fetchResult1 = responses(topicIdPartition) + assertEquals(Errors.NONE, fetchResult1.error) + + // Second partition should have an error due to remote fetch failure + val fetchResult2 = responses(topicIdPartition2) + assertEquals(Errors.UNKNOWN_SERVER_ERROR, fetchResult2.error) } private def buildFetchParams(replicaId: Int, @@ -233,16 +480,22 @@ class DelayedRemoteFetchTest { private def buildReadResult(error: Errors, highWatermark: Int = 0, leaderLogStartOffset: Int = 0): LogReadResult = { - LogReadResult( - exception = if (error != Errors.NONE) Some(error.exception) else None, - info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - divergingEpoch = None, - highWatermark = highWatermark, - leaderLogStartOffset = leaderLogStartOffset, - leaderLogEndOffset = -1L, - followerLogStartOffset = -1L, - fetchTimeMs = -1L, - lastStableOffset = None) + new LogReadResult( + new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY, false, Optional.empty(), + Optional.of(mock(classOf[RemoteStorageFetchInfo]))), + Optional.empty(), + highWatermark, + leaderLogStartOffset, + -1L, + -1L, + -1L, + OptionalLong.empty(), + if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) } + private def buildRemoteReadResult(error: Errors): RemoteLogReadResult = { + new RemoteLogReadResult( + Optional.of(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY)), + if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) + } } diff --git a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala index 3c5cd9396bdba..170ee3679f47b 100644 --- a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala +++ b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala @@ -24,7 +24,7 @@ import java.lang.management.ManagementFactory import java.security.KeyStore import java.time.Duration import java.util -import java.util.{Collections, Optional, Properties} +import java.util.{Optional, Properties} import java.util.concurrent._ import javax.management.ObjectName import com.yammer.metrics.core.MetricName @@ -59,8 +59,10 @@ import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.MetadataLogConfig import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.metrics.{KafkaYammerMetrics, MetricConfigs} +import org.apache.kafka.server.ReplicaState import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.server.util.ShutdownableThread +import 
org.apache.kafka.server.quota.{ClientQuotaEntity, ClientQuotaManager} import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, UnifiedLog} import org.apache.kafka.test.TestSslUtils import org.junit.jupiter.api.Assertions._ @@ -495,7 +497,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup properties.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG, updatedMaxConnections) properties.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, connectionsIpsOverride) - TestUtils.incrementalAlterConfigs(servers, adminClients.head, properties, true) + TestUtils.incrementalAlterConfigs(servers, adminClients.head, properties, perBrokerConfig = true) servers.foreach(_.shutdown()) servers.foreach(_.awaitShutdown()) @@ -653,7 +655,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup "Config not updated in LogManager") val log = servers.head.logManager.getLog(new TopicPartition(topic, 0)).getOrElse(throw new IllegalStateException("Log not found")) - TestUtils.waitUntilTrue(() => log.config.segmentSize == 1048576, "Existing topic config using defaults not updated") + TestUtils.waitUntilTrue(() => log.config.segmentSize() == 1048576, "Existing topic config using defaults not updated") val KafkaConfigToLogConfigName: Map[String, String] = ServerTopicConfigSynonyms.TOPIC_CONFIG_SYNONYMS.asScala.map { case (k, v) => (v, k) } props.asScala.foreach { case (k, v) => @@ -741,7 +743,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup consumer.commitSync() def partitionInfo: TopicPartitionInfo = - adminClients.head.describeTopics(Collections.singleton(topic)).topicNameValues().get(topic).get().partitions().get(0) + adminClients.head.describeTopics(util.Set.of(topic)).topicNameValues().get(topic).get().partitions().get(0) val partitionInfo0 = partitionInfo assertEquals(partitionInfo0.replicas.get(0), partitionInfo0.leader) @@ -918,7 +920,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup private def verifyMarkPartitionsForTruncation(): Unit = { val leaderId = 0 val topicDescription = adminClients.head. - describeTopics(java.util.Arrays.asList(topic)). + describeTopics(java.util.List.of(topic)). allTopicNames(). get(3, TimeUnit.MINUTES).get(topic) val partitions = topicDescription.partitions().asScala. 
@@ -935,7 +937,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertEquals(replicaFetcherManager.getFetcherId(tp), fetcherThreads.head._1.fetcherId) val thread = fetcherThreads.head._2 assertEquals(Some(truncationOffset), thread.fetchState(tp).map(_.fetchOffset)) - assertEquals(Some(Truncating), thread.fetchState(tp).map(_.state)) + assertEquals(Some(ReplicaState.TRUNCATING), thread.fetchState(tp).map(_.state)) } } } @@ -963,9 +965,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup val clientId = "test-client-1" servers.foreach { server => server.quotaManagers.produce.updateQuota( - None, - Some(ClientQuotaManager.ClientIdEntity(clientId)), - Some(Quota.upperBound(10000000)) + Optional.empty, + Optional.of(new ClientQuotaManager.ClientIdEntity(clientId): ClientQuotaEntity.ConfigEntity), + Optional.of(Quota.upperBound(10000000)) ) } val (producerThread, consumerThread) = startProduceConsume(retries = 0, groupProtocol, clientId) @@ -1046,18 +1048,18 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup .map(_.asInstanceOf[DataPlaneAcceptor]).toSeq // add new PLAINTEXT listener - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, s"PLAINTEXT://localhost:0, $SecureInternal://localhost:0, $SecureExternal://localhost:0"), AlterConfigOp.OpType.SET) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => acceptors.size == 3, s"failed to add new DataPlaneAcceptor") // remove PLAINTEXT listener - client.incrementalAlterConfigs(Map(broker0Resource -> - Seq(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, + client.incrementalAlterConfigs(util.Map.of(broker0Resource, + util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0"), AlterConfigOp.OpType.SET) - ).asJavaCollection).asJava).all().get() + ))).all().get() TestUtils.waitUntilTrue(() => acceptors.size == 2, s"failed to remove DataPlaneAcceptor. current: ${acceptors.map(_.endPoint.toString).mkString(",")}") @@ -1098,9 +1100,13 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("getTestGroupProtocolParametersAll")) def testServersCanStartWithInvalidStaticConfigsAndValidDynamicConfigs(groupProtocol: String): Unit = { + TestNumReplicaFetcherMetricsReporter.testReporters.clear() + // modify snapshot interval config to explicitly take snapshot on a broker with valid dynamic configs val props = defaultStaticConfig(numServers) props.put(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, "10000") + props.put(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[TestNumReplicaFetcherMetricsReporter].getName) + props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "1") val kafkaConfig = KafkaConfig.fromProps(props) val newBroker = createBroker(kafkaConfig).asInstanceOf[BrokerServer] @@ -1108,6 +1114,15 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup alterSslKeystoreUsingConfigCommand(sslProperties1, listenerPrefix(SecureExternal)) + // Add num.replica.fetchers to the cluster-level config. 
+ val clusterLevelProps = new Properties + clusterLevelProps.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2") + reconfigureServers(clusterLevelProps, perBrokerConfig = false, (ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2")) + + // Wait for the metrics reporter to be configured + val initialReporter = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head + initialReporter.verifyState(reconfigureCount = 1, numFetcher = 2) + TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) TestUtils.waitUntilTrue( @@ -1120,11 +1135,19 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup newBroker.shutdown() newBroker.awaitShutdown() + // Clean up the test reporter + TestNumReplicaFetcherMetricsReporter.testReporters.clear() + val invalidStaticConfigs = defaultStaticConfig(newBroker.config.brokerId) invalidStaticConfigs.putAll(securityProps(invalidSslConfigs, KEYSTORE_PROPS, listenerPrefix(SecureExternal))) newBroker.config.updateCurrentConfig(KafkaConfig.fromProps(invalidStaticConfigs)) newBroker.startup() + + // Verify that the custom MetricsReporter is not reconfigured after restart. + // If readDynamicBrokerConfigsFromSnapshot works correctly, the reporter should maintain its state. + val reporterAfterRestart = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head + reporterAfterRestart.verifyState(reconfigureCount = 0, numFetcher = 2) } private def awaitInitialPositions(consumer: Consumer[_, _]): Unit = { @@ -1212,11 +1235,11 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } val cert1 = load(trustStore1Props).getCertificate("kafka") val cert2 = load(trustStore2Props).getCertificate("kafka") - val certs = Map("kafka1" -> cert1, "kafka2" -> cert2) + val certs = util.Map.of("kafka1", cert1, "kafka2", cert2) val combinedStorePath = TestUtils.tempFile("truststore", ".jks").getAbsolutePath val password = trustStore1Props.get(SSL_TRUSTSTORE_PASSWORD_CONFIG).asInstanceOf[Password] - TestSslUtils.createTrustStore(combinedStorePath, password, certs.asJava) + TestSslUtils.createTrustStore(combinedStorePath, password, certs) val newStoreProps = new Properties newStoreProps.put(SSL_TRUSTSTORE_LOCATION_CONFIG, combinedStorePath) newStoreProps.put(SSL_TRUSTSTORE_PASSWORD_CONFIG, password) @@ -1380,7 +1403,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con private def tempPropertiesFile(properties: Properties): File = TestUtils.tempPropertiesFile(properties.asScala) - private abstract class ClientBuilder[T]() { + private abstract class ClientBuilder[T] { protected var _bootstrapServers: Option[String] = None protected var _listenerName: String = SecureExternal protected var _securityProtocol = SecurityProtocol.SASL_SSL @@ -1413,7 +1436,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con private var _retries = Int.MaxValue private var _acks = -1 private var _requestTimeoutMs = 30000 - private val defaultLingerMs = 5; + private val defaultLingerMs = 5 private var _deliveryTimeoutMs = 30000 + defaultLingerMs def maxRetries(retries: Int): ProducerBuilder = { _retries = retries; this } @@ -1457,7 +1480,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con val consumer = new KafkaConsumer[String, String](consumerProps, new StringDeserializer, new StringDeserializer) consumers += consumer - consumer.subscribe(Collections.singleton(_topic)) + consumer.subscribe(util.Set.of(_topic)) if (_autoOffsetReset == "latest") 
awaitInitialPositions(consumer) consumer @@ -1598,7 +1621,7 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close } override def reconfigurableConfigs(): util.Set[String] = { - Set(PollingIntervalProp).asJava + util.Set.of(PollingIntervalProp) } override def validateReconfiguration(configs: util.Map[String, _]): Unit = { @@ -1633,6 +1656,64 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close } } +object TestNumReplicaFetcherMetricsReporter { + val testReporters = new ConcurrentLinkedQueue[TestNumReplicaFetcherMetricsReporter]() + + def waitForReporters(count: Int): List[TestNumReplicaFetcherMetricsReporter] = { + TestUtils.waitUntilTrue(() => testReporters.size == count, msg = "Metrics reporters size not matched. Expected: " + count + ", actual: " + testReporters.size()) + + val reporters = testReporters.asScala.toList + TestUtils.waitUntilTrue(() => reporters.forall(_.configureCount == 1), msg = "Metrics reporters not configured") + reporters + } +} + + +class TestNumReplicaFetcherMetricsReporter extends MetricsReporter { + import TestNumReplicaFetcherMetricsReporter._ + @volatile var configureCount = 0 + @volatile var reconfigureCount = 0 + @volatile var numFetchers: Int = 1 + testReporters.add(this) + + override def init(metrics: util.List[KafkaMetric]): Unit = { + } + + override def configure(configs: util.Map[String, _]): Unit = { + configureCount += 1 + numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt + } + + override def metricChange(metric: KafkaMetric): Unit = { + } + + override def metricRemoval(metric: KafkaMetric): Unit = { + } + + override def reconfigurableConfigs(): util.Set[String] = { + util.Set.of(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG) + } + + override def validateReconfiguration(configs: util.Map[String, _]): Unit = { + val numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt + if (numFetchers <= 0) + throw new ConfigException(s"Invalid num.replica.fetchers $numFetchers") + } + + override def reconfigure(configs: util.Map[String, _]): Unit = { + reconfigureCount += 1 + numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt + } + + override def close(): Unit = { + } + + def verifyState(reconfigureCount: Int, numFetcher: Int = 1): Unit = { + assertEquals(reconfigureCount, this.reconfigureCount) + assertEquals(numFetcher, this.numFetchers) + } +} + class MockFileConfigProvider extends FileConfigProvider { @throws(classOf[IOException]) diff --git a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala index ff86331e5cd3c..e50a6a96bc56a 100644 --- a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala @@ -32,7 +32,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import java.util -import java.util.{Collections, Properties} +import java.util.Properties import java.util.concurrent.{Executors, TimeUnit} import scala.jdk.CollectionConverters._ @@ -95,7 +95,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { TestUtils.generateAndProduceMessages(brokers, topic, numMessages = 1) val response = receive[FetchResponse](socket, ApiKeys.FETCH, version) assertEquals(Errors.NONE, response.error) - 
assertEquals(Map(Errors.NONE -> 2).asJava, response.errorCounts) + assertEquals(util.Map.of(Errors.NONE, 2), response.errorCounts) } finally { socket.close() } @@ -151,7 +151,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { consumerProps.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(consumerProps, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.subscribe(List(topic).asJava) + consumer.subscribe(util.List.of(topic)) // Wait until preferred replica is set to follower. TestUtils.waitUntilTrue(() => { @@ -240,15 +240,15 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { try { // Rack-based assignment results in partitions assigned in reverse order since partition racks are in the reverse order. - consumers.foreach(_.subscribe(Collections.singleton(topicWithSingleRackPartitions))) + consumers.foreach(_.subscribe(util.Set.of(topicWithSingleRackPartitions))) verifyAssignments(partitionList.reverse, topicWithSingleRackPartitions) // Non-rack-aware assignment results in ordered partitions. - consumers.foreach(_.subscribe(Collections.singleton(topicWithAllPartitionsOnAllRacks))) + consumers.foreach(_.subscribe(util.Set.of(topicWithAllPartitionsOnAllRacks))) verifyAssignments(partitionList, topicWithAllPartitionsOnAllRacks) // Rack-aware assignment with co-partitioning results in reverse assignment for both topics. - consumers.foreach(_.subscribe(Set(topicWithSingleRackPartitions, topicWithAllPartitionsOnAllRacks).asJava)) + consumers.foreach(_.subscribe(util.Set.of(topicWithSingleRackPartitions, topicWithAllPartitionsOnAllRacks))) verifyAssignments(partitionList.reverse, topicWithAllPartitionsOnAllRacks, topicWithSingleRackPartitions) // Perform reassignment for topicWithSingleRackPartitions to reverse the replica racks and @@ -256,7 +256,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { val admin = createAdminClient() val reassignments = new util.HashMap[TopicPartition, util.Optional[NewPartitionReassignment]]() partitionList.foreach { p => - val newAssignment = new NewPartitionReassignment(Collections.singletonList(p)) + val newAssignment = new NewPartitionReassignment(util.List.of(p)) reassignments.put(new TopicPartition(topicWithSingleRackPartitions, p), util.Optional.of(newAssignment)) } admin.alterPartitionReassignments(reassignments).all().get(30, TimeUnit.SECONDS) @@ -283,7 +283,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { ) val response = connectAndReceive[FetchResponse](request, brokers(leaderBrokerId).socketServer) assertEquals(Errors.NONE, response.error) - assertEquals(Map(Errors.NONE -> 2).asJava, response.errorCounts) + assertEquals(util.Map.of(Errors.NONE, 2), response.errorCounts) assertEquals(1, response.data.responses.size) val topicResponse = response.data.responses.get(0) assertEquals(1, topicResponse.partitions.size) diff --git a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala index 68b8e1108ff0d..575c612bf26a1 100644 --- a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala +++ b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala @@ -20,7 +20,7 @@ package kafka.server import java.net.InetSocketAddress import java.time.Duration -import java.util.{Collections, Properties} +import java.util.Properties import java.util.concurrent.{CountDownLatch, Executors, 
TimeUnit}
import javax.security.auth.login.LoginContext
import kafka.api.{IntegrationTestHarness, SaslSetup}
@@ -38,11 +38,9 @@ import org.apache.kafka.common.security.kerberos.KerberosLogin
import org.apache.kafka.common.utils.{LogContext, MockTime}
import org.apache.kafka.network.SocketServerConfigs
import org.junit.jupiter.api.Assertions._
-import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo}
import org.junit.jupiter.params.ParameterizedTest
-import org.junit.jupiter.params.provider.{MethodSource, ValueSource}
-
-import scala.jdk.CollectionConverters._
+import org.junit.jupiter.params.provider.MethodSource

class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
  override val brokerCount = 1
@@ -92,9 +90,8 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
   * Tests that Kerberos replay error `Request is a replay (34)` is not handled as an authentication exception
   * since replay detection used to detect DoS attacks may occasionally reject valid concurrent requests.
   */
-  @ParameterizedTest
-  @ValueSource(strings = Array("kraft"))
-  def testRequestIsAReplay(quorum: String): Unit = {
+  @Test
+  def testRequestIsAReplay(): Unit = {
    val successfulAuthsPerThread = 10
    val futures = (0 until numThreads).map(_ => executor.submit(new Runnable {
      override def run(): Unit = verifyRetriableFailuresDuringAuthentication(successfulAuthsPerThread)
@@ -110,9 +107,8 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
   * are able to connect after the second re-login. Verifies that logout is performed only once
   * since duplicate logouts without successful login results in NPE from Java 9 onwards.
   */
-  @ParameterizedTest
-  @ValueSource(strings = Array("kraft"))
-  def testLoginFailure(quorum: String): Unit = {
+  @Test
+  def testLoginFailure(): Unit = {
    val selector = createSelectorWithRelogin()
    try {
      val login = TestableKerberosLogin.instance
@@ -134,9 +130,8 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
   * is performed when credentials are unavailable between logout and login, we handle it as a
   * transient error and not an authentication failure so that clients may retry.
   */
-  @ParameterizedTest
-  @ValueSource(strings = Array("kraft"))
-  def testReLogin(quorum: String): Unit = {
+  @Test
+  def testReLogin(): Unit = {
    val selector = createSelectorWithRelogin()
    try {
      val login = TestableKerberosLogin.instance
@@ -166,9 +161,8 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
   * Tests that Kerberos error `Server not found in Kerberos database (7)` is handled
   * as a fatal authentication failure.
*/ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testServerNotFoundInKerberosDatabase(quorum: String): Unit = { + @Test + def testServerNotFoundInKerberosDatabase(): Unit = { val jaasConfig = clientConfig.getProperty(SaslConfigs.SASL_JAAS_CONFIG) val invalidServiceConfig = jaasConfig.replace("serviceName=\"kafka\"", "serviceName=\"invalid-service\"") clientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, invalidServiceConfig) @@ -188,7 +182,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { val configOverrides = new Properties() configOverrides.setProperty(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext) val consumer = createConsumer(configOverrides = configOverrides) - consumer.assign(List(tp).asJava) + consumer.assign(java.util.List.of(tp)) val startMs = System.currentTimeMillis() assertThrows(classOf[SaslAuthenticationException], () => consumer.poll(Duration.ofMillis(50))) @@ -267,7 +261,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { private def createSelectorWithRelogin(): Selector = { clientConfig.setProperty(SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN, "0") val config = new TestSecurityConfig(clientConfig) - val jaasContexts = Collections.singletonMap("GSSAPI", JaasContext.loadClientContext(config.values())) + val jaasContexts = java.util.Map.of("GSSAPI", JaasContext.loadClientContext(config.values())) val channelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, null, false, kafkaClientSaslMechanism, null, null, null, time, new LogContext(), _ => org.apache.kafka.test.TestUtils.defaultApiVersionsResponse(ListenerType.BROKER)) { diff --git a/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala b/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala deleted file mode 100644 index 165f95e3a62a9..0000000000000 --- a/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import kafka.network.SocketServer -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.ApiKeys -import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, RequestHeader, ResponseHeader} -import org.apache.kafka.common.utils.Utils - -import java.io.{DataInputStream, DataOutputStream} -import java.net.Socket -import java.nio.ByteBuffer -import scala.reflect.ClassTag - -object IntegrationTestUtils { - - def sendRequest(socket: Socket, request: Array[Byte]): Unit = { - val outgoing = new DataOutputStream(socket.getOutputStream) - outgoing.writeInt(request.length) - outgoing.write(request) - outgoing.flush() - } - - private def sendWithHeader(request: AbstractRequest, header: RequestHeader, socket: Socket): Unit = { - val serializedBytes = Utils.toArray(request.serializeWithHeader(header)) - sendRequest(socket, serializedBytes) - } - - def nextRequestHeader[T <: AbstractResponse](apiKey: ApiKeys, - apiVersion: Short, - clientId: String = "client-id", - correlationIdOpt: Option[Int] = None): RequestHeader = { - val correlationId = correlationIdOpt.getOrElse { - this.correlationId += 1 - this.correlationId - } - new RequestHeader(apiKey, apiVersion, clientId, correlationId) - } - - def send(request: AbstractRequest, - socket: Socket, - clientId: String = "client-id", - correlationId: Option[Int] = None): Unit = { - val header = nextRequestHeader(request.apiKey, request.version, clientId, correlationId) - sendWithHeader(request, header, socket) - } - - def receive[T <: AbstractResponse](socket: Socket, apiKey: ApiKeys, version: Short) - (implicit classTag: ClassTag[T]): T = { - val incoming = new DataInputStream(socket.getInputStream) - val len = incoming.readInt() - - val responseBytes = new Array[Byte](len) - incoming.readFully(responseBytes) - - val responseBuffer = ByteBuffer.wrap(responseBytes) - ResponseHeader.parse(responseBuffer, apiKey.responseHeaderVersion(version)) - - AbstractResponse.parseResponse(apiKey, responseBuffer, version) match { - case response: T => response - case response => - throw new ClassCastException(s"Expected response with type ${classTag.runtimeClass}, but found ${response.getClass}") - } - } - - def sendAndReceive[T <: AbstractResponse](request: AbstractRequest, - socket: Socket, - clientId: String = "client-id", - correlationId: Option[Int] = None) - (implicit classTag: ClassTag[T]): T = { - send(request, socket, clientId, correlationId) - receive[T](socket, request.apiKey, request.version) - } - - def connectAndReceive[T <: AbstractResponse](request: AbstractRequest, - destination: SocketServer, - listenerName: ListenerName) - (implicit classTag: ClassTag[T]): T = { - val socket = connect(destination, listenerName) - try sendAndReceive[T](request, socket) - finally socket.close() - } - - private var correlationId = 0 - - def connect(socketServer: SocketServer, - listenerName: ListenerName): Socket = { - new Socket("localhost", socketServer.boundPort(listenerName)) - } -} diff --git a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala index 770c519dfae13..6f552a8ebe96c 100644 --- a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala +++ b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala @@ -18,8 +18,8 @@ package kafka.server import kafka.network.SocketServer -import kafka.server.IntegrationTestUtils.connectAndReceive import kafka.utils.TestUtils +import 
org.apache.kafka.server.IntegrationTestUtils.connectAndReceive import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin._ import org.apache.kafka.common.acl.{AclBinding, AclBindingFilter} @@ -43,7 +43,7 @@ import org.apache.kafka.metadata.bootstrap.BootstrapMetadata import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.authorizer._ import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, MetadataVersion} -import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.quota import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaType} @@ -60,10 +60,11 @@ import java.nio.file.{FileSystems, Files, Path, Paths} import java.{lang, util} import java.util.concurrent.{CompletableFuture, CompletionStage, ExecutionException, TimeUnit} import java.util.concurrent.atomic.AtomicInteger -import java.util.{Collections, Optional, OptionalLong, Properties} +import java.util.{Optional, OptionalLong, Properties} import scala.collection.{Seq, mutable} import scala.concurrent.duration.{FiniteDuration, MILLISECONDS, SECONDS} import scala.jdk.CollectionConverters._ +import scala.util.Using @Timeout(120) @Tag("integration") @@ -112,7 +113,7 @@ class KRaftClusterTest { cluster.format() cluster.startup() val controller = cluster.controllers().values().iterator().asScala.filter(_.controller.isActive).next() - val port = controller.socketServer.boundPort(controller.config.controllerListeners.head.listenerName) + val port = controller.socketServer.boundPort(ListenerName.normalised(controller.config.controllerListeners.head.listener)) // shutdown active controller controller.shutdown() @@ -129,6 +130,32 @@ class KRaftClusterTest { } } + @Test + def testClusterWithLowerCaseListeners(): Unit = { + Using.resource(new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setBrokerListenerName(new ListenerName("external")). + setNumControllerNodes(3). 
+ build()).build() + ) { cluster => + cluster.format() + cluster.startup() + cluster.brokers().forEach((_, broker) => { + assertEquals(util.List.of("external://localhost:0"), broker.config.get(SocketServerConfigs.LISTENERS_CONFIG)) + assertEquals("external", broker.config.get(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)) + assertEquals("external:PLAINTEXT,CONTROLLER:PLAINTEXT", broker.config.get(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) + }) + TestUtils.waitUntilTrue(() => cluster.brokers().get(0).brokerState == BrokerState.RUNNING, + "Broker never made it to RUNNING state.") + TestUtils.waitUntilTrue(() => cluster.raftManagers().get(0).client.leaderAndEpoch().leaderId.isPresent, + "RaftManager was not initialized.") + Using.resource(Admin.create(cluster.clientProperties())) { admin => + assertEquals(cluster.nodes().clusterId(), admin.describeCluster().clusterId().get()) + } + } + } + @Test def testCreateClusterAndWaitForBrokerInRunningState(): Unit = { val cluster = new KafkaClusterTestKit.Builder( @@ -172,13 +199,13 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 3.toShort)) + val newTopic = util.List.of(new NewTopic("test-topic", 1, 3.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) // Delete topic - val deleteResult = admin.deleteTopics(Collections.singletonList("test-topic")) + val deleteResult = admin.deleteTopics(util.List.of("test-topic")) deleteResult.all().get() // List again @@ -238,7 +265,7 @@ class KRaftClusterTest { "Broker never made it to RUNNING state.") val admin = Admin.create(cluster.clientProperties()) try { - val entity = new ClientQuotaEntity(Map("user" -> "testkit").asJava) + val entity = new ClientQuotaEntity(util.Map.of("user", "testkit")) var filter = ClientQuotaFilter.containsOnly( List(ClientQuotaFilterComponent.ofEntity("user", "testkit")).asJava) @@ -246,7 +273,7 @@ class KRaftClusterTest { quotas: Seq[ClientQuotaAlteration.Op], filter: ClientQuotaFilter, expectCount: Int): util.Map[ClientQuotaEntity, util.Map[String, lang.Double]] = { - val alterResult = admin.alterClientQuotas(Seq(new ClientQuotaAlteration(entity, quotas.asJava)).asJava) + val alterResult = admin.alterClientQuotas(util.List.of(new ClientQuotaAlteration(entity, quotas.asJava))) try { alterResult.all().get() } catch { @@ -262,7 +289,7 @@ class KRaftClusterTest { } val (describeResult, ok) = TestUtils.computeUntilTrue(describeOrFail(filter)) { - results => results.getOrDefault(entity, util.Collections.emptyMap[String, lang.Double]()).size() == expectCount + results => results.getOrDefault(entity, util.Map.of[String, lang.Double]()).size() == expectCount } assertTrue(ok, "Broker never saw new client quotas") describeResult @@ -296,19 +323,19 @@ class KRaftClusterTest { assertEquals(9999.0, describeResult.get(entity).get("producer_byte_rate"), 1e-6) // Add another quota for a different entity with same user part - val entity2 = new ClientQuotaEntity(Map("user" -> "testkit", "client-id" -> "some-client").asJava) + val entity2 = new ClientQuotaEntity(util.Map.of("user", "testkit", "client-id", "some-client")) filter = ClientQuotaFilter.containsOnly( - List( + util.List.of( ClientQuotaFilterComponent.ofEntity("user", "testkit"), ClientQuotaFilterComponent.ofEntity("client-id", "some-client"), - ).asJava) + )) describeResult = 
alterThenDescribe(entity2, Seq(new ClientQuotaAlteration.Op("producer_byte_rate", 9998)), filter, 1) assertEquals(9998.0, describeResult.get(entity2).get("producer_byte_rate"), 1e-6) // non-strict match filter = ClientQuotaFilter.contains( - List(ClientQuotaFilterComponent.ofEntity("user", "testkit")).asJava) + util.List.of(ClientQuotaFilterComponent.ofEntity("user", "testkit"))) TestUtils.tryUntilNoAssertionError() { val results = admin.describeClientQuotas(filter).entities().get() @@ -329,14 +356,14 @@ class KRaftClusterTest { entity: ClientQuotaEntity, value: Long ): Unit = { - admin.alterClientQuotas(Collections.singletonList( - new ClientQuotaAlteration(entity, Collections.singletonList( + admin.alterClientQuotas(util.List.of( + new ClientQuotaAlteration(entity, util.List.of( new Op("consumer_byte_rate", value.doubleValue()))))). all().get() } def getConsumerByteRates(admin: Admin): Map[ClientQuotaEntity, Long] = { - val allFilter = ClientQuotaFilter.contains(Collections.emptyList()) + val allFilter = ClientQuotaFilter.contains(util.List.of) val results = new util.HashMap[ClientQuotaEntity, Long] admin.describeClientQuotas(allFilter).entities().get().forEach { case (entity, entityMap) => @@ -358,8 +385,8 @@ class KRaftClusterTest { "Broker never made it to RUNNING state.") val admin = Admin.create(cluster.clientProperties()) try { - val defaultUser = new ClientQuotaEntity(Collections.singletonMap[String, String]("user", null)) - val bobUser = new ClientQuotaEntity(Collections.singletonMap[String, String]("user", "bob")) + val defaultUser = new ClientQuotaEntity(util.Collections.singletonMap[String, String]("user", null)) + val bobUser = new ClientQuotaEntity(util.Map.of[String, String]("user", "bob")) TestUtils.retry(30000) { assertEquals(Map(), getConsumerByteRates(admin)) } @@ -491,12 +518,10 @@ class KRaftClusterTest { } private def sendDescribeClusterRequestToBoundPort(destination: SocketServer, - listenerName: ListenerName): DescribeClusterResponse = - connectAndReceive[DescribeClusterResponse]( - request = new DescribeClusterRequest.Builder(new DescribeClusterRequestData()).build(), - destination = destination, - listenerName = listenerName - ) + listenerName: ListenerName): DescribeClusterResponse = { + connectAndReceive[DescribeClusterResponse](new DescribeClusterRequest.Builder(new DescribeClusterRequestData()).build(), + destination.boundPort(listenerName)) + } @Test def testCreateClusterAndPerformReassignment(): Unit = { @@ -512,26 +537,26 @@ class KRaftClusterTest { try { // Create the topic. val assignments = new util.HashMap[Integer, util.List[Integer]] - assignments.put(0, util.Arrays.asList(0, 1, 2)) - assignments.put(1, util.Arrays.asList(1, 2, 3)) - assignments.put(2, util.Arrays.asList(2, 3, 0)) - assignments.put(3, util.Arrays.asList(3, 2, 1)) - val createTopicResult = admin.createTopics(Collections.singletonList( + assignments.put(0, util.List.of(0, 1, 2)) + assignments.put(1, util.List.of(1, 2, 3)) + assignments.put(2, util.List.of(2, 3, 0)) + assignments.put(3, util.List.of(3, 2, 1)) + val createTopicResult = admin.createTopics(util.List.of( new NewTopic("foo", assignments))) createTopicResult.all().get() waitForTopicListing(admin, Seq("foo"), Seq()) // Start some reassignments. 
- assertEquals(Collections.emptyMap(), admin.listPartitionReassignments().reassignments().get()) + assertEquals(util.Map.of, admin.listPartitionReassignments().reassignments().get()) val reassignments = new util.HashMap[TopicPartition, Optional[NewPartitionReassignment]] reassignments.put(new TopicPartition("foo", 0), - Optional.of(new NewPartitionReassignment(util.Arrays.asList(2, 1, 0)))) + Optional.of(new NewPartitionReassignment(util.List.of(2, 1, 0)))) reassignments.put(new TopicPartition("foo", 1), - Optional.of(new NewPartitionReassignment(util.Arrays.asList(0, 1, 2)))) + Optional.of(new NewPartitionReassignment(util.List.of(0, 1, 2)))) reassignments.put(new TopicPartition("foo", 2), - Optional.of(new NewPartitionReassignment(util.Arrays.asList(2, 3)))) + Optional.of(new NewPartitionReassignment(util.List.of(2, 3)))) reassignments.put(new TopicPartition("foo", 3), - Optional.of(new NewPartitionReassignment(util.Arrays.asList(3, 2, 0, 1)))) + Optional.of(new NewPartitionReassignment(util.List.of(3, 2, 0, 1)))) admin.alterPartitionReassignments(reassignments).all().get() TestUtils.waitUntilTrue( () => admin.listPartitionReassignments().reassignments().get().isEmpty, @@ -539,7 +564,7 @@ class KRaftClusterTest { var currentMapping: Seq[Seq[Int]] = Seq() val expectedMapping = Seq(Seq(2, 1, 0), Seq(0, 1, 2), Seq(2, 3), Seq(3, 2, 0, 1)) TestUtils.waitUntilTrue( () => { - val topicInfoMap = admin.describeTopics(Collections.singleton("foo")).allTopicNames().get() + val topicInfoMap = admin.describeTopics(util.Set.of("foo")).allTopicNames().get() if (topicInfoMap.containsKey("foo")) { currentMapping = translatePartitionInfoToSeq(topicInfoMap.get("foo").partitions()) expectedMapping.equals(currentMapping) @@ -687,7 +712,7 @@ class KRaftClusterTest { ("max.connections.per.ip", "60"), ("min.insync.replicas", "1"))), exhaustive = true) - admin.createTopics(util.Arrays.asList( + admin.createTopics(util.List.of( new NewTopic("foo", 2, 3.toShort), new NewTopic("bar", 2, 3.toShort))).all().get() TestUtils.waitForAllPartitionsMetadata(cluster.brokers().values().asScala.toSeq, "foo", 2) @@ -782,7 +807,7 @@ class KRaftClusterTest { val cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). setNumBrokerNodes(4). - setBootstrapMetadataVersion(MetadataVersion.fromVersionString(metadataVersionString)). + setBootstrapMetadataVersion(MetadataVersion.fromVersionString(metadataVersionString, true)). setNumControllerNodes(3).build()). 
build() try { @@ -791,14 +816,14 @@ class KRaftClusterTest { cluster.waitForReadyBrokers() val admin = Admin.create(cluster.clientProperties()) try { - val createResults = admin.createTopics(util.Arrays.asList( + val createResults = admin.createTopics(util.List.of( new NewTopic("foo", 1, 3.toShort), new NewTopic("bar", 2, 3.toShort))).values() createResults.get("foo").get() createResults.get("bar").get() - val increaseResults = admin.createPartitions(Map( - "foo" -> NewPartitions.increaseTo(3), - "bar" -> NewPartitions.increaseTo(2)).asJava).values() + val increaseResults = admin.createPartitions(util.Map.of( + "foo", NewPartitions.increaseTo(3), + "bar", NewPartitions.increaseTo(2))).values() increaseResults.get("foo").get() assertEquals(classOf[InvalidPartitionsException], assertThrows( classOf[ExecutionException], () => increaseResults.get("bar").get()).getCause.getClass) @@ -849,7 +874,7 @@ class KRaftClusterTest { cluster.brokers().get(0).shutdown() TestUtils.waitUntilTrue(() => !brokerIsUnfenced(clusterImage(cluster, 1), 0), "Timed out waiting for broker 0 to be fenced.") - val admin = createAdminClient(cluster, bootstrapController = usingBootstrapController); + val admin = createAdminClient(cluster, bootstrapController = usingBootstrapController) try { admin.unregisterBroker(0) } finally { @@ -989,8 +1014,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.updateFeatures( - Map(MetadataVersion.FEATURE_NAME -> - new FeatureUpdate(MetadataVersion.latestTesting().featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions + util.Map.of(MetadataVersion.FEATURE_NAME, + new FeatureUpdate(MetadataVersion.latestTesting().featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)), new UpdateFeaturesOptions ) assertEquals(new SupportedVersionRange(0, 1), admin.describeFeatures().featureMetadata().get(). supportedFeatures().get(KRaftVersion.FEATURE_NAME)) @@ -1010,8 +1035,7 @@ class KRaftClusterTest { val cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). setNumBrokerNodes(1). - setNumControllerNodes(1). - setFeature(KRaftVersion.FEATURE_NAME, 1.toShort).build()).build() + setNumControllerNodes(1).build()).setStandalone(true).build() try { cluster.format() cluster.startup() @@ -1083,13 +1107,13 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 1.toShort)) + val newTopic = util.List.of(new NewTopic("test-topic", 1, 1.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) // Delete topic - val deleteResult = admin.deleteTopics(Collections.singletonList("test-topic")) + val deleteResult = admin.deleteTopics(util.List.of("test-topic")) deleteResult.all().get() // List again @@ -1202,8 +1226,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - Collections.singletonMap(new ConfigResource(Type.BROKER, ""), - Collections.singletonList(new AlterConfigOp( + util.Map.of(new ConfigResource(Type.BROKER, ""), + util.List.of(new AlterConfigOp( new ConfigEntry(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey, "1"), OpType.SET)))). 
all().get() } finally { @@ -1243,8 +1267,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - Collections.singletonMap(new ConfigResource(Type.BROKER, ""), - Collections.singletonList(new AlterConfigOp( + util.Map.of(new ConfigResource(Type.BROKER, ""), + util.List.of(new AlterConfigOp( new ConfigEntry(FakeConfigurableAuthorizer.foobarConfigKey, "123"), OpType.SET)))). all().get() } finally { @@ -1365,7 +1389,7 @@ class KRaftClusterTest { @Test def testStartupWithNonDefaultKControllerDynamicConfiguration(): Unit = { - val bootstrapRecords = util.Arrays.asList( + val bootstrapRecords = util.List.of( new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(MetadataVersion.IBP_3_7_IV0.featureLevel), 0.toShort), @@ -1409,7 +1433,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.Arrays.asList( + admin.createTopics(util.List.of( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. @@ -1421,7 +1445,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent()) + assertTrue(info.isPresent) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1464,7 +1488,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.Arrays.asList( + admin.createTopics(util.List.of( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. @@ -1476,7 +1500,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent()) + assertTrue(info.isPresent) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1490,7 +1514,7 @@ class KRaftClusterTest { broker0.startup() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent()) + assertTrue(info.isPresent) assertEquals(Set(0, 1, 2), info.get.isr().asScala.toSet) assertTrue(broker0.logManager.getLog(foo0, isFuture = true).isEmpty) } @@ -1504,11 +1528,11 @@ class KRaftClusterTest { def copyDirectory(src: String, dest: String): Unit = { Files.walk(Paths.get(src)).forEach(p => { - val out = Paths.get(dest, p.toString().substring(src.length())) - if (!p.toString().equals(src)) { - Files.copy(p, out); + val out = Paths.get(dest, p.toString.substring(src.length())) + if (!p.toString.equals(src)) { + Files.copy(p, out) } - }); + }) } @Test @@ -1529,7 +1553,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.Arrays.asList( + admin.createTopics(util.List.of( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. 
@@ -1541,7 +1565,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent()) + assertTrue(info.isPresent) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1550,10 +1574,10 @@ class KRaftClusterTest { // Copy foo-0 to targetParentDir // This is so that we can rename the main replica to a future down below val parentDir = log.parentDir - val targetParentDir = broker0.config.logDirs.filter(_ != parentDir).head + val targetParentDir = broker0.config.logDirs.stream().filter(l => !l.equals(parentDir)).findFirst().get() val targetDirFile = new File(targetParentDir, log.dir.getName) targetDirFile.mkdir() - copyDirectory(log.dir.toString(), targetDirFile.toString()) + copyDirectory(log.dir.toString, targetDirFile.toString) assertTrue(targetDirFile.exists()) // Rename original log to a future @@ -1567,7 +1591,7 @@ class KRaftClusterTest { broker0.startup() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent()) + assertTrue(info.isPresent) assertEquals(Set(0, 1, 2), info.get.isr().asScala.toSet) assertTrue(broker0.logManager.getLog(foo0, isFuture = true).isEmpty) assertFalse(targetDirFile.exists()) @@ -1599,7 +1623,7 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - admin.createTopics(Collections.singletonList( + admin.createTopics(util.List.of( new NewTopic("test-topic", 1, 1.toShort))).all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) @@ -1608,7 +1632,7 @@ class KRaftClusterTest { cluster.raftManagers().get(active.asInstanceOf[QuorumController].nodeId()).shutdown() // Create a test topic on the new active controller - admin.createTopics(Collections.singletonList( + admin.createTopics(util.List.of( new NewTopic("test-topic2", 1, 1.toShort))).all().get() waitForTopicListing(admin, Seq("test-topic2"), Seq()) } finally { @@ -1619,6 +1643,51 @@ class KRaftClusterTest { } } + /** + * Test that once a cluster is formatted, a bootstrap.metadata file that contains an unsupported + * MetadataVersion is not a problem. This is a regression test for KAFKA-19192. + */ + @Test + def testOldBootstrapMetadataFile(): Unit = { + val baseDirectory = TestUtils.tempDir().toPath() + Using.resource(new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setNumControllerNodes(1). + setBaseDirectory(baseDirectory). + build()). + setDeleteOnClose(false). + build() + ) { cluster => + cluster.format() + cluster.startup() + cluster.waitForReadyBrokers() + } + val oldBootstrapMetadata = BootstrapMetadata.fromRecords( + util.List.of( + new ApiMessageAndVersion( + new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(1), + 0.toShort) + ), + "oldBootstrapMetadata") + // Re-create the cluster using the same directory structure as above. + // Since we do not need to use the bootstrap metadata, the fact that + // it specifies an obsolete metadata.version should not be a problem. + Using.resource(new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setNumControllerNodes(1). + setBaseDirectory(baseDirectory). + setBootstrapMetadata(oldBootstrapMetadata). 
+ build()).build() + ) { cluster => + cluster.startup() + cluster.waitForReadyBrokers() + } + } + @Test def testIncreaseNumIoThreads(): Unit = { val cluster = new KafkaClusterTestKit.Builder( @@ -1634,10 +1703,10 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - Collections.singletonMap(new ConfigResource(Type.BROKER, ""), - Collections.singletonList(new AlterConfigOp( + util.Map.of(new ConfigResource(Type.BROKER, ""), + util.List.of(new AlterConfigOp( new ConfigEntry(ServerConfigs.NUM_IO_THREADS_CONFIG, "8"), OpType.SET)))).all().get() - val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 1.toShort)) + val newTopic = util.List.of(new NewTopic("test-topic", 1, 1.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) @@ -1675,7 +1744,7 @@ object DummyClientQuotaCallback { class DummyClientQuotaCallback extends ClientQuotaCallback with Reconfigurable { var value = 0 - override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = Collections.emptyMap() + override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = util.Map.of override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = 1.0 @@ -1696,7 +1765,7 @@ class DummyClientQuotaCallback extends ClientQuotaCallback with Reconfigurable { } } - override def reconfigurableConfigs(): util.Set[String] = Set(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey).asJava + override def reconfigurableConfigs(): util.Set[String] = util.Set.of(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey) override def validateReconfiguration(configs: util.Map[String, _]): Unit = { } @@ -1735,7 +1804,7 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { }).toMap.asJava } - override def reconfigurableConfigs(): util.Set[String] = Set(foobarConfigKey).asJava + override def reconfigurableConfigs(): util.Set[String] = util.Set.of(foobarConfigKey) override def validateReconfiguration(configs: util.Map[String, _]): Unit = { fakeConfigurableAuthorizerConfigToInt(configs) @@ -1749,7 +1818,7 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { actions.asScala.map(_ => AuthorizationResult.ALLOWED).toList.asJava } - override def acls(filter: AclBindingFilter): lang.Iterable[AclBinding] = List[AclBinding]().asJava + override def acls(filter: AclBindingFilter): lang.Iterable[AclBinding] = util.List.of[AclBinding]() override def close(): Unit = {} @@ -1761,13 +1830,13 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { requestContext: AuthorizableRequestContext, aclBindings: util.List[AclBinding] ): util.List[_ <: CompletionStage[AclCreateResult]] = { - Collections.emptyList() + util.List.of } override def deleteAcls( requestContext: AuthorizableRequestContext, aclBindingFilters: util.List[AclBindingFilter] ): util.List[_ <: CompletionStage[AclDeleteResult]] = { - Collections.emptyList() + util.List.of } } diff --git a/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala deleted file mode 100644 index 490ebc48c1648..0000000000000 --- 
a/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import org.apache.kafka.common.test.api.{ClusterTest, ClusterTests, Type} -import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType -import org.apache.kafka.clients.admin.{FeatureUpdate, UpdateFeaturesOptions} -import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.common.MetadataVersion -import org.junit.jupiter.api.Assertions.assertEquals - -import scala.jdk.CollectionConverters._ - -class MetadataVersionIntegrationTest { - @ClusterTests(value = Array( - new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_3_IV3), - new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_4_IV0), - new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_5_IV0), - new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_6_IV0), - new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_6_IV1) - )) - def testBasicMetadataVersionUpgrade(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.admin() - try { - val describeResult = admin.describeFeatures() - val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - assertEquals(ff.minVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) - assertEquals(ff.maxVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) - - // Update to new version - val updateVersion = MetadataVersion.IBP_3_7_IV1.featureLevel.shortValue - val updateResult = admin.updateFeatures( - Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) - updateResult.all().get() - - // Verify that new version is visible on broker - TestUtils.waitUntilTrue(() => { - val describeResult2 = admin.describeFeatures() - val ff2 = describeResult2.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - ff2.minVersionLevel() == updateVersion && ff2.maxVersionLevel() == updateVersion - }, "Never saw metadata.version increase on broker") - } finally { - admin.close() - } - } - - @ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_9_IV0) - def testUpgradeSameVersion(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.admin() - try { - val updateVersion = MetadataVersion.IBP_3_9_IV0.featureLevel.shortValue - val updateResult = admin.updateFeatures( - Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new 
UpdateFeaturesOptions()) - updateResult.all().get() - } finally { - admin.close() - } - } - - @ClusterTest(types = Array(Type.KRAFT)) - def testDefaultIsLatestVersion(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.admin() - try { - val describeResult = admin.describeFeatures() - val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - assertEquals(ff.minVersionLevel(), MetadataVersion.latestTesting().featureLevel(), - "If this test fails, check the default MetadataVersion in the @ClusterTest annotation") - assertEquals(ff.maxVersionLevel(), MetadataVersion.latestTesting().featureLevel()) - } finally { - admin.close() - } - } -} diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala index 8bf7860d151a0..969b069fc5d25 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala @@ -39,7 +39,7 @@ class MultipleListenersWithAdditionalJaasContextTest extends MultipleListenersWi val props = new Properties kafkaServerSaslMechanisms(SecureInternal).foreach { mechanism => addDynamicJaasSection(props, SecureInternal, mechanism, - JaasTestUtils.kafkaServerSection("secure_internal.KafkaServer", Seq(mechanism).asJava, None.toJava)) + JaasTestUtils.kafkaServerSection("secure_internal.KafkaServer", java.util.List.of(mechanism), None.toJava)) } props } diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala index bcde663eb5258..8b6256663b6de 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala @@ -18,7 +18,7 @@ package kafka.server -import java.util.{Collections, Objects, Optional, Properties} +import java.util.{Objects, Optional, Properties} import java.util.concurrent.TimeUnit import kafka.api.SaslSetup import kafka.security.JaasTestUtils @@ -126,13 +126,13 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT servers.head.groupCoordinator.groupMetadataTopicConfigs.entrySet(). 
forEach(e => newTopicConfigs.put(e.getKey.toString, e.getValue.toString)) newTopic.configs(newTopicConfigs) - admin.createTopics(java.util.Arrays.asList(newTopic)).all().get(5, TimeUnit.MINUTES) + admin.createTopics(java.util.List.of(newTopic)).all().get(5, TimeUnit.MINUTES) createScramCredentials(admin, JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_PASSWORD) TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) servers.head.config.listeners.foreach { endPoint => - val listenerName = endPoint.listenerName + val listenerName = ListenerName.normalised(endPoint.listener) val trustStoreFile = if (JaasTestUtils.usesSslTransportLayer(endPoint.securityProtocol)) Some(this.trustStoreFile) @@ -143,7 +143,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT def addProducerConsumer(listenerName: ListenerName, mechanism: String, saslProps: Option[Properties]): Unit = { val topic = s"${listenerName.value}${producers.size}" - admin.createTopics(java.util.Arrays.asList(new NewTopic(topic, 2, 2.toShort))).all().get(5, TimeUnit.MINUTES) + admin.createTopics(java.util.List.of(new NewTopic(topic, 2, 2.toShort))).all().get(5, TimeUnit.MINUTES) val clientMetadata = ClientMetadata(listenerName, mechanism, topic) producers(clientMetadata) = TestUtils.createProducer(bootstrapServers, acks = -1, @@ -155,7 +155,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT } if (JaasTestUtils.usesSaslAuthentication(endPoint.securityProtocol)) { - kafkaServerSaslMechanisms(endPoint.listenerName.value).foreach { mechanism => + kafkaServerSaslMechanisms(endPoint.listener).foreach { mechanism => addProducerConsumer(listenerName, mechanism, Some(kafkaClientSaslProperties(mechanism, dynamicJaasConfig = true))) } } else { @@ -188,7 +188,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT producerRecords.map(producer.send).map(_.get(10, TimeUnit.SECONDS)) val consumer = consumers(clientMetadata) - consumer.subscribe(Collections.singleton(clientMetadata.topic)) + consumer.subscribe(java.util.Set.of(clientMetadata.topic)) TestUtils.consumeRecords(consumer, producerRecords.size) } } diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index 5e3f421caf2cc..6af0932690f92 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -20,7 +20,7 @@ package kafka.server import java.io.File import java.net.InetSocketAddress import java.util -import java.util.{Collections, Locale, Optional, OptionalInt, Properties, stream} +import java.util.{Locale, Optional, OptionalInt, Properties, stream} import java.util.concurrent.{CompletableFuture, TimeUnit} import javax.security.auth.login.Configuration import kafka.utils.{CoreUtils, Logging, TestInfoUtils, TestUtils} @@ -40,7 +40,7 @@ import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.queue.KafkaEventQueue import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} import org.apache.kafka.server.{ClientMetricsManager, ServerSocketFactory} -import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, MetadataVersion, TransactionVersion} +import org.apache.kafka.server.common.{MetadataVersion, TransactionVersion} import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.{FaultHandler, 
MockFaultHandler} import org.apache.kafka.server.util.timer.SystemTimer @@ -81,7 +81,7 @@ class KRaftQuorumImplementation( ): KafkaBroker = { val metaPropertiesEnsemble = { val loader = new MetaPropertiesEnsemble.Loader() - loader.addLogDirs(config.logDirs.asJava) + loader.addLogDirs(config.logDirs) loader.addMetadataLogDir(config.metadataLogDir) val ensemble = loader.load() val copier = new MetaPropertiesEnsemble.Copier(ensemble) @@ -159,10 +159,6 @@ abstract class QuorumTestHarness extends Logging { private var testInfo: TestInfo = _ protected var implementation: QuorumImplementation = _ - def isShareGroupTest(): Boolean = { - TestInfoUtils.isShareGroupTest(testInfo) - } - def maybeGroupProtocolSpecified(): Option[GroupProtocol] = { TestInfoUtils.maybeGroupProtocolSpecified(testInfo) } @@ -267,8 +263,10 @@ abstract class QuorumTestHarness extends Logging { val listeners = extraControllerSecurityProtocols().map(sc => sc + "://localhost:0").mkString(",") val listenerNames = extraControllerSecurityProtocols().mkString(",") props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"CONTROLLER:$proto,$securityProtocolMaps") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"CONTROLLER://localhost:0,$listeners") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, s"CONTROLLER,$listenerNames") + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, + if (listeners.isEmpty) "CONTROLLER://localhost:0" else s"CONTROLLER://localhost:0,$listeners") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, + if (listeners.isEmpty) "CONTROLLER" else s"CONTROLLER,$listenerNames") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$nodeId@localhost:0") props.setProperty(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG, "1000") val config = new KafkaConfig(props) @@ -279,7 +277,7 @@ abstract class QuorumTestHarness extends Logging { formatter.addDirectory(metadataDir.getAbsolutePath) formatter.setReleaseVersion(metadataVersion) formatter.setUnstableFeatureVersionsEnabled(true) - formatter.setControllerListenerName(config.controllerListenerNames.head) + formatter.setControllerListenerName(config.controllerListenerNames.get(0)) formatter.setMetadataLogDirectory(config.metadataLogDir) val transactionVersion = @@ -288,12 +286,6 @@ abstract class QuorumTestHarness extends Logging { } else TransactionVersion.TV_1.featureLevel() formatter.setFeatureLevel(TransactionVersion.FEATURE_NAME, transactionVersion) - val elrVersion = - if (TestInfoUtils.isEligibleLeaderReplicasV1Enabled(testInfo)) { - EligibleLeaderReplicasVersion.ELRV_1.featureLevel() - } else EligibleLeaderReplicasVersion.ELRV_0.featureLevel() - formatter.setFeatureLevel(EligibleLeaderReplicasVersion.FEATURE_NAME, elrVersion) - addFormatterSettings(formatter) formatter.run() val bootstrapMetadata = formatter.bootstrapMetadata() @@ -311,7 +303,7 @@ abstract class QuorumTestHarness extends Logging { Time.SYSTEM, new Metrics(), controllerQuorumVotersFuture, - Collections.emptyList(), + util.List.of, faultHandlerFactory, ServerSocketFactory.INSTANCE, ) @@ -328,7 +320,7 @@ abstract class QuorumTestHarness extends Logging { controllerQuorumVotersFuture.completeExceptionally(e) } else { controllerQuorumVotersFuture.complete( - Collections.singletonMap(nodeId, new InetSocketAddress("localhost", port)) + util.Map.of(nodeId, new InetSocketAddress("localhost", port)) ) } }) diff --git a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala 
b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala index 2288d37aaadc1..14d679f25d3a1 100644 --- a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala +++ b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala @@ -20,6 +20,7 @@ package kafka.server import kafka.utils.TestUtils import org.apache.kafka.common.test.{KafkaClusterTestKit, TestKitNodes} import org.apache.kafka.common.utils.BufferSupplier +import org.apache.kafka.common.utils.LogContext import org.apache.kafka.metadata.MetadataRecordSerde import org.apache.kafka.raft.MetadataLogConfig import org.apache.kafka.snapshot.RecordsSnapshotReader @@ -79,7 +80,8 @@ class RaftClusterSnapshotTest { new MetadataRecordSerde(), BufferSupplier.create(), 1, - true + true, + new LogContext() ) ) { snapshot => // Check that the snapshot is non-empty diff --git a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala index 1d3b3493cce8e..f83c545c7b298 100644 --- a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala +++ b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala @@ -20,14 +20,14 @@ import kafka.server.{KafkaConfig, KafkaRaftServer} import kafka.utils.TestUtils import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.errors.CorruptRecordException -import org.apache.kafka.common.errors.{InvalidConfigurationException, RecordTooLargeException} +import org.apache.kafka.common.errors.RecordTooLargeException import org.apache.kafka.common.protocol import org.apache.kafka.common.protocol.{ObjectSerializationCache, Writable} import org.apache.kafka.common.record.ArbitraryMemoryRecords import org.apache.kafka.common.record.InvalidMemoryRecordsProvider import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.utils.Utils -import org.apache.kafka.raft.{KafkaRaftClient, LogAppendInfo, LogOffsetMetadata, MetadataLogConfig, OffsetAndEpoch, QuorumConfig, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} +import org.apache.kafka.raft.{KafkaRaftClient, LogAppendInfo, LogOffsetMetadata, MetadataLogConfig, QuorumConfig, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} import org.apache.kafka.raft.internals.BatchBuilder import org.apache.kafka.server.common.serialization.RecordSerde import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} @@ -40,16 +40,17 @@ import org.junit.jupiter.api.function.Executable import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ArgumentsSource - import net.jqwik.api.AfterFailureMode import net.jqwik.api.ForAll import net.jqwik.api.Property +import org.apache.kafka.common.config.{AbstractConfig, ConfigException} +import org.apache.kafka.server.common.OffsetAndEpoch import java.io.File import java.nio.ByteBuffer import java.nio.file.{Files, Path} import java.util -import java.util.{Collections, Optional, Properties} +import java.util.{Optional, Properties} import scala.jdk.CollectionConverters._ import scala.util.Using @@ -72,19 +73,19 @@ final class KafkaMetadataLogTest { @Test def testConfig(): Unit = { val props = new Properties() - props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, util.Arrays.asList("broker")) + props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, util.List.of("broker")) props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, "1@localhost:9093") props.put(KRaftConfigs.NODE_ID_CONFIG, Int.box(2)) 
props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG, Int.box(10240)) props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, Int.box(10 * 1024)) - assertThrows(classOf[InvalidConfigurationException], () => { + assertThrows(classOf[ConfigException], () => { val kafkaConfig = KafkaConfig.fromProps(props) val metadataConfig = new MetadataLogConfig(kafkaConfig) buildMetadataLog(tempDir, mockTime, metadataConfig) }) - props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG, Int.box(10240)) + props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG, Int.box(10 * 1024 * 1024)) val kafkaConfig = KafkaConfig.fromProps(props) val metadataConfig = new MetadataLogConfig(kafkaConfig) buildMetadataLog(tempDir, mockTime, metadataConfig) @@ -478,7 +479,7 @@ final class KafkaMetadataLogTest { assertEquals(log.earliestSnapshotId(), log.latestSnapshotId()) log.close() - mockTime.sleep(config.deleteDelayMillis) + mockTime.sleep(config.internalDeleteDelayMillis) // Assert that the log dir doesn't contain any older snapshots Files .walk(logDir, 1) @@ -649,7 +650,7 @@ final class KafkaMetadataLogTest { assertEquals(greaterSnapshotId, secondLog.latestSnapshotId().get) assertEquals(3 * numberOfRecords, secondLog.startOffset) assertEquals(epoch, secondLog.lastFetchedEpoch) - mockTime.sleep(config.deleteDelayMillis) + mockTime.sleep(config.internalDeleteDelayMillis) // Assert that the log dir doesn't contain any older snapshots Files @@ -687,15 +688,12 @@ final class KafkaMetadataLogTest { val leaderEpoch = 5 val maxBatchSizeInBytes = 16384 val recordSize = 64 - val config = new MetadataLogConfig( + val config = createMetadataLogConfig( DefaultMetadataLogConfig.logSegmentBytes, - DefaultMetadataLogConfig.logSegmentMinBytes, DefaultMetadataLogConfig.logSegmentMillis, DefaultMetadataLogConfig.retentionMaxBytes, DefaultMetadataLogConfig.retentionMillis, - maxBatchSizeInBytes, - DefaultMetadataLogConfig.maxFetchSizeInBytes, - DefaultMetadataLogConfig.deleteDelayMillis + maxBatchSizeInBytes ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -742,7 +740,7 @@ final class KafkaMetadataLogTest { ) val serializationCache = new ObjectSerializationCache - val records = Collections.singletonList(new Array[Byte](recordSize)) + val records = util.List.of(new Array[Byte](recordSize)) while (!batchBuilder.bytesNeeded(records, serializationCache).isPresent) { batchBuilder.appendRecord(records.get(0), serializationCache) } @@ -907,15 +905,13 @@ final class KafkaMetadataLogTest { @Test def testAdvanceLogStartOffsetAfterCleaning(): Unit = { - val config = new MetadataLogConfig( - 512, + val config = createMetadataLogConfig( 512, 10 * 1000, 256, 60 * 1000, 512, - DefaultMetadataLogConfig.maxFetchSizeInBytes, - ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT + DefaultMetadataLogConfig.internalMaxFetchSizeInBytes, ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -944,15 +940,12 @@ final class KafkaMetadataLogTest { @Test def testDeleteSnapshots(): Unit = { // Generate some logs and a few snapshots, set retention low and verify that cleaning occurs - val config = new MetadataLogConfig( - 1024, + val config = createMetadataLogConfig( 1024, 10 * 1000, 1024, 60 * 1000, 100, - DefaultMetadataLogConfig.maxBatchSizeInBytes, - DefaultMetadataLogConfig.maxFetchSizeInBytes ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -978,15 +971,12 @@ final class KafkaMetadataLogTest { @Test def testSoftRetentionLimit(): Unit = { // Set 
retention equal to the segment size and generate slightly more than one segment of logs - val config = new MetadataLogConfig( - 10240, + val config = createMetadataLogConfig( 10240, 10 * 1000, 10240, 60 * 1000, 100, - DefaultMetadataLogConfig.maxFetchSizeInBytes, - DefaultMetadataLogConfig.deleteDelayMillis ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -1020,17 +1010,21 @@ final class KafkaMetadataLogTest { }) } + @Test + def testSegmentMsConfigIsSetInMetadataLog(): Unit = { + val log = buildMetadataLog(tempDir, mockTime) + + assertEquals(DefaultMetadataLogConfig.logSegmentMillis, log.log.config().segmentMs) + } + @Test def testSegmentsLessThanLatestSnapshot(): Unit = { - val config = new MetadataLogConfig( - 10240, + val config = createMetadataLogConfig( 10240, 10 * 1000, 10240, 60 * 1000, 200, - DefaultMetadataLogConfig.maxFetchSizeInBytes, - DefaultMetadataLogConfig.deleteDelayMillis ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -1081,15 +1075,11 @@ object KafkaMetadataLogTest { override def read(input: protocol.Readable, size: Int): Array[Byte] = input.readArray(size) } - val DefaultMetadataLogConfig = new MetadataLogConfig( - 100 * 1024, + val DefaultMetadataLogConfig = createMetadataLogConfig( 100 * 1024, 10 * 1000, 100 * 1024, 60 * 1000, - KafkaRaftClient.MAX_BATCH_SIZE_BYTES, - KafkaRaftClient.MAX_FETCH_SIZE_BYTES, - ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT ) def buildMetadataLogAndDir( @@ -1166,4 +1156,25 @@ object KafkaMetadataLogTest { } dir } + + private def createMetadataLogConfig( + internalLogSegmentBytes: Int, + logSegmentMillis: Long, + retentionMaxBytes: Long, + retentionMillis: Long, + internalMaxBatchSizeInBytes: Int = KafkaRaftClient.MAX_BATCH_SIZE_BYTES, + internalMaxFetchSizeInBytes: Int = KafkaRaftClient.MAX_FETCH_SIZE_BYTES, + internalDeleteDelayMillis: Long = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT + ): MetadataLogConfig = { + val config: util.Map[String, Any] = util.Map.of( + MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG, internalLogSegmentBytes, + MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, logSegmentMillis, + MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG, retentionMaxBytes, + MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG, retentionMillis, + MetadataLogConfig.INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG, internalMaxBatchSizeInBytes, + MetadataLogConfig.INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG, internalMaxFetchSizeInBytes, + MetadataLogConfig.INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG, internalDeleteDelayMillis, + ) + new MetadataLogConfig(new AbstractConfig(MetadataLogConfig.CONFIG_DEF, config, false)) + } } diff --git a/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala b/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala index 6e8efa28b86b6..e67e041e1f59d 100644 --- a/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala +++ b/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala @@ -40,7 +40,6 @@ import org.mockito.Mockito.{mock, times, verify, when} import java.net.InetAddress import java.nio.ByteBuffer -import java.util.Collections import java.util.concurrent.CompletableFuture import java.util.concurrent.atomic.AtomicInteger import java.util.stream.Collectors @@ -56,7 +55,7 @@ class KafkaRequestHandlerTest { def testCallbackTiming(): Unit = { val time = new MockTime() val startTime = time.nanoseconds() - val metrics = new RequestChannelMetrics(Collections.emptySet[ApiKeys]) + val metrics = new 
RequestChannelMetrics(java.util.Set.of[ApiKeys]) val requestChannel = new RequestChannel(10, time, metrics) val apiHandler = mock(classOf[ApiRequestHandler]) try { diff --git a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala index ae9cdc36b6ba0..c0ce96dd67244 100644 --- a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala @@ -21,7 +21,7 @@ import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache import kafka.utils.{CoreUtils, Logging, TestUtils} import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.common.{TopicIdPartition, Uuid} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionChangeRecord, PartitionRecord, TopicRecord} @@ -32,6 +32,7 @@ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.server.util.{MockScheduler, MockTime} import org.apache.kafka.storage.internals.log.{AppendOrigin, LogDirFailureChannel} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -39,6 +40,7 @@ import org.junit.jupiter.api.Assertions._ import org.mockito.Mockito.mock import java.io.File +import java.util.{Map => JMap} import scala.collection.Map import scala.jdk.CollectionConverters._ @@ -48,7 +50,8 @@ class LocalLeaderEndPointTest extends Logging { val topicId = Uuid.randomUuid() val topic = "test" val partition = 5 - val topicPartition = new TopicPartition(topic, partition) + val topicIdPartition = new TopicIdPartition(topicId, partition, topic) + val topicPartition = topicIdPartition.topicPartition() val sourceBroker: BrokerEndPoint = new BrokerEndPoint(0, "localhost", 9092) var replicaManager: ReplicaManager = _ var endPoint: LeaderEndPoint = _ @@ -59,7 +62,7 @@ class LocalLeaderEndPointTest extends Logging { def setUp(): Unit = { val props = TestUtils.createBrokerConfig(sourceBroker.id, port = sourceBroker.port) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val alterPartitionManager = mock(classOf[AlterPartitionManager]) val metrics = new Metrics quotaManager = QuotaFactory.instantiate(config, metrics, time, "", "") @@ -87,8 +90,8 @@ class LocalLeaderEndPointTest extends Logging { delta.replay(new PartitionRecord() .setPartitionId(partition) .setTopicId(topicId) - .setReplicas(List[Integer](sourceBroker.id).asJava) - .setIsr(List[Integer](sourceBroker.id).asJava) + .setReplicas(java.util.List.of[Integer](sourceBroker.id)) + .setIsr(java.util.List.of[Integer](sourceBroker.id)) .setLeader(sourceBroker.id) .setLeaderEpoch(0) .setPartitionEpoch(0) @@ -115,52 +118,52 @@ class LocalLeaderEndPointTest extends Logging { @Test def testFetchLatestOffset(): Unit = { - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, 
topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(3L, 0), endPoint.fetchLatestOffset(topicPartition, currentLeaderEpoch = 0)) + assertEquals(new OffsetAndEpoch(3L, 0), endPoint.fetchLatestOffset(topicPartition, 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(6L, 1), endPoint.fetchLatestOffset(topicPartition, currentLeaderEpoch = 7)) + assertEquals(new OffsetAndEpoch(6L, 1), endPoint.fetchLatestOffset(topicPartition, 7)) } @Test def testFetchEarliestOffset(): Unit = { - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 0)) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) replicaManager.deleteRecords(timeout = 1000L, Map(topicPartition -> 3), _ => ()) - assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 7)) + assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestOffset(topicPartition, 7)) } @Test def testFetchEarliestLocalOffset(): Unit = { - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestLocalOffset(topicPartition, currentLeaderEpoch = 0)) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestLocalOffset(topicPartition, 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) replicaManager.logManager.getLog(topicPartition).foreach(log => log.updateLocalLogStartOffset(3)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 7)) - assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestLocalOffset(topicPartition, currentLeaderEpoch = 7)) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, 7)) + assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestLocalOffset(topicPartition, 7)) } @Test def testFetchEpochEndOffsets(): Unit = { - appendRecords(replicaManager, topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - var result = endPoint.fetchEpochEndOffsets(Map( - topicPartition -> new OffsetForLeaderPartition() + var result = endPoint.fetchEpochEndOffsets(JMap.of( + topicPartition, new OffsetForLeaderPartition() .setPartition(topicPartition.partition) - .setLeaderEpoch(0) - )) + .setLeaderEpoch(0)) + ).asScala var expected = Map( topicPartition -> new EpochEndOffset() @@ -177,14 +180,14 @@ class LocalLeaderEndPointTest extends Logging { bumpLeaderEpoch() assertEquals(2, replicaManager.getPartitionOrException(topicPartition).getLeaderEpoch) - appendRecords(replicaManager, 
topicPartition, records) + appendRecords(replicaManager, topicIdPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - result = endPoint.fetchEpochEndOffsets(Map( - topicPartition -> new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(JMap.of( + topicPartition, new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(2) - )) + )).asScala expected = Map( topicPartition -> new EpochEndOffset() @@ -197,11 +200,11 @@ class LocalLeaderEndPointTest extends Logging { assertEquals(expected, result) // Check missing epoch: 1, we expect the API to return (leader_epoch=0, end_offset=3). - result = endPoint.fetchEpochEndOffsets(Map( - topicPartition -> new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(JMap.of( + topicPartition, new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(1) - )) + )).asScala expected = Map( topicPartition -> new EpochEndOffset() @@ -213,11 +216,11 @@ class LocalLeaderEndPointTest extends Logging { assertEquals(expected, result) // Check missing epoch: 5, we expect the API to return (leader_epoch=-1, end_offset=-1) - result = endPoint.fetchEpochEndOffsets(Map( - topicPartition -> new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(JMap.of( + topicPartition, new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(5) - )) + )).asScala expected = Map( topicPartition -> new EpochEndOffset() @@ -263,12 +266,12 @@ class LocalLeaderEndPointTest extends Logging { } private def appendRecords(replicaManager: ReplicaManager, - partition: TopicPartition, + partition: TopicIdPartition, records: MemoryRecords, origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { + def appendCallback(responses: scala.collection.Map[TopicIdPartition, PartitionResponse]): Unit = { val response = responses.get(partition) assertTrue(response.isDefined) result.fire(response.get) diff --git a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala index cb08a021e2c6d..46c7237dafbc1 100644 --- a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala +++ b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala @@ -18,7 +18,6 @@ package kafka.server import java.nio.ByteBuffer -import java.util.Collections import java.util.concurrent.atomic.AtomicReference import kafka.utils.TestUtils import kafka.utils.TestUtils.TestControllerRequestCompletionHandler @@ -95,7 +94,7 @@ class NodeToControllerRequestThreadTest { when(controllerNodeProvider.getControllerInfo()).thenReturn(controllerInfo(Some(activeController))) - val expectedResponse = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(2, java.util.Map.of("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -139,7 +138,7 @@ class NodeToControllerRequestThreadTest { when(controllerNodeProvider.getControllerInfo()).thenReturn( controllerInfo(Some(oldController)), controllerInfo(Some(newController))) - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, 
Collections.singletonMap("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -189,9 +188,9 @@ class NodeToControllerRequestThreadTest { controllerInfo(Some(oldController)), controllerInfo(Some(newController))) val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2, - Collections.singletonMap("a", Errors.NOT_CONTROLLER), - Collections.singletonMap("a", 2)) - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) + java.util.Map.of("a", Errors.NOT_CONTROLLER), + java.util.Map.of("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -257,7 +256,7 @@ class NodeToControllerRequestThreadTest { new EnvelopeResponseData().setErrorCode(Errors.NOT_CONTROLLER.code())) // response for retry request after receiving NOT_CONTROLLER error - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), @@ -320,8 +319,8 @@ class NodeToControllerRequestThreadTest { val retryTimeoutMs = 30000 val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2, - Collections.singletonMap("a", Errors.NOT_CONTROLLER), - Collections.singletonMap("a", 2)) + java.util.Map.of("a", Errors.NOT_CONTROLLER), + java.util.Map.of("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs) diff --git a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala index b3cc09c7bfe55..739f8968bd29f 100644 --- a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala @@ -17,7 +17,6 @@ package kafka.server -import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.epoch.util.MockBlockingSender import kafka.utils.TestUtils import org.apache.kafka.clients.FetchSessionHandler @@ -32,6 +31,7 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.util.MockTime +import org.apache.kafka.server.{LeaderEndPoint, PartitionFetchState, ReplicaState} import org.apache.kafka.storage.internals.log.UnifiedLog import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} @@ -40,6 +40,7 @@ import org.mockito.Mockito.{mock, when} import java.util import java.util.Optional +import java.util.{Map => JMap} import scala.collection.Map import scala.jdk.CollectionConverters._ @@ -92,19 +93,19 @@ class RemoteLeaderEndPointTest { @Test def testFetchEpochEndOffsets(): Unit = { - val expected = Map( - topicPartition -> new EpochEndOffset() + val expected = util.Map.of( + topicPartition, new EpochEndOffset() .setPartition(topicPartition.partition) 
.setErrorCode(Errors.NONE.code) .setLeaderEpoch(0) .setEndOffset(logEndOffset)) - blockingSend.setOffsetsForNextResponse(expected.asJava) - val result = endPoint.fetchEpochEndOffsets(Map( - topicPartition -> new OffsetForLeaderPartition() + blockingSend.setOffsetsForNextResponse(expected) + val result = endPoint.fetchEpochEndOffsets(JMap.of( + topicPartition, new OffsetForLeaderPartition() .setPartition(topicPartition.partition) - .setLeaderEpoch(currentLeaderEpoch))) + .setLeaderEpoch(currentLeaderEpoch))).asScala - assertEquals(expected, result) + assertEquals(expected, result.asJava) } @Test @@ -131,18 +132,18 @@ class RemoteLeaderEndPointTest { val tp = new TopicPartition("topic1", 0) val topicId1 = Uuid.randomUuid() val log = mock(classOf[UnifiedLog]) - val partitionMap = Map( - tp -> PartitionFetchState(Some(topicId1), 150, None, 0, None, state = Fetching, lastFetchedEpoch = Optional.empty)) + val partitionMap = java.util.Map.of( + tp, new PartitionFetchState(Optional.of(topicId1), 150, Optional.empty(), 0, Optional.empty(), ReplicaState.FETCHING, Optional.empty)) when(replicaManager.localLogOrException(tp)).thenReturn(log) when(log.logStartOffset).thenReturn(1) - val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = endPoint.buildFetch(partitionMap) - assertTrue(partitionsWithError.isEmpty) - assertEquals(if (version < 15) -1L else 1L, fetchRequestOpt.get.fetchRequest.build(version).replicaEpoch) + val result1 = endPoint.buildFetch(partitionMap) + assertTrue(result1.partitionsWithError.isEmpty) + assertEquals(if (version < 15) -1L else 1L, result1.result.get.fetchRequest.build(version).replicaEpoch) currentBrokerEpoch = 2L - val ResultWithPartitions(newFetchRequestOpt, newPartitionsWithError) = endPoint.buildFetch(partitionMap) - assertTrue(newPartitionsWithError.isEmpty) - assertEquals(if (version < 15) -1L else 2L, newFetchRequestOpt.get.fetchRequest.build(version).replicaEpoch) + val result2 = endPoint.buildFetch(partitionMap) + assertTrue(result2.partitionsWithError.isEmpty) + assertEquals(if (version < 15) -1L else 2L, result2.result.get.fetchRequest.build(version).replicaEpoch) } } diff --git a/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala b/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala index 9d36cae25c239..0b197d467213a 100644 --- a/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala +++ b/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala @@ -16,12 +16,12 @@ */ package kafka.server.metadata -import kafka.server.ClientQuotaManager import org.apache.kafka.image.ClientQuotaDelta +import org.apache.kafka.server.quota.ClientQuotaManager import org.junit.jupiter.api.Assertions.{assertDoesNotThrow, assertEquals, assertThrows} import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable - +import java.util.Optional class ClientQuotaMetadataManagerTest { @@ -41,35 +41,35 @@ class ClientQuotaMetadataManagerTest { assertThrows(classOf[IllegalStateException],() => ClientQuotaMetadataManager.transferToClientQuotaEntity(IpEntity("a"))) assertThrows(classOf[IllegalStateException],() => ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultIpEntity)) assertEquals( - (Some(ClientQuotaManager.UserEntity("user")), None), + (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.empty()), ClientQuotaMetadataManager.transferToClientQuotaEntity(UserEntity("user")) ) assertEquals( - (Some(ClientQuotaManager.DefaultUserEntity), 
None), + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty()), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserEntity) ) assertEquals( - (None, Some(ClientQuotaManager.ClientIdEntity("client"))), + (Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(ClientIdEntity("client")) ) assertEquals( - (None, Some(ClientQuotaManager.DefaultClientIdEntity)), + (Optional.empty(), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultClientIdEntity) ) assertEquals( - (Some(ClientQuotaManager.UserEntity("user")), Some(ClientQuotaManager.ClientIdEntity("client"))), + (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(ExplicitUserExplicitClientIdEntity("user", "client")) ) assertEquals( - (Some(ClientQuotaManager.UserEntity("user")), Some(ClientQuotaManager.DefaultClientIdEntity)), + (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), ClientQuotaMetadataManager.transferToClientQuotaEntity(ExplicitUserDefaultClientIdEntity("user")) ) assertEquals( - (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.ClientIdEntity("client"))), + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserExplicitClientIdEntity("client")) ) assertEquals( - (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.DefaultClientIdEntity)), + (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserDefaultClientIdEntity) ) } diff --git a/core/src/test/scala/kafka/tools/LogCompactionTester.scala b/core/src/test/scala/kafka/tools/LogCompactionTester.scala deleted file mode 100755 index 2ea6c3aae6ca9..0000000000000 --- a/core/src/test/scala/kafka/tools/LogCompactionTester.scala +++ /dev/null @@ -1,349 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.tools - -import java.io._ -import java.nio.ByteBuffer -import java.nio.charset.StandardCharsets.UTF_8 -import java.nio.file.{Files, Path} -import java.time.Duration -import java.util.{Properties, Random} - -import joptsimple.OptionParser -import kafka.utils._ -import org.apache.kafka.clients.admin.{Admin, NewTopic} -import org.apache.kafka.clients.CommonClientConfigs -import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer} -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.serialization.{ByteArraySerializer, StringDeserializer} -import org.apache.kafka.common.utils.{Exit, AbstractIterator, Utils} -import org.apache.kafka.server.util.CommandLineUtils - -import scala.jdk.CollectionConverters._ - -/** - * This is a torture test that runs against an existing broker - * - * Here is how it works: - * - * It produces a series of specially formatted messages to one or more partitions. Each message it produces - * it logs out to a text file. The messages have a limited set of keys, so there is duplication in the key space. - * - * The broker will clean its log as the test runs. - * - * When the specified number of messages have been produced we create a consumer and consume all the messages in the topic - * and write that out to another text file. - * - * Using a stable unix sort we sort both the producer log of what was sent and the consumer log of what was retrieved by the message key. - * Then we compare the final message in both logs for each key. If this final message is not the same for all keys we - * print an error and exit with exit code 1, otherwise we print the size reduction and exit with exit code 0. - */ -object LogCompactionTester { - - //maximum line size while reading produced/consumed record text file - private val ReadAheadLimit = 4906 - - def main(args: Array[String]): Unit = { - val parser = new OptionParser(false) - val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume.") - .withRequiredArg - .describedAs("count") - .ofType(classOf[java.lang.Long]) - .defaultsTo(Long.MaxValue) - val messageCompressionOpt = parser.accepts("compression-type", "message compression type") - .withOptionalArg - .describedAs("compressionType") - .ofType(classOf[java.lang.String]) - .defaultsTo("none") - val numDupsOpt = parser.accepts("duplicates", "The number of duplicates for each key.") - .withRequiredArg - .describedAs("count") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(5) - val brokerOpt = parser.accepts("bootstrap-server", "The server(s) to connect to.") - .withRequiredArg - .describedAs("url") - .ofType(classOf[String]) - val topicsOpt = parser.accepts("topics", "The number of topics to test.") - .withRequiredArg - .describedAs("count") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(1) - val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage of updates that are deletes.") - .withRequiredArg - .describedAs("percent") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(0) - val sleepSecsOpt = parser.accepts("sleep", "Time in milliseconds to sleep between production and consumption.") - .withRequiredArg - .describedAs("ms") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(0) - - val options = parser.parse(args: _*) - - if (args.isEmpty) - CommandLineUtils.printUsageAndExit(parser, "A tool to test log compaction. 
Valid options are: ") - - CommandLineUtils.checkRequiredArgs(parser, options, brokerOpt, numMessagesOpt) - - // parse options - val messages = options.valueOf(numMessagesOpt).longValue - val compressionType = options.valueOf(messageCompressionOpt) - val percentDeletes = options.valueOf(percentDeletesOpt).intValue - val dups = options.valueOf(numDupsOpt).intValue - val brokerUrl = options.valueOf(brokerOpt) - val topicCount = options.valueOf(topicsOpt).intValue - val sleepSecs = options.valueOf(sleepSecsOpt).intValue - - val testId = new Random().nextLong - val topics = (0 until topicCount).map("log-cleaner-test-" + testId + "-" + _).toArray - createTopics(brokerUrl, topics.toSeq) - - println(s"Producing $messages messages..to topics ${topics.mkString(",")}") - val producedDataFilePath = produceMessages(brokerUrl, topics, messages, compressionType, dups, percentDeletes) - println(s"Sleeping for $sleepSecs seconds...") - Thread.sleep(sleepSecs * 1000) - println("Consuming messages...") - val consumedDataFilePath = consumeMessages(brokerUrl, topics) - - val producedLines = lineCount(producedDataFilePath) - val consumedLines = lineCount(consumedDataFilePath) - val reduction = 100 * (1.0 - consumedLines.toDouble / producedLines.toDouble) - println(f"$producedLines%d rows of data produced, $consumedLines%d rows of data consumed ($reduction%.1f%% reduction).") - - println("De-duplicating and validating output files...") - validateOutput(producedDataFilePath.toFile, consumedDataFilePath.toFile) - Utils.delete(producedDataFilePath.toFile) - Utils.delete(consumedDataFilePath.toFile) - //if you change this line, we need to update test_log_compaction_tool.py system test - println("Data verification is completed") - } - - def createTopics(brokerUrl: String, topics: Seq[String]): Unit = { - val adminConfig = new Properties - adminConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) - val adminClient = Admin.create(adminConfig) - - try { - val topicConfigs = Map(TopicConfig.CLEANUP_POLICY_CONFIG -> TopicConfig.CLEANUP_POLICY_COMPACT) - val newTopics = topics.map(name => new NewTopic(name, 1, 1.toShort).configs(topicConfigs.asJava)).asJava - adminClient.createTopics(newTopics).all.get - - var pendingTopics: Seq[String] = Seq() - TestUtils.waitUntilTrue(() => { - val allTopics = adminClient.listTopics.names.get.asScala.toSeq - pendingTopics = topics.filter(topicName => !allTopics.contains(topicName)) - pendingTopics.isEmpty - }, s"timed out waiting for topics : $pendingTopics") - - } finally adminClient.close() - } - - def lineCount(filPath: Path): Int = Files.readAllLines(filPath).size - - def validateOutput(producedDataFile: File, consumedDataFile: File): Unit = { - val producedReader = externalSort(producedDataFile) - val consumedReader = externalSort(consumedDataFile) - val produced = valuesIterator(producedReader) - val consumed = valuesIterator(consumedReader) - - val producedDedupedFile = new File(producedDataFile.getAbsolutePath + ".deduped") - val producedDeduped : BufferedWriter = Files.newBufferedWriter(producedDedupedFile.toPath, UTF_8) - - val consumedDedupedFile = new File(consumedDataFile.getAbsolutePath + ".deduped") - val consumedDeduped : BufferedWriter = Files.newBufferedWriter(consumedDedupedFile.toPath, UTF_8) - var total = 0 - var mismatched = 0 - while (produced.hasNext && consumed.hasNext) { - val p = produced.next() - producedDeduped.write(p.toString) - producedDeduped.newLine() - val c = consumed.next() - consumedDeduped.write(c.toString) - consumedDeduped.newLine() 
- if (p != c) - mismatched += 1 - total += 1 - } - producedDeduped.close() - consumedDeduped.close() - println(s"Validated $total values, $mismatched mismatches.") - require(!produced.hasNext, "Additional values produced not found in consumer log.") - require(!consumed.hasNext, "Additional values consumed not found in producer log.") - require(mismatched == 0, "Non-zero number of row mismatches.") - // if all the checks worked out we can delete the deduped files - Utils.delete(producedDedupedFile) - Utils.delete(consumedDedupedFile) - } - - def require(requirement: Boolean, message: => Any): Unit = { - if (!requirement) { - System.err.println(s"Data validation failed : $message") - Exit.exit(1) - } - } - - def valuesIterator(reader: BufferedReader): Iterator[TestRecord] = { - new AbstractIterator[TestRecord] { - def makeNext(): TestRecord = { - var next = readNext(reader) - while (next != null && next.delete) - next = readNext(reader) - if (next == null) - allDone() - else - next - } - }.asScala - } - - def readNext(reader: BufferedReader): TestRecord = { - var line = reader.readLine() - if (line == null) - return null - var curr = TestRecord.parse(line) - while (true) { - line = peekLine(reader) - if (line == null) - return curr - val next = TestRecord.parse(line) - if (next == null || next.topicAndKey != curr.topicAndKey) - return curr - curr = next - reader.readLine() - } - null - } - - def peekLine(reader: BufferedReader) = { - reader.mark(ReadAheadLimit) - val line = reader.readLine - reader.reset() - line - } - - def externalSort(file: File): BufferedReader = { - val builder = new ProcessBuilder("sort", "--key=1,2", "--stable", "--buffer-size=20%", "--temporary-directory=" + Files.createTempDirectory("log_compaction_test"), file.getAbsolutePath) - val process = builder.start - new Thread() { - override def run(): Unit = { - val exitCode = process.waitFor() - if (exitCode != 0) { - System.err.println("Process exited abnormally.") - while (process.getErrorStream.available > 0) { - System.err.write(process.getErrorStream.read()) - } - } - } - }.start() - new BufferedReader(new InputStreamReader(process.getInputStream, UTF_8), 10 * 1024 * 1024) - } - - def produceMessages(brokerUrl: String, - topics: Array[String], - messages: Long, - compressionType: String, - dups: Int, - percentDeletes: Int): Path = { - val producerProps = new Properties - producerProps.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.MaxValue.toString) - producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) - producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType) - val producer = new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer) - try { - val rand = new Random(1) - val keyCount = (messages / dups).toInt - val producedFilePath = Files.createTempFile("kafka-log-cleaner-produced-", ".txt") - println(s"Logging produce requests to $producedFilePath") - val producedWriter: BufferedWriter = Files.newBufferedWriter(producedFilePath, UTF_8) - for (i <- 0L until (messages * topics.length)) { - val topic = topics((i % topics.length).toInt) - val key = rand.nextInt(keyCount) - val delete = (i % 100) < percentDeletes - val msg = - if (delete) - new ProducerRecord[Array[Byte], Array[Byte]](topic, key.toString.getBytes(UTF_8), null) - else - new ProducerRecord(topic, key.toString.getBytes(UTF_8), i.toString.getBytes(UTF_8)) - producer.send(msg) - producedWriter.write(TestRecord(topic, key, i, delete).toString) - producedWriter.newLine() - } - 
producedWriter.close() - producedFilePath - } finally { - producer.close() - } - } - - def createConsumer(brokerUrl: String): Consumer[String, String] = { - val consumerProps = new Properties - consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "log-cleaner-test-" + new Random().nextInt(Int.MaxValue)) - consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) - consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") - new KafkaConsumer(consumerProps, new StringDeserializer, new StringDeserializer) - } - - def consumeMessages(brokerUrl: String, topics: Array[String]): Path = { - val consumer = createConsumer(brokerUrl) - consumer.subscribe(topics.toSeq.asJava) - val consumedFilePath = Files.createTempFile("kafka-log-cleaner-consumed-", ".txt") - println(s"Logging consumed messages to $consumedFilePath") - val consumedWriter: BufferedWriter = Files.newBufferedWriter(consumedFilePath, UTF_8) - - try { - var done = false - while (!done) { - val consumerRecords = consumer.poll(Duration.ofSeconds(20)) - if (!consumerRecords.isEmpty) { - for (record <- consumerRecords.asScala) { - val delete = record.value == null - val value = if (delete) -1L else record.value.toLong - consumedWriter.write(TestRecord(record.topic, record.key.toInt, value, delete).toString) - consumedWriter.newLine() - } - } else { - done = true - } - } - consumedFilePath - } finally { - consumedWriter.close() - consumer.close() - } - } - - def readString(buffer: ByteBuffer): String = { - Utils.utf8(buffer) - } - -} - -case class TestRecord(topic: String, key: Int, value: Long, delete: Boolean) { - override def toString = topic + "\t" + key + "\t" + value + "\t" + (if (delete) "d" else "u") - def topicAndKey = topic + key -} - -object TestRecord { - def parse(line: String): TestRecord = { - val components = line.split("\t") - new TestRecord(components(0), components(1).toInt, components(2).toLong, components(3) == "d") - } -} diff --git a/core/src/test/scala/kafka/utils/LoggingTest.scala b/core/src/test/scala/kafka/utils/LoggingTest.scala index 7479f021649e5..761b276c400bd 100644 --- a/core/src/test/scala/kafka/utils/LoggingTest.scala +++ b/core/src/test/scala/kafka/utils/LoggingTest.scala @@ -17,6 +17,7 @@ package kafka.utils +import org.apache.kafka.server.logger.LoggingController import java.lang.management.ManagementFactory import javax.management.ObjectName @@ -24,17 +25,8 @@ import org.junit.jupiter.api.Test import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.slf4j.LoggerFactory - class LoggingTest extends Logging { - @Test - def testTypeOfGetLoggers(): Unit = { - val log4jController = new LoggingController - // the return object of getLoggers must be a collection instance from java standard library. - // That enables mbean client to deserialize it without extra libraries. 
- assertEquals(classOf[java.util.ArrayList[String]], log4jController.getLoggers.getClass) - } - @Test def testLog4jControllerIsRegistered(): Unit = { val mbs = ManagementFactory.getPlatformMBeanServer @@ -42,7 +34,7 @@ class LoggingTest extends Logging { val log4jControllerName = ObjectName.getInstance("kafka:type=kafka.Log4jController") assertTrue(mbs.isRegistered(log4jControllerName), "kafka.utils.Log4jController is not registered") val log4jInstance = mbs.getObjectInstance(log4jControllerName) - assertEquals("kafka.utils.LoggingController", log4jInstance.getClassName) + assertEquals("org.apache.kafka.server.logger.LoggingController", log4jInstance.getClassName) } @Test diff --git a/core/src/test/scala/kafka/utils/TestInfoUtils.scala b/core/src/test/scala/kafka/utils/TestInfoUtils.scala index 83cbf869051c1..e6c70b6e8fe49 100644 --- a/core/src/test/scala/kafka/utils/TestInfoUtils.scala +++ b/core/src/test/scala/kafka/utils/TestInfoUtils.scala @@ -18,14 +18,14 @@ package kafka.utils import java.lang.reflect.Method import java.util -import java.util.{Collections, Optional} +import java.util.Optional import org.junit.jupiter.api.TestInfo import org.apache.kafka.clients.consumer.GroupProtocol class EmptyTestInfo extends TestInfo { override def getDisplayName: String = "" - override def getTags: util.Set[String] = Collections.emptySet() + override def getTags: util.Set[String] = java.util.Set.of() override def getTestClass: Optional[Class[_]] = Optional.empty() override def getTestMethod: Optional[Method] = Optional.empty() } @@ -34,10 +34,6 @@ object TestInfoUtils { final val TestWithParameterizedGroupProtocolNames = "{displayName}.groupProtocol={0}" - def isShareGroupTest(testInfo: TestInfo): Boolean = { - testInfo.getDisplayName.contains("kip932") - } - def maybeGroupProtocolSpecified(testInfo: TestInfo): Option[GroupProtocol] = { if (testInfo.getDisplayName.contains("groupProtocol=classic")) Some(GroupProtocol.CLASSIC) @@ -54,12 +50,4 @@ object TestInfoUtils { def isTransactionV2Enabled(testInfo: TestInfo): Boolean = { !testInfo.getDisplayName.contains("isTV2Enabled=false") } - - /** - * Returns whether eligible leader replicas version 1 is enabled. - * When no parameter is provided, the default returned is false. 
- */ - def isEligibleLeaderReplicasV1Enabled(testInfo: TestInfo): Boolean = { - testInfo.getDisplayName.contains("isELRV1Enabled=true") - } } diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala index 8834f6f36083c..9ca8b42cd14be 100644 --- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala @@ -57,13 +57,13 @@ class KafkaConfigTest { properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) assertBadConfigContainingMessage(properties, - "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") + "Missing required configuration \"controller.listener.names\" which has no default value.") - properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertBadConfigContainingMessage(properties, - "requirement failed: controller.listener.names must contain at least one value when running KRaft with just the broker role") + "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") KafkaConfig.fromProps(properties) } @@ -82,6 +82,10 @@ class KafkaConfigTest { "Invalid value -1 for configuration node.id: Value must be at least 0") properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) + assertBadConfigContainingMessage(properties, + "Missing required configuration \"controller.listener.names\" which has no default value.") + + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertBadConfigContainingMessage(properties, "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") @@ -90,15 +94,34 @@ class KafkaConfigTest { "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") - assertBadConfigContainingMessage(properties, - "No security protocol defined for listener CONTROLLER") + properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT") + KafkaConfig.fromProps(properties) + } + @Test + def testControllerListenerNamesMismatch(): Unit = { + val properties = new Properties() + properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "OTHER") + properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT") + assertBadConfigContainingMessage(properties, "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") + } - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - KafkaConfig.fromProps(properties) + 
@Test + def testControllerSecurityProtocolMapMissing(): Unit = { + val properties = new Properties() + properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "OTHER") + properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") + + assertBadConfigContainingMessage(properties, "No security protocol defined for listener CONTROLLER") } @Test @@ -116,12 +139,12 @@ class KafkaConfigTest { // We should be also able to set completely new property val config3 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact"))) assertEquals(1, config3.nodeId) - assertEquals(util.Arrays.asList("compact"), config3.logCleanupPolicy) + assertEquals(util.List.of("compact"), config3.logCleanupPolicy) // We should be also able to set several properties val config4 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact,delete", "--override", "node.id=2"))) assertEquals(2, config4.nodeId) - assertEquals(util.Arrays.asList("compact","delete"), config4.logCleanupPolicy) + assertEquals(util.List.of("compact","delete"), config4.logCleanupPolicy) } @Test diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index dc6595df87b0b..bba5278d7a6f3 100755 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -17,7 +17,6 @@ package kafka.admin -import java.util.Collections import kafka.server.{BaseRequestTest, BrokerServer} import kafka.utils.TestUtils import kafka.utils.TestUtils._ @@ -25,13 +24,9 @@ import org.apache.kafka.clients.admin.{Admin, NewPartitions, NewTopic} import org.apache.kafka.common.errors.InvalidReplicaAssignmentException import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import java.util -import java.util.Arrays.asList -import java.util.Collections.singletonList import java.util.concurrent.ExecutionException import scala.jdk.CollectionConverters._ @@ -65,12 +60,11 @@ class AddPartitionsTest extends BaseRequestTest { admin = createAdminClient() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testWrongReplicaCount(quorum: String): Unit = { + @Test + def testWrongReplicaCount(): Unit = { assertEquals(classOf[InvalidReplicaAssignmentException], assertThrows(classOf[ExecutionException], () => { - admin.createPartitions(Collections.singletonMap(topic1, - NewPartitions.increaseTo(2, singletonList(asList(0, 1, 2))))).all().get() + admin.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(2, util.List.of(util.List.of[Integer](0, 1, 2))))).all().get() }).getCause.getClass) } @@ -78,16 +72,15 @@ class AddPartitionsTest extends BaseRequestTest { * Test that when we supply a manual partition assignment to createTopics, it must be 0-based * and consecutive. 
*/ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMissingPartitionsInCreateTopics(quorum: String): Unit = { + @Test + def testMissingPartitionsInCreateTopics(): Unit = { val topic6Placements = new util.HashMap[Integer, util.List[Integer]] - topic6Placements.put(1, asList(0, 1)) - topic6Placements.put(2, asList(1, 0)) + topic6Placements.put(1, util.List.of(0, 1)) + topic6Placements.put(2, util.List.of(1, 0)) val topic7Placements = new util.HashMap[Integer, util.List[Integer]] - topic7Placements.put(2, asList(0, 1)) - topic7Placements.put(3, asList(1, 0)) - val futures = admin.createTopics(asList( + topic7Placements.put(2, util.List.of(0, 1)) + topic7Placements.put(3, util.List.of(1, 0)) + val futures = admin.createTopics(util.List.of( new NewTopic("new-topic6", topic6Placements), new NewTopic("new-topic7", topic7Placements))).values() val topic6Cause = assertThrows(classOf[ExecutionException], () => futures.get("new-topic6").get()).getCause @@ -104,21 +97,19 @@ class AddPartitionsTest extends BaseRequestTest { * Test that when we supply a manual partition assignment to createPartitions, it must contain * enough partitions. */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMissingPartitionsInCreatePartitions(quorum: String): Unit = { + @Test + def testMissingPartitionsInCreatePartitions(): Unit = { val cause = assertThrows(classOf[ExecutionException], () => - admin.createPartitions(Collections.singletonMap(topic1, - NewPartitions.increaseTo(3, singletonList(asList(0, 1, 2))))).all().get()).getCause + admin.createPartitions(util.Map.of(topic1, + NewPartitions.increaseTo(3, util.List.of(util.List.of[Integer](0, 1, 2))))).all().get()).getCause assertEquals(classOf[InvalidReplicaAssignmentException], cause.getClass) assertTrue(cause.getMessage.contains("Attempted to add 2 additional partition(s), but only 1 assignment(s) " + "were specified."), "Unexpected error message: " + cause.getMessage) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementPartitions(quorum: String): Unit = { - admin.createPartitions(Collections.singletonMap(topic1, NewPartitions.increaseTo(3))).all().get() + @Test + def testIncrementPartitions(): Unit = { + admin.createPartitions(util.Map.of(topic1, NewPartitions.increaseTo(3))).all().get() // wait until leader is elected waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic1, 1) @@ -128,7 +119,7 @@ class AddPartitionsTest extends BaseRequestTest { TestUtils.waitForPartitionMetadata(brokers, topic1, 1) TestUtils.waitForPartitionMetadata(brokers, topic1, 2) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(Seq(topic1).asJava, false).build) + new MetadataRequest.Builder(util.List.of(topic1), false).build) assertEquals(1, response.topicMetadata.size) val partitions = response.topicMetadata.asScala.head.partitionMetadata.asScala.sortBy(_.partition) assertEquals(partitions.size, 3) @@ -144,12 +135,11 @@ class AddPartitionsTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testManualAssignmentOfReplicas(quorum: String): Unit = { + @Test + def testManualAssignmentOfReplicas(): Unit = { // Add 2 partitions - admin.createPartitions(Collections.singletonMap(topic2, NewPartitions.increaseTo(3, - asList(asList(0, 1), asList(2, 3))))).all().get() + admin.createPartitions(util.Map.of(topic2, NewPartitions.increaseTo(3, + util.List.of(util.List.of[Integer](0, 1), util.List.of[Integer](2, 3))))).all().get() // wait until 
leader is elected val leader1 = waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic2, 1) val leader2 = waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic2, 2) @@ -160,7 +150,7 @@ class AddPartitionsTest extends BaseRequestTest { val partition2Metadata = TestUtils.waitForPartitionMetadata(brokers, topic2, 2) assertEquals(leader2, partition2Metadata.leader()) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(Seq(topic2).asJava, false).build) + new MetadataRequest.Builder(util.List.of(topic2), false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head val partitionMetadata = topicMetadata.partitionMetadata.asScala.sortBy(_.partition) @@ -173,10 +163,9 @@ class AddPartitionsTest extends BaseRequestTest { assertEquals(Set(0, 1), replicas.asScala.toSet) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testReplicaPlacementAllServers(quorum: String): Unit = { - admin.createPartitions(Collections.singletonMap(topic3, NewPartitions.increaseTo(7))).all().get() + @Test + def testReplicaPlacementAllServers(): Unit = { + admin.createPartitions(util.Map.of(topic3, NewPartitions.increaseTo(7))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic3, 1) @@ -187,7 +176,7 @@ class AddPartitionsTest extends BaseRequestTest { TestUtils.waitForPartitionMetadata(brokers, topic3, 6) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(Seq(topic3).asJava, false).build) + new MetadataRequest.Builder(util.List.of(topic3), false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head @@ -201,17 +190,16 @@ class AddPartitionsTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testReplicaPlacementPartialServers(quorum: String): Unit = { - admin.createPartitions(Collections.singletonMap(topic2, NewPartitions.increaseTo(3))).all().get() + @Test + def testReplicaPlacementPartialServers(): Unit = { + admin.createPartitions(util.Map.of(topic2, NewPartitions.increaseTo(3))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic2, 1) TestUtils.waitForPartitionMetadata(brokers, topic2, 2) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(Seq(topic2).asJava, false).build) + new MetadataRequest.Builder(util.List.of(topic2), false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head diff --git a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala index d9eaa7b2aacc0..d475c6e42918d 100644 --- a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala @@ -19,11 +19,10 @@ package kafka.cluster import kafka.log.LogManager import kafka.utils.TestUtils import kafka.utils.TestUtils.MockAlterPartitionManager -import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.common.{DirectoryId, TopicPartition, Uuid} import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.requests.LeaderAndIsrRequest import org.apache.kafka.common.utils.Utils -import org.apache.kafka.metadata.{MetadataCache, MockConfigRepository} +import 
org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, MockConfigRepository, PartitionRegistration} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.util.MockTime @@ -38,7 +37,6 @@ import java.io.File import java.lang.{Long => JLong} import java.util.{Optional, Properties} import java.util.concurrent.atomic.AtomicInteger -import scala.jdk.CollectionConverters._ object AbstractPartitionTest { val brokerId = 101 @@ -100,7 +98,7 @@ class AbstractPartitionTest { def createLogProperties(overrides: Map[String, String]): Properties = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1000: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 999: java.lang.Integer) overrides.foreach { case (k, v) => logProps.put(k, v) } @@ -120,31 +118,25 @@ class AbstractPartitionTest { isLeader: Boolean): Partition = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val controllerEpoch = 0 - val replicas = List[Integer](brokerId, remoteReplicaId).asJava + val replicas = Array(brokerId, remoteReplicaId) val isr = replicas + val partitionRegistrationBuilder = new PartitionRegistration.Builder() + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) if (isLeader) { - assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = partitionRegistrationBuilder.setLeader(brokerId).build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) } else { - assertTrue(partition.makeFollower(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(remoteReplicaId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setIsNew(true), offsetCheckpoints, None), "Expected become follower transition to succeed") + val partitionRegistration = partitionRegistrationBuilder.setLeader(remoteReplicaId).build() + assertTrue(partition.makeFollower(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become follower transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) - assertEquals(None, partition.leaderLogIfLocal) + assertTrue(partition.leaderLogIfLocal.isEmpty) } partition diff --git a/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala b/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala index c34a7ac7536ba..6172afd286df1 100644 --- a/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala @@ -16,102 +16,99 @@ */ package kafka.cluster -import org.apache.kafka.common.requests.LeaderAndIsrRequest +import org.apache.kafka.common.DirectoryId +import 
org.apache.kafka.metadata.{LeaderRecoveryState, PartitionRegistration} import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} +import java.util import scala.jdk.CollectionConverters._ object AssignmentStateTest { import AbstractPartitionTest._ - def parameters: java.util.stream.Stream[Arguments] = Seq[Arguments]( + def parameters: util.stream.Stream[Arguments] = util.List.of[Arguments]( Arguments.of( - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List.empty[Integer], List.empty[Integer], Seq.empty[Int], Boolean.box(false)), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId, brokerId + 1, brokerId + 2), + Array.emptyIntArray, Array.emptyIntArray, util.List.of[Int], Boolean.box(false)), Arguments.of( - List[Integer](brokerId, brokerId + 1), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List.empty[Integer], List.empty[Integer], Seq.empty[Int], Boolean.box(true)), + Array(brokerId, brokerId + 1), + Array(brokerId, brokerId + 1, brokerId + 2), + Array.emptyIntArray, Array.emptyIntArray, util.List.of[Int], Boolean.box(true)), Arguments.of( - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId + 3, brokerId + 4), - List[Integer](brokerId + 1), - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId + 3, brokerId + 4), + Array(brokerId + 1), + util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId + 3, brokerId + 4), - List.empty[Integer], - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId + 3, brokerId + 4), + Array.emptyIntArray, + util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List.empty[Integer], - List[Integer](brokerId + 1), - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId, brokerId + 1, brokerId + 2), + Array.emptyIntArray, + Array(brokerId + 1), + util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId + 1, brokerId + 2), - List[Integer](brokerId + 1, brokerId + 2), - List[Integer](brokerId), - List.empty[Integer], - Seq(brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId + 1, brokerId + 2), + Array(brokerId + 1, brokerId + 2), + Array(brokerId), + Array.emptyIntArray, + util.List.of(brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId + 2, brokerId + 3, brokerId + 4), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), - List.empty[Integer], - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId + 2, brokerId + 3, brokerId + 4), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId + 3, brokerId + 4, brokerId + 5), + Array.emptyIntArray, + 
util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId + 2, brokerId + 3, brokerId + 4), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), - List.empty[Integer], - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + Array(brokerId + 2, brokerId + 3, brokerId + 4), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId + 3, brokerId + 4, brokerId + 5), + Array.emptyIntArray, + util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - List[Integer](brokerId + 2, brokerId + 3), - List[Integer](brokerId, brokerId + 1, brokerId + 2), - List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), - List.empty[Integer], - Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(true)) - ).asJava.stream() + Array(brokerId + 2, brokerId + 3), + Array(brokerId, brokerId + 1, brokerId + 2), + Array(brokerId + 3, brokerId + 4, brokerId + 5), + Array.emptyIntArray, + util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(true)) + ).stream() } class AssignmentStateTest extends AbstractPartitionTest { @ParameterizedTest @MethodSource(Array("parameters")) - def testPartitionAssignmentStatus(isr: List[Integer], replicas: List[Integer], - adding: List[Integer], removing: List[Integer], - original: Seq[Int], isUnderReplicated: Boolean): Unit = { - val controllerEpoch = 3 - - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + def testPartitionAssignmentStatus(isr: Array[Int], replicas: Array[Int], + adding: Array[Int], removing: Array[Int], + original: util.List[Int], isUnderReplicated: Boolean): Unit = { + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) - .setIsr(isr.asJava) + .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas.asJava) - .setIsNew(false) - if (adding.nonEmpty) - leaderState.setAddingReplicas(adding.asJava) - if (removing.nonEmpty) - leaderState.setRemovingReplicas(removing.asJava) - - val isReassigning = adding.nonEmpty || removing.nonEmpty + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setAddingReplicas(adding) + .setRemovingReplicas(removing) + .build() // set the original replicas as the URP calculation will need them - if (original.nonEmpty) - partition.assignmentState = SimpleAssignmentState(original) + if (!original.isEmpty) + partition.assignmentState = SimpleAssignmentState(original.asScala) // do the test - partition.makeLeader(leaderState, offsetCheckpoints, None) + partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None) + val isReassigning = !adding.isEmpty || !removing.isEmpty assertEquals(isReassigning, partition.isReassigning) - if (adding.nonEmpty) - adding.foreach(r => assertTrue(partition.isAddingReplica(r))) + adding.foreach(r => assertTrue(partition.isAddingReplica(r))) if (adding.contains(brokerId)) assertTrue(partition.isAddingLocalReplica) else diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala index 4a3051ddc9567..fe262360a32a8 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala @@ -26,11 +26,11 @@ import kafka.server._ import kafka.utils._ import 
org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} -import org.apache.kafka.common.requests.{FetchRequest, LeaderAndIsrRequest} +import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.utils.Utils -import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.common.{DirectoryId, TopicPartition, Uuid} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.{LeaderAndIsr, MetadataCache, MockConfigRepository} +import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, MockConfigRepository, PartitionRegistration} import org.apache.kafka.server.common.{RequestLocal, TopicIdPartition} import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} @@ -139,19 +139,20 @@ class PartitionLockTest extends Logging { def testGetReplicaWithUpdateAssignmentAndIsr(): Unit = { val active = new AtomicBoolean(true) val replicaToCheck = 3 - val firstReplicaSet = Seq[Integer](3, 4, 5).asJava - val secondReplicaSet = Seq[Integer](1, 2, 3).asJava - def partitionState(replicas: java.util.List[Integer]) = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(1) - .setLeader(replicas.get(0)) + val firstReplicaSet = Array(3, 4, 5) + val secondReplicaSet = Array(1, 2, 3) + def partitionRegistration(replicas: Array[Int]) = new PartitionRegistration.Builder() + .setLeader(replicas(0)) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(true) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val offsetCheckpoints: OffsetCheckpoints = mock(classOf[OffsetCheckpoints]) // Update replica set synchronously first to avoid race conditions - partition.makeLeader(partitionState(secondReplicaSet), offsetCheckpoints, None) + partition.makeLeader(partitionRegistration(secondReplicaSet), isNew = true, offsetCheckpoints, None) assertTrue(partition.getReplica(replicaToCheck).isDefined, s"Expected replica $replicaToCheck to be defined") val future = executorService.submit((() => { @@ -164,7 +165,7 @@ class PartitionLockTest extends Logging { secondReplicaSet } - partition.makeLeader(partitionState(replicas), offsetCheckpoints, None) + partition.makeLeader(partitionRegistration(replicas), isNew = true, offsetCheckpoints, None) i += 1 Thread.sleep(1) // just to avoid tight loop @@ -343,26 +344,27 @@ class PartitionLockTest extends Logging { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - val controllerEpoch = 0 - val replicas = (0 to numReplicaFetchers).map(i => Integer.valueOf(brokerId + i)).toList.asJava + val replicas = (0 to numReplicaFetchers).map(i => brokerId + i).toArray val isr = replicas - replicas.forEach(replicaId => when(metadataCache.getAliveBrokerEpoch(replicaId)).thenReturn(Optional.of(1L))) + replicas.foreach(replicaId => when(metadataCache.getAliveBrokerEpoch(replicaId)).thenReturn(Optional.of(1L))) - assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(true), offsetCheckpoints, Some(topicId)), 
"Expected become leader transition to succeed") + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, Some(topicId)), "Expected become leader transition to succeed") partition } private def createLogProperties(overrides: Map[String, String]): Properties = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1000: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 999: java.lang.Integer) overrides.foreach { case (k, v) => logProps.put(k, v) } diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala index dd2cc239dfcb8..5662c2d227636 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala @@ -21,17 +21,17 @@ import com.yammer.metrics.core.Metric import kafka.log.LogManager import kafka.server._ import kafka.utils._ -import org.apache.kafka.common.errors.{ApiException, FencedLeaderEpochException, InconsistentTopicIdException, InvalidTxnStateException, NotLeaderOrFollowerException, OffsetNotAvailableException, OffsetOutOfRangeException, UnknownLeaderEpochException} +import org.apache.kafka.common.errors.{ApiException, FencedLeaderEpochException, InconsistentTopicIdException, InvalidTxnStateException, NotLeaderOrFollowerException, OffsetNotAvailableException, OffsetOutOfRangeException, PolicyViolationException, UnknownLeaderEpochException} import org.apache.kafka.common.message.{AlterPartitionResponseData, FetchResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.FileRecords.TimestampAndOffset import org.apache.kafka.common.record._ -import org.apache.kafka.common.requests.{AlterPartitionResponse, FetchRequest, LeaderAndIsrRequest, ListOffsetsRequest, RequestHeader} +import org.apache.kafka.common.requests.{AlterPartitionResponse, FetchRequest, ListOffsetsRequest, RequestHeader} import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{DirectoryId, IsolationLevel, TopicPartition, Uuid} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, PartitionRegistration} import org.apache.kafka.server.config.ReplicationConfigs -import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.replica.Replica import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.ArgumentMatchers @@ -61,11 +61,13 @@ import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, Unexpec import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, EpochEntry, LocalLog, LogAppendInfo, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetsListener, LogReadInfo, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, EpochEntry, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, 
LogOffsetMetadata, LogOffsetsListener, LogReadInfo, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource +import java.lang +import java.util import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOption @@ -101,7 +103,7 @@ object PartitionTest { /** * Verifies the callbacks that have been triggered since the last - * verification. Values different than `-1` are the ones that have + * verification. Values different from `-1` are the ones that have * been updated. */ def verify( @@ -185,7 +187,7 @@ class PartitionTest extends AbstractPartitionTest { val leaderEpoch = 10 val logStartOffset = 0L val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true) - addBrokerEpochToMockMetadataCache(metadataCache, List(remoteReplicaId)) + addBrokerEpochToMockMetadataCache(metadataCache, Array(remoteReplicaId)) def epochEndOffset(epoch: Int, endOffset: Long): FetchResponseData.EpochEndOffset = { new FetchResponseData.EpochEndOffset() @@ -304,24 +306,23 @@ class PartitionTest extends AbstractPartitionTest { @Test def testReplicaFetchToFollower(): Unit = { - val controllerEpoch = 3 val followerId = brokerId + 1 val leaderId = brokerId + 2 - val replicas = List[Integer](brokerId, followerId, leaderId).asJava - val isr = List[Integer](brokerId, followerId, leaderId).asJava + val replicas = Array(brokerId, followerId, leaderId) + val isr = Array(brokerId, followerId, leaderId) val leaderEpoch = 8 val partitionEpoch = 1 - assertTrue(partition.makeFollower(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(leaderId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(partitionEpoch) .setReplicas(replicas) - .setIsNew(true), - offsetCheckpoints, None - )) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeFollower(partitionRegistration, isNew = true, offsetCheckpoints, None)) def assertFetchFromReplicaFails[T <: ApiException]( expectedExceptionClass: Class[T], @@ -345,27 +346,26 @@ class PartitionTest extends AbstractPartitionTest { @Test def testFetchFromUnrecognizedFollower(): Unit = { - val controllerEpoch = 3 val leader = brokerId val validReplica = brokerId + 1 val addingReplica1 = brokerId + 2 val addingReplica2 = brokerId + 3 - val replicas = List(leader, validReplica) - val isr = List[Integer](leader, validReplica).asJava + val replicas = Array(leader, validReplica) + val isr = Array(leader, validReplica) val leaderEpoch = 8 val partitionEpoch = 1 - addBrokerEpochToMockMetadataCache(metadataCache, List(leader, addingReplica1, addingReplica2)) + addBrokerEpochToMockMetadataCache(metadataCache, Array(leader, addingReplica1, addingReplica2)) - assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + var partitionRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(partitionEpoch) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, topicId - )) + .setReplicas(replicas) + 
.setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, topicId)) assertThrows(classOf[UnknownLeaderEpochException], () => { fetchFollower( @@ -388,21 +388,21 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(None, partition.getReplica(addingReplica2).map(_.stateSnapshot.logEndOffset)) // The replicas are added as part of a reassignment - val newReplicas = List(leader, validReplica, addingReplica1, addingReplica2) + val newReplicas = Array(leader, validReplica, addingReplica1, addingReplica2) val newPartitionEpoch = partitionEpoch + 1 - val addingReplicas = List(addingReplica1, addingReplica2) + val addingReplicas = Array(addingReplica1, addingReplica2) - assertFalse(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + partitionRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(newPartitionEpoch) - .setReplicas(newReplicas.map(Int.box).asJava) - .setAddingReplicas(addingReplicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None - )) + .setReplicas(newReplicas) + .setAddingReplicas(addingReplicas) + .setDirectories(DirectoryId.unassignedArray(newReplicas.length)) + .build() + assertFalse(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None)) // Now the fetches are allowed assertEquals(0L, fetchFollower( @@ -428,6 +428,7 @@ class PartitionTest extends AbstractPartitionTest { val appendSemaphore = new Semaphore(0) val mockTime = new MockTime() val prevLeaderEpoch = 0 + val replicas = Array(0, 1, 2, brokerId) partition = new Partition( topicPartition, @@ -480,20 +481,22 @@ class PartitionTest extends AbstractPartitionTest { } partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, None) - var partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + var partitionRegistration = new PartitionRegistration.Builder() .setLeader(2) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(prevLeaderEpoch) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - .setIsNew(false) - assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None)) + val appendThread = new Thread { override def run(): Unit = { val records = createRecords( - List( + util.List.of( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes) ), @@ -506,15 +509,16 @@ class PartitionTest extends AbstractPartitionTest { appendThread.start() TestUtils.waitUntilTrue(() => appendSemaphore.hasQueuedThreads, "follower log append is not called.") - partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + partitionRegistration = new PartitionRegistration.Builder() .setLeader(2) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(prevLeaderEpoch + 1) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(2) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - .setIsNew(false) - 
assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) appendSemaphore.release() appendThread.join() @@ -664,7 +668,8 @@ class PartitionTest extends AbstractPartitionTest { val leaderEpoch = 5 val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true) - addBrokerEpochToMockMetadataCache(metadataCache, List(remoteReplicaId)) + addBrokerEpochToMockMetadataCache(metadataCache, Array(remoteReplicaId)) + def sendFetch(leaderEpoch: Option[Int]): LogReadInfo = { fetchFollower( partition, @@ -799,12 +804,11 @@ class PartitionTest extends AbstractPartitionTest { */ @Test def testMonotonicOffsetsAfterLeaderChange(): Unit = { - val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = List(leader, follower1, follower2) - val isr = List[Integer](leader, follower2).asJava + val replicas = Array(leader, follower1, follower2) + val isr = Array(leader, follower2) val leaderEpoch = 8 val batch1 = TestUtils.records(records = List( new SimpleRecord(10, "k1".getBytes, "v1".getBytes), @@ -814,16 +818,17 @@ class PartitionTest extends AbstractPartitionTest { new SimpleRecord(21,"k5".getBytes, "v3".getBytes))) addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val leaderRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") - assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch") assertEquals(Set[Integer](leader, follower2), partition.partitionState.isr, "ISR") @@ -885,28 +890,28 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(Right(None), fetchOffsetsForTimestamp(30, Some(IsolationLevel.READ_UNCOMMITTED))) // Make into a follower - val followerState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val followerRegistration = new PartitionRegistration.Builder() .setLeader(follower2) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 1) .setIsr(isr) .setPartitionEpoch(4) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertTrue(partition.makeFollower(followerState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeFollower(followerRegistration, isNew = false, offsetCheckpoints, None)) // Back to leader, this resets the startLogOffset for this epoch (to 2), we're now in the fault condition - val newLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val newLeaderRegistration = new PartitionRegistration.Builder() .setLeader(leader) + 
.setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 2) .setIsr(isr) .setPartitionEpoch(5) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(newLeaderRegistration, isNew = false, offsetCheckpoints, None)) // Try to get offsets as a client fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match { @@ -977,17 +982,19 @@ class PartitionTest extends AbstractPartitionTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val log = partition.localLogOrException val epoch = 1 + val replicas = Array(0, 1, 2, brokerId) // Start off as follower - val partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(1) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(epoch) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - .setIsNew(false) - partition.makeFollower(partitionState, offsetCheckpoints, None) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None) val initialLogStartOffset = 5L partition.truncateFullyAndStartAt(initialLogStartOffset, isFuture = false) @@ -1001,7 +1008,7 @@ class PartitionTest extends AbstractPartitionTest { classOf[UnexpectedAppendOffsetException], // append one record with offset = 3 () => partition.appendRecordsToFollowerOrFutureReplica( - createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), + createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), isFuture = false, partitionLeaderEpoch = epoch ) @@ -1012,7 +1019,7 @@ class PartitionTest extends AbstractPartitionTest { // verify that we can append records that contain log start offset, even when first // offset < log start offset if the log is empty val newLogStartOffset = 4L - val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes), + val records = createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), baseOffset = newLogStartOffset) @@ -1022,7 +1029,7 @@ class PartitionTest extends AbstractPartitionTest { // and we can append more records after that partition.appendRecordsToFollowerOrFutureReplica( - createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), + createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), isFuture = false, partitionLeaderEpoch = epoch ) @@ -1030,7 +1037,7 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset not expected to change:") // but we cannot append to offset < log start if the log is not empty - val records2 = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes), + val records2 = createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes)), baseOffset = 3L) assertThrows( @@ 
-1041,7 +1048,7 @@ class PartitionTest extends AbstractPartitionTest { // we still can append to next offset partition.appendRecordsToFollowerOrFutureReplica( - createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), + createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), isFuture = false, partitionLeaderEpoch = epoch ) @@ -1051,30 +1058,32 @@ class PartitionTest extends AbstractPartitionTest { @Test def testListOffsetIsolationLevels(): Unit = { - val controllerEpoch = 0 val leaderEpoch = 5 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(leaderEpoch, partition.getLeaderEpoch) - val records = createTransactionalRecords(List( + val records = createTransactionalRecords(util.List.of( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), baseOffset = 0L, producerId = 2L) - val verificationGuard = partition.maybeStartTransactionVerification(2L, 0, 0, true) + val verificationGuard = partition.maybeStartTransactionVerification(2L, 0, 0, supportsEpochBump = true) partition.appendRecordsToLeader(records, origin = AppendOrigin.CLIENT, requiredAcks = 0, RequestLocal.withThreadConfinedCaching, verificationGuard) def fetchOffset(isolationLevel: Option[IsolationLevel], timestamp: Long): TimestampAndOffset = { @@ -1130,7 +1139,7 @@ class PartitionTest extends AbstractPartitionTest { assertThrows( classOf[NotLeaderOrFollowerException], () => partition.appendRecordsToFollowerOrFutureReplica( - createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), + createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), isFuture = false, partitionLeaderEpoch = 0 ) @@ -1139,47 +1148,51 @@ class PartitionTest extends AbstractPartitionTest { @Test def testMakeFollowerWithNoLeaderIdChange(): Unit = { + val replicas = Array(0, 1, 2, brokerId) // Start off as follower - var partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + var partitionRegistration = new PartitionRegistration.Builder() .setLeader(1) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - .setIsNew(false) - partition.makeFollower(partitionState, offsetCheckpoints, None) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None) // Request with same leader and epoch increases 
by only 1, do become-follower steps - partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + partitionRegistration = new PartitionRegistration.Builder() .setLeader(1) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(4) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - .setIsNew(false) - assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) // Request with same leader and same epoch, skip become-follower steps - partitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + partitionRegistration = new PartitionRegistration.Builder() .setLeader(1) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(4) - .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) - assertFalse(partition.makeFollower(partitionState, offsetCheckpoints, None)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertFalse(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) } @Test def testFollowerDoesNotJoinISRUntilCaughtUpToOffsetWithinCurrentLeaderEpoch(): Unit = { - val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = Seq(leader, follower1, follower2) - val isr = List[Integer](leader, follower2).asJava + val replicas = Array(leader, follower1, follower2) + val isr = Array(leader, follower2) val leaderEpoch = 8 val batch1 = TestUtils.records(records = List(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes))) @@ -1188,17 +1201,18 @@ class PartitionTest extends AbstractPartitionTest { new SimpleRecord("k5".getBytes, "v3".getBytes))) val batch3 = TestUtils.records(records = List(new SimpleRecord("k6".getBytes, "v1".getBytes), new SimpleRecord("k7".getBytes, "v2".getBytes))) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val leaderRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch") assertEquals(Set[Integer](leader, follower2), partition.partitionState.isr, "ISR") @@ -1217,25 +1231,27 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(lastOffsetOfFirstBatch + 1, partition.log.get.highWatermark, "Expected leader's HW") // current leader becomes follower and then leader 
again (without any new records appended) - val followerState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val followerRegistration = new PartitionRegistration.Builder() .setLeader(follower2) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 1) .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - partition.makeFollower(followerState, offsetCheckpoints, None) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeFollower(followerRegistration, isNew = false, offsetCheckpoints, None) - val newLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val newLeaderRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 2) .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, topicId), + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(newLeaderRegistration, isNew = false, offsetCheckpoints, topicId), "Expected makeLeader() to return 'leader changed' after makeFollower()") val currentLeaderEpochStartOffset = partition.localLogOrException.logEndOffset @@ -1258,38 +1274,38 @@ class PartitionTest extends AbstractPartitionTest { Set(leader, follower1, follower2), "AlterIsr") } - def createRecords(records: Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = { - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) + def createRecords(records: lang.Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = { + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) val builder = MemoryRecords.builder( buf, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, TimestampType.LOG_APPEND_TIME, baseOffset, time.milliseconds, partitionLeaderEpoch) - records.foreach(builder.append) + records.forEach(builder.append) builder.build() } - def createIdempotentRecords(records: Iterable[SimpleRecord], + def createIdempotentRecords(records: lang.Iterable[SimpleRecord], baseOffset: Long, baseSequence: Int = 0, producerId: Long = 1L): MemoryRecords = { val producerEpoch = 0.toShort val isTransactional = false - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) val builder = MemoryRecords.builder(buf, Compression.NONE, baseOffset, producerId, producerEpoch, baseSequence, isTransactional) - records.foreach(builder.append) + records.forEach(builder.append) builder.build() } - def createTransactionalRecords(records: Iterable[SimpleRecord], + def createTransactionalRecords(records: lang.Iterable[SimpleRecord], baseOffset: Long, baseSequence: Int = 0, producerId: Long = 1L): MemoryRecords = { val producerEpoch = 0.toShort val isTransactional = true - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) val builder = MemoryRecords.builder(buf, Compression.NONE, baseOffset, producerId, producerEpoch, baseSequence, isTransactional) - records.foreach(builder.append) + records.forEach(builder.append) builder.build() } 
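
Note on the pattern repeated throughout these PartitionLockTest/PartitionTest hunks: every call site that previously built a LeaderAndIsrRequest.PartitionState (controller epoch, boxed Integer lists, setIsNew) now builds a PartitionRegistration through its Builder and passes the isNew flag directly to makeLeader/makeFollower. The sketch below is only an illustration of that shape, assuming the fixtures already present in these tests (partition, brokerId, offsetCheckpoints) and the imports the diff adds at the top of the files (org.apache.kafka.common.DirectoryId, org.apache.kafka.metadata.{LeaderRecoveryState, PartitionRegistration}); the leader epoch and replica ids are placeholder values, not taken from any particular test.

    // Replicas and ISR are plain Int arrays now, not java.util.List[Integer].
    val replicas = Array(brokerId, brokerId + 1)
    val registration = new PartitionRegistration.Builder()
      .setLeader(brokerId)
      .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) // required by the builder
      .setLeaderEpoch(5)                                      // placeholder epoch
      .setIsr(replicas)
      .setPartitionEpoch(1)
      .setReplicas(replicas)
      // The builder requires directory assignments; these tests use unassigned ids.
      .setDirectories(DirectoryId.unassignedArray(replicas.length))
      .build()
    // isNew moved from PartitionState.setIsNew(...) to an explicit argument;
    // tests typically wrap this call in assertTrue(...) to check the transition result.
    partition.makeLeader(registration, isNew = true, offsetCheckpoints, None)

The same builder result is handed to makeFollower in the follower-side tests; only the method and the isNew value change, which is why most hunks in this file differ only in those two spots.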
@@ -1299,30 +1315,31 @@ class PartitionTest extends AbstractPartitionTest { */ @Test def testAtMinIsr(): Unit = { - val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = List[Integer](leader, follower1, follower2).asJava - val isr = List[Integer](leader).asJava + val replicas = Array(leader, follower1, follower2) + val isr = Array(leader) val leaderEpoch = 8 assertFalse(partition.isAtMinIsr) // Make isr set to only have leader to trigger AtMinIsr (default min isr config is 1) - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val leaderRegistration = new PartitionRegistration.Builder() .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(true) - partition.makeLeader(leaderState, offsetCheckpoints, None) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, None) assertTrue(partition.isAtMinIsr) } @Test def testIsUnderMinIsr(): Unit = { + val replicas = Array(brokerId, brokerId + 1) configRepository.setTopicConfig(topicPartition.topic, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, @@ -1335,29 +1352,29 @@ class PartitionTest extends AbstractPartitionTest { logManager, alterPartitionManager) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = None) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + + var leaderRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(replicas) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, None) assertFalse(partition.isUnderMinIsr) - val LeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) + leaderRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(List(brokerId).map(Int.box).asJava) + .setIsr(Array(brokerId)) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) .setPartitionEpoch(2) - .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setIsNew(false) - - partition.makeLeader(LeaderState, offsetCheckpoints, None) + .build() + partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) assertTrue(partition.isUnderMinIsr) } @@ -1366,26 +1383,25 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) seedLogData(log, numRecords = 6, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) val isr = replicas - 
addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val initializeTimeMs = time.milliseconds() - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, @@ -1430,26 +1446,24 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) seedLogData(log, numRecords = 6, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = List(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val initializeTimeMs = time.milliseconds() - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(List[Integer](brokerId).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") - + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(Array(brokerId)) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") doAnswer(_ => { // simulate topic is deleted at the moment partition.delete() @@ -1466,24 +1480,23 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = List[Integer](brokerId).asJava - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) 
- .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, @@ -1518,24 +1531,23 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = List(brokerId, remoteBrokerId) - val isr = List[Integer](brokerId).asJava + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, @@ -1555,7 +1567,7 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = 10L) assertEquals(alterPartitionManager.isrUpdates.size, 1) val isrItem = alterPartitionManager.isrUpdates.head - assertEquals(isrItem.leaderAndIsr.isr, List(brokerId, remoteBrokerId).map(Int.box).asJava) + assertEquals(isrItem.leaderAndIsr.isr, util.List.of[Integer](brokerId, remoteBrokerId)) isrItem.leaderAndIsr.isrWithBrokerEpoch.asScala.foreach { brokerState => // the broker epochs should be equal to broker epoch of the leader assertEquals(defaultBrokerEpoch(brokerState.brokerId()), brokerState.brokerEpoch()) @@ -1582,24 +1594,23 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = List[Integer](brokerId).asJava - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica 
= false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, @@ -1638,11 +1649,10 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = List(brokerId, remoteBrokerId) - val shrinkedIsr = Set(brokerId) + val replicas = Array(brokerId, remoteBrokerId) + val shrinkedIsr = Array(brokerId) addBrokerEpochToMockMetadataCache(metadataCache, replicas) @@ -1660,21 +1670,17 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue( - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false), - offsetCheckpoints, - None - ), - "Expected become leader transition to succeed" - ) + var partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(replicas.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) @@ -1702,28 +1708,23 @@ class PartitionTest extends AbstractPartitionTest { seedLogData(log, numRecords = 10, leaderEpoch) // Controller shrinks the ISR after - assertFalse( - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(shrinkedIsr.toList.map(Int.box).asJava) - .setPartitionEpoch(2) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false), - offsetCheckpoints, - None - ), - "Expected to stay leader" - ) + partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(shrinkedIsr) + .setPartitionEpoch(2) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertFalse(partition.makeLeader(partitionRegistration, isNew = false, 
offsetCheckpoints, None), "Expected to stay leader") assertTrue(partition.isLeader) - assertEquals(shrinkedIsr, partition.partitionState.isr) - assertEquals(shrinkedIsr, partition.partitionState.maximalIsr) + assertEquals(shrinkedIsr.toSet, partition.partitionState.isr) + assertEquals(shrinkedIsr.toSet, partition.partitionState.maximalIsr) assertEquals(Set.empty, partition.getOutOfSyncReplicas(partition.replicaLagTimeMaxMs)) - // In the case of unfenced, the HWM doesn't increase, otherwise the the HWM increases because the + // In the case of unfenced, the HWM doesn't increase, otherwise the HWM increases because the // fenced and shutdown replica is not considered during HWM calculation. if (brokerState == "unfenced") { assertEquals(10, partition.localLogOrException.highWatermark) @@ -1732,17 +1733,15 @@ class PartitionTest extends AbstractPartitionTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIsrNotExpandedIfReplicaIsFencedOrShutdown(quorum: String): Unit = { + @Test + def testIsrNotExpandedIfReplicaIsFencedOrShutdown(): Unit = { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = List(brokerId, remoteBrokerId) - val isr = Set(brokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) addBrokerEpochToMockMetadataCache(metadataCache, replicas) @@ -1766,18 +1765,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) markRemoteReplicaEligible(true) @@ -1793,7 +1792,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -1801,8 +1800,8 @@ class PartitionTest extends AbstractPartitionTest { alterPartitionManager.failIsrUpdate(Errors.INELIGIBLE_REPLICA) // The leader reverts back to the previous ISR. 
- assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -1813,8 +1812,8 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is not triggered because the follower is fenced. - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -1825,7 +1824,7 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is triggered. - assertEquals(isr, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertTrue(partition.partitionState.isInflight) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -1845,12 +1844,11 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = List(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Set(brokerId, remoteBrokerId2) + val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Array(brokerId, remoteBrokerId2) val metadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) @@ -1872,21 +1870,21 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) // Fetch to let the follower catch up to the log end offset, but using a wrong broker epoch. The expansion should fail. 
- addBrokerEpochToMockMetadataCache(metadataCache, List(brokerId, remoteBrokerId2)) + addBrokerEpochToMockMetadataCache(metadataCache, Array(brokerId, remoteBrokerId2)) // Create a race case where the replica epoch get bumped right after the previous fetch succeeded. val wrongReplicaEpoch = defaultBrokerEpoch(remoteBrokerId1) - 1 when(metadataCache.getAliveBrokerEpoch(remoteBrokerId1)).thenReturn(Optional.of(wrongReplicaEpoch), Optional.of(defaultBrokerEpoch(remoteBrokerId1))) @@ -1904,8 +1902,8 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is not triggered. - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) assertEquals(0, alterPartitionManager.isrUpdates.size) // Fetch again, this time with correct default broker epoch. @@ -1922,7 +1920,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) val isrUpdate = alterPartitionManager.isrUpdates.head @@ -1940,11 +1938,10 @@ class PartitionTest extends AbstractPartitionTest { def testFenceFollowerFetchWithStaleBrokerEpoch(): Unit = { val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 - val replicas = List(brokerId, remoteBrokerId1) - val isr = Set(brokerId, remoteBrokerId1) + val replicas = Array(brokerId, remoteBrokerId1) + val isr = Array(brokerId, remoteBrokerId1) addBrokerEpochToMockMetadataCache(metadataCache, replicas) @@ -1962,18 +1959,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) val expectedReplicaEpoch = defaultBrokerEpoch(remoteBrokerId1) fetchFollower(partition, @@ -2005,11 +2002,10 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = List(brokerId, 
remoteBrokerId) - val isr = Set(brokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( @@ -2026,18 +2022,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) // Fetch to let the follower catch up to the log end offset and // to check if an expansion is possible. @@ -2051,7 +2047,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -2059,8 +2055,8 @@ class PartitionTest extends AbstractPartitionTest { alterPartitionManager.failIsrUpdate(Errors.INELIGIBLE_REPLICA) // The leader reverts back to the previous ISR. - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -2071,8 +2067,8 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is not triggered because the follower is fenced. - assertEquals(isr, partition.partitionState.isr) - assertEquals(isr, partition.partitionState.maximalIsr) + assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -2083,7 +2079,7 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is triggered. 
- assertEquals(isr, partition.partitionState.isr) + assertEquals(isr.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertTrue(partition.partitionState.isInflight) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -2103,16 +2099,14 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId, remoteBrokerId) val topicId = Uuid.randomUuid() assertTrue(makeLeader( topicId = Some(topicId), - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2127,7 +2121,7 @@ class PartitionTest extends AbstractPartitionTest { // Try to shrink the ISR partition.maybeShrinkIsr() assertEquals(alterPartitionManager.isrUpdates.size, 1) - assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId).map(Int.box).asJava) + assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, util.List.of[Integer](brokerId)) assertEquals(Set(brokerId, remoteBrokerId), partition.partitionState.isr) assertEquals(Set(brokerId, remoteBrokerId), partition.partitionState.maximalIsr) @@ -2156,16 +2150,15 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) + val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Array(brokerId, remoteBrokerId1, remoteBrokerId2) val initializeTimeMs = time.milliseconds() val metadataCache = mock(classOf[KRaftMetadataCache]) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( topicPartition, @@ -2181,16 +2174,16 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(0L, partition.localLogOrException.highWatermark) fetchFollower(partition, replicaId = remoteBrokerId1, fetchOffset = log.logEndOffset) @@ -2213,7 +2206,7 @@ class PartitionTest extends AbstractPartitionTest { 
partition.maybeShrinkIsr() assertEquals(0, alterPartitionListener.shrinks.get) assertEquals(alterPartitionManager.isrUpdates.size, 1) - assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId, remoteBrokerId1).map(Int.box).asJava) + assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, util.List.of[Integer](brokerId, remoteBrokerId1)) val isrUpdate = alterPartitionManager.isrUpdates.head isrUpdate.leaderAndIsr.isrWithBrokerEpoch.asScala.foreach { brokerState => assertEquals(defaultBrokerEpoch(brokerState.brokerId()), brokerState.brokerEpoch()) @@ -2239,14 +2232,13 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Seq(brokerId, remoteBrokerId1) + val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Array(brokerId, remoteBrokerId1) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( topicPartition, @@ -2262,16 +2254,16 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.toList.map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true), - offsetCheckpoints, None), "Expected become leader transition to succeed") + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertTrue(partition.isUnderMinIsr) assertEquals(0L, partition.localLogOrException.highWatermark) @@ -2296,16 +2288,14 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2328,7 +2318,6 @@ class PartitionTest extends AbstractPartitionTest { // Become leader again, reset the ISR state assertFalse(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2354,17 +2343,15 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 
val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2414,17 +2401,15 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2462,16 +2447,14 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId, remoteBrokerId) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2549,16 +2532,14 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Seq(brokerId, remoteBrokerId) - val isr = Seq(brokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + val replicas = Array(brokerId, remoteBrokerId) + val isr = Array(brokerId) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2595,7 +2576,7 @@ class PartitionTest extends AbstractPartitionTest { private def createClientResponseWithAlterPartitionResponse( topicPartition: TopicPartition, partitionErrorCode: Short, - isr: List[Int] = List.empty, + isr: util.List[Integer] = util.List.of[Integer], leaderEpoch: Int = 0, partitionEpoch: Int = 0 ): ClientResponse = { @@ -2604,7 +2585,7 @@ class PartitionTest extends AbstractPartitionTest { topicResponse.partitions.add(new AlterPartitionResponseData.PartitionData() .setPartitionIndex(topicPartition.partition) - .setIsr(isr.map(Integer.valueOf).asJava) + .setIsr(isr) .setLeaderEpoch(leaderEpoch) .setPartitionEpoch(partitionEpoch) .setErrorCode(partitionErrorCode)) @@ -2641,15 +2622,14 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 
10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val follower1 = brokerId + 1 val follower2 = brokerId + 2 val follower3 = brokerId + 3 - val replicas = Seq(brokerId, follower1, follower2, follower3) - val isr = Seq(brokerId, follower1, follower2) + val replicas = Array(brokerId, follower1, follower2, follower3) + val isr = Array(brokerId, follower1, follower2) val partitionEpoch = 1 - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) doNothing().when(delayedOperations).checkAndCompleteAll() @@ -2659,7 +2639,7 @@ class PartitionTest extends AbstractPartitionTest { // Complete the ISR expansion val alterPartitionResponseWithoutError = - createClientResponseWithAlterPartitionResponse(topicPartition, Errors.NONE.code, List(brokerId, follower1, follower2, follower3), leaderEpoch, partitionEpoch + 1) + createClientResponseWithAlterPartitionResponse(topicPartition, Errors.NONE.code, util.List.of[Integer](brokerId, follower1, follower2, follower3), leaderEpoch, partitionEpoch + 1) when(mockChannelManager.sendRequest(any(), any())) .thenAnswer { invocation => @@ -2673,7 +2653,6 @@ class PartitionTest extends AbstractPartitionTest { assertTrue(makeLeader( topicId = topicId, - controllerEpoch, leaderEpoch, isr, replicas, @@ -2698,20 +2677,18 @@ class PartitionTest extends AbstractPartitionTest { val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) seedLogData(log, numRecords = 10, leaderEpoch = 4) - val controllerEpoch = 0 val leaderEpoch = 5 val follower1 = brokerId + 1 val follower2 = brokerId + 2 val follower3 = brokerId + 3 - val replicas = Seq(brokerId, follower1, follower2, follower3) - val isr = Seq(brokerId, follower1, follower2) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) + val replicas = Array(brokerId, follower1, follower2, follower3) + val isr = Array(brokerId, follower1, follower2) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) doNothing().when(delayedOperations).checkAndCompleteAll() assertTrue(makeLeader( topicId = topicId, - controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2742,35 +2719,35 @@ class PartitionTest extends AbstractPartitionTest { when(offsetCheckpoints.fetch(logDir1.getAbsolutePath, topicPartition)) .thenReturn(Optional.of(long2Long(4L))) - val controllerEpoch = 3 - val replicas = List[Integer](brokerId, brokerId + 1).asJava - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val replicas = Array(brokerId, brokerId + 1) + val leaderRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(false) - partition.makeLeader(leaderState, offsetCheckpoints, None) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) assertEquals(4, partition.localLogOrException.highWatermark) } @Test def testTopicIdAndPartitionMetadataFileForLeader(): Unit = { - val controllerEpoch = 3 val leaderEpoch = 5 val topicId = Uuid.randomUuid() - val replicas = List[Integer](brokerId, brokerId + 1).asJava - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val replicas = Array(brokerId, brokerId + 1) + val leaderRegistration = new 
PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(false) - partition.makeLeader(leaderState, offsetCheckpoints, Some(topicId)) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, Some(topicId)) checkTopicId(topicId, partition) @@ -2793,28 +2770,28 @@ class PartitionTest extends AbstractPartitionTest { // Calling makeLeader with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException. // This scenario should not occur, since the topic ID check will fail. - assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeLeader(leaderState, offsetCheckpoints, Some(Uuid.randomUuid()))) + assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, Some(Uuid.randomUuid()))) // Calling makeLeader with no topic ID should not overwrite the old topic ID. We should get the original log. - partition2.makeLeader(leaderState, offsetCheckpoints, None) + partition2.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) checkTopicId(topicId, partition2) } @Test def testTopicIdAndPartitionMetadataFileForFollower(): Unit = { - val controllerEpoch = 3 val leaderEpoch = 5 val topicId = Uuid.randomUuid() - val replicas = List[Integer](brokerId, brokerId + 1).asJava - val leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val replicas = Array(brokerId, brokerId + 1) + val leaderRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(false) - partition.makeFollower(leaderState, offsetCheckpoints, Some(topicId)) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + partition.makeFollower(leaderRegistration, isNew = false, offsetCheckpoints, Some(topicId)) checkTopicId(topicId, partition) @@ -2837,10 +2814,10 @@ class PartitionTest extends AbstractPartitionTest { // Calling makeFollower with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException. // This scenario should not occur, since the topic ID check will fail. - assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeFollower(leaderState, offsetCheckpoints, Some(Uuid.randomUuid()))) + assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeFollower(leaderRegistration, isNew = false, offsetCheckpoints, Some(Uuid.randomUuid()))) // Calling makeFollower with no topic ID should not overwrite the old topic ID. We should get the original log. 
- partition2.makeFollower(leaderState, offsetCheckpoints, None) + partition2.makeFollower(leaderRegistration, isNew = false, offsetCheckpoints, None) checkTopicId(topicId, partition2) } @@ -2879,23 +2856,20 @@ class PartitionTest extends AbstractPartitionTest { @Test def testUnderReplicatedPartitionsCorrectSemantics(): Unit = { - val controllerEpoch = 3 - val replicas = List[Integer](brokerId, brokerId + 1, brokerId + 2).asJava - val isr = List[Integer](brokerId, brokerId + 1).asJava - - var leaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val replicas = Array(brokerId, brokerId + 1, brokerId + 2) + val isr = Array(brokerId, brokerId + 1) + val leaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(false) - partition.makeLeader(leaderState, offsetCheckpoints, None) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + partition.makeLeader(leaderRegistrationBuilder.build(), isNew = false, offsetCheckpoints, None) assertTrue(partition.isUnderReplicated) - leaderState = leaderState.setIsr(replicas) - partition.makeLeader(leaderState, offsetCheckpoints, None) + partition.makeLeader(leaderRegistrationBuilder.setIsr(replicas).build(), isNew = false, offsetCheckpoints, None) assertFalse(partition.isUnderReplicated) } @@ -3079,24 +3053,22 @@ class PartitionTest extends AbstractPartitionTest { @Test def testDoNotResetReplicaStateIfLeaderEpochIsNotBumped(): Unit = { - val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = List(leaderId, followerId) + val replicas = Array(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val initialLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val LeaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(leaderId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) + .setIsr(Array(leaderId)) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3120,16 +3092,7 @@ class PartitionTest extends AbstractPartitionTest { // makeLeader is called again with the same leader epoch but with // a newer partition epoch. This can happen in KRaft when a partition // is reassigned. The leader epoch is not bumped when we add replicas. 
- val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(leaderId) - .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) - .setPartitionEpoch(2) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) + assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(2).build(), isNew = false, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3144,23 +3107,21 @@ class PartitionTest extends AbstractPartitionTest { @Test def testDoNotUpdateEpochStartOffsetIfLeaderEpochIsNotBumped(): Unit = { - val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = List(leaderId, followerId) + val replicas = Array(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val initialLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val LeaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(leaderId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) + .setIsr(Array(leaderId)) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3177,16 +3138,7 @@ class PartitionTest extends AbstractPartitionTest { // makeLeader is called again with the same leader epoch but with // a newer partition epoch. 
- val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(leaderId) - .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) - .setPartitionEpoch(2) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) + assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(2).build(), isNew = false, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3196,144 +3148,114 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIgnoreLeaderPartitionStateChangeWithOlderPartitionEpoch(): Unit = { - val controllerEpoch = 3 val leaderId = brokerId - val replicas = List(leaderId) + val replicas = Array(leaderId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val initialLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val LeaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(leaderId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) + .setIsr(Array(leaderId)) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) // makeLeader is called again with the same leader epoch but with // a older partition epoch. 
- val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(leaderId) - .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId).map(Int.box).asJava) - .setPartitionEpoch(0) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) + assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(0).build(), isNew = false, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) } @Test def testIgnoreFollowerPartitionStateChangeWithOlderPartitionEpoch(): Unit = { - val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = List(leaderId, followerId) + val replicas = Array(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val initialFollowerState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val LeaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(followerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId, followerId).map(Int.box).asJava) + .setIsr(Array(leaderId)) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertTrue(partition.makeFollower(initialFollowerState, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + assertTrue(partition.makeFollower(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) // makeLeader is called again with the same leader epoch but with // a older partition epoch. - val updatedFollowerState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(followerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(List(leaderId, followerId).map(Int.box).asJava) - .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertFalse(partition.makeFollower(updatedFollowerState, offsetCheckpoints, Some(topicId))) + assertFalse(partition.makeFollower(LeaderRegistrationBuilder.setIsr(Array(leaderId, followerId)).build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) } @Test def testFollowerShouldNotHaveAnyRemoteReplicaStates(): Unit = { - val controllerEpoch = 3 val localReplica = brokerId val remoteReplica1 = brokerId + 1 val remoteReplica2 = brokerId + 2 - val replicas = List(localReplica, remoteReplica1, remoteReplica2) + val replicas = Array(localReplica, remoteReplica1, remoteReplica2) val topicId = Uuid.randomUuid() // The local replica is the leader. 
- val initialLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val leaderRegistrationBuilder = new PartitionRegistration.Builder() .setLeader(localReplica) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(replicas.map(Int.box).asJava) + .setIsr(replicas) .setPartitionEpoch(1) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(true) - - assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + assertTrue(partition.makeLeader(leaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(1, partition.getLeaderEpoch) assertEquals(Some(localReplica), partition.leaderReplicaIdOpt) assertEquals(replicas.toSet, partition.partitionState.isr) assertEquals(Seq(remoteReplica1, remoteReplica2), partition.remoteReplicas.map(_.brokerId).toSeq) - assertEquals(replicas, partition.assignmentState.replicas) + assertEquals(replicas.toSeq, partition.assignmentState.replicas) // The local replica becomes a follower. - val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val updatedLeaderRegistration = leaderRegistrationBuilder .setLeader(remoteReplica1) .setLeaderEpoch(2) - .setIsr(replicas.map(Int.box).asJava) .setPartitionEpoch(2) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(false) - - assertTrue(partition.makeFollower(updatedLeaderState, offsetCheckpoints, Some(topicId))) + .build() + assertTrue(partition.makeFollower(updatedLeaderRegistration, isNew = false, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(2, partition.getLeaderEpoch) assertEquals(Some(remoteReplica1), partition.leaderReplicaIdOpt) assertEquals(Set.empty, partition.partitionState.isr) assertEquals(Seq.empty, partition.remoteReplicas.map(_.brokerId).toSeq) - assertEquals(replicas, partition.assignmentState.replicas) + assertEquals(replicas.toSeq, partition.assignmentState.replicas) } @Test def testAddAndRemoveListeners(): Unit = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val replicas = Seq(brokerId, brokerId + 1) + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(isr.map(Int.box).asJava) - .setReplicas(replicas.map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) - + addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(isr) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) val listener1 = new MockPartitionListener() val listener2 = new MockPartitionListener() @@ -3390,19 +3312,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testAddListenerFailsWhenPartitionIsDeleted(): Unit = { + val replicas = Array(brokerId, brokerId + 1) 
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(replicas) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) partition.delete() @@ -3413,20 +3335,19 @@ class PartitionTest extends AbstractPartitionTest { def testPartitionListenerWhenLogOffsetsChanged(): Unit = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val replicas = Seq(brokerId, brokerId + 1) - val isr = Seq(brokerId, brokerId + 1) - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(isr.map(Int.box).asJava) - .setReplicas(replicas.map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + val replicas = Array(brokerId, brokerId + 1) + val isr = Array(brokerId, brokerId + 1) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(isr) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3449,26 +3370,26 @@ class PartitionTest extends AbstractPartitionTest { listener.verify(expectedHighWatermark = partition.localLogOrException.logEndOffset) - partition.truncateFullyAndStartAt(0L, false) + partition.truncateFullyAndStartAt(0L, isFuture = false) listener.verify(expectedHighWatermark = 0L) } @Test def testPartitionListenerWhenPartitionFailed(): Unit = { + val replicas = Array(brokerId, brokerId + 1) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(replicas) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) val listener = new MockPartitionListener() 
assertTrue(partition.maybeAddListener(listener)) @@ -3480,19 +3401,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testPartitionListenerWhenPartitionIsDeleted(): Unit = { + val replicas = Array(brokerId, brokerId + 1) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(0) - .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(0) + .setIsr(replicas) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3508,21 +3429,20 @@ class PartitionTest extends AbstractPartitionTest { partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, topicId = topicId) assertTrue(partition.log.isDefined) - val replicas = Seq(brokerId, brokerId + 1) + val replicas = Array(brokerId, brokerId + 1) val isr = replicas val epoch = 0 - addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) - partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(0) - .setLeader(brokerId) - .setLeaderEpoch(epoch) - .setIsr(isr.map(Int.box).asJava) - .setReplicas(replicas.map(Int.box).asJava) - .setPartitionEpoch(1) - .setIsNew(true), - offsetCheckpoints, - topicId = None) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(epoch) + .setIsr(isr) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setPartitionEpoch(1) + .build() + partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3576,25 +3496,26 @@ class PartitionTest extends AbstractPartitionTest { @Test def testMaybeStartTransactionVerification(): Unit = { - val controllerEpoch = 0 val leaderEpoch = 5 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas val producerId = 22L partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(leaderEpoch, 
partition.getLeaderEpoch) - val idempotentRecords = createIdempotentRecords(List( + val idempotentRecords = createIdempotentRecords(util.List.of( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), @@ -3602,7 +3523,7 @@ class PartitionTest extends AbstractPartitionTest { producerId = producerId) partition.appendRecordsToLeader(idempotentRecords, origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching) - def transactionRecords() = createTransactionalRecords(List( + def transactionRecords() = createTransactionalRecords(util.List.of( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), @@ -3614,7 +3535,7 @@ class PartitionTest extends AbstractPartitionTest { assertThrows(classOf[InvalidTxnStateException], () => partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching)) // Before appendRecordsToLeader is called, ReplicaManager will call maybeStartTransactionVerification. We should get a non-sentinel VerificationGuard. - val verificationGuard = partition.maybeStartTransactionVerification(producerId, 3, 0, true) + val verificationGuard = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) // With the wrong VerificationGuard, append should fail. @@ -3622,22 +3543,21 @@ class PartitionTest extends AbstractPartitionTest { origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching, new VerificationGuard())) // We should return the same VerificationGuard when we still need to verify. Append should proceed. - val verificationGuard2 = partition.maybeStartTransactionVerification(producerId, 3, 0, true) + val verificationGuard2 = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) assertEquals(verificationGuard, verificationGuard2) partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching, verificationGuard) // We should no longer need a VerificationGuard. Future appends without VerificationGuard will also succeed. 
- val verificationGuard3 = partition.maybeStartTransactionVerification(producerId, 3, 0, true) + val verificationGuard3 = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) assertEquals(VerificationGuard.SENTINEL, verificationGuard3) partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching) } private def makeLeader( topicId: Option[Uuid], - controllerEpoch: Int, leaderEpoch: Int, - isr: Seq[Int], - replicas: Seq[Int], + isr: Array[Int], + replicas: Array[Int], partitionEpoch: Int, isNew: Boolean, partition: Partition = partition @@ -3648,18 +3568,16 @@ class PartitionTest extends AbstractPartitionTest { offsetCheckpoints, topicId ) - val newLeader = partition.makeLeader( - new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) - .setLeader(brokerId) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr.map(Int.box).asJava) - .setPartitionEpoch(partitionEpoch) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(isNew), - offsetCheckpoints, - topicId - ) + val partitionRegistration = new PartitionRegistration.Builder() + .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(partitionEpoch) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + val newLeader = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, topicId) assertTrue(partition.isLeader) assertFalse(partition.partitionState.isInflight) assertEquals(topicId, partition.topicId) @@ -3804,7 +3722,7 @@ class PartitionTest extends AbstractPartitionTest { ) } - private def addBrokerEpochToMockMetadataCache(metadataCache: MetadataCache, brokers: List[Int]): Unit = { + private def addBrokerEpochToMockMetadataCache(metadataCache: MetadataCache, brokers: Array[Int]): Unit = { brokers.foreach { broker => when(metadataCache.getAliveBrokerEpoch(broker)).thenReturn(Optional.of(defaultBrokerEpoch(broker))) } @@ -3830,25 +3748,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(true) // When - val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -3875,25 +3793,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val 
replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(true) // When - val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -3920,25 +3838,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(false) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -3965,25 +3883,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(false) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -4010,25 +3928,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, 
alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.UNASSIGNED when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(partitionRegistration, isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -4056,25 +3974,25 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) - val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = List[Integer](brokerId, brokerId + 1).asJava + val replicas = Array(brokerId, brokerId + 1) val isr = replicas - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setControllerEpoch(controllerEpoch) + val partitionRegistration = new PartitionRegistration.Builder() .setLeader(brokerId) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setIsNew(isNew) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.UNASSIGNED when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(partitionRegistration, isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) @@ -4112,4 +4030,46 @@ class PartitionTest extends AbstractPartitionTest { alterPartitionManager) partition.tryCompleteDelayedRequests() } + + @Test + def testDeleteRecordsOnLeaderWithEmptyPolicy(): Unit = { + val leaderEpoch = 5 + val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true) + + val emptyPolicyConfig = new LogConfig(util.Map.of( + TopicConfig.CLEANUP_POLICY_CONFIG, "" + )) + + val mockLog = mock(classOf[UnifiedLog]) + when(mockLog.config).thenReturn(emptyPolicyConfig) + when(mockLog.logEndOffset).thenReturn(2L) + when(mockLog.logStartOffset).thenReturn(0L) + when(mockLog.highWatermark).thenReturn(2L) + when(mockLog.maybeIncrementLogStartOffset(any(), any())).thenReturn(true) + + partition.setLog(mockLog, false) + + val result = partition.deleteRecordsOnLeader(1L) + assertEquals(1L, result.requestedOffset) + } + + @Test + def testDeleteRecordsOnLeaderWithCompactPolicy(): Unit = { + val leaderEpoch = 5 + val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true) + + val emptyPolicyConfig = new LogConfig(util.Map.of( + TopicConfig.CLEANUP_POLICY_CONFIG, "compact" + )) + + val mockLog = mock(classOf[UnifiedLog]) + when(mockLog.config).thenReturn(emptyPolicyConfig) + 
when(mockLog.logEndOffset).thenReturn(2L) + when(mockLog.logStartOffset).thenReturn(0L) + when(mockLog.highWatermark).thenReturn(2L) + when(mockLog.maybeIncrementLogStartOffset(any(), any())).thenReturn(true) + + partition.setLog(mockLog, false) + assertThrows(classOf[PolicyViolationException], () => partition.deleteRecordsOnLeader(1L)) + } } diff --git a/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala b/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala deleted file mode 100644 index 81f0a7fae1519..0000000000000 --- a/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.cluster - -import kafka.server.metadata.KRaftMetadataCache -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.NotLeaderOrFollowerException -import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{LogOffsetMetadata, UnifiedLog} -import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} -import org.junit.jupiter.api.{BeforeEach, Test} -import org.mockito.Mockito.{mock, when} - -import java.util.Optional - -object ReplicaTest { - val BrokerId: Int = 0 - val Partition: TopicPartition = new TopicPartition("foo", 0) - val ReplicaLagTimeMaxMs: Long = 30000 -} - -class ReplicaTest { - import ReplicaTest._ - - val time = new MockTime() - var replica: Replica = _ - - @BeforeEach - def setup(): Unit = { - val metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Optional.of(1L)) - replica = new Replica(BrokerId, Partition, metadataCache) - } - - private def assertReplicaState( - logStartOffset: Long, - logEndOffset: Long, - lastCaughtUpTimeMs: Long, - lastFetchLeaderLogEndOffset: Long, - lastFetchTimeMs: Long, - brokerEpoch: Option[Long] = Option[Long](1L) - ): Unit = { - val replicaState = replica.stateSnapshot - assertEquals(logStartOffset, replicaState.logStartOffset, - "Unexpected Log Start Offset") - assertEquals(logEndOffset, replicaState.logEndOffset, - "Unexpected Log End Offset") - assertEquals(lastCaughtUpTimeMs, replicaState.lastCaughtUpTimeMs, - "Unexpected Last Caught Up Time") - assertEquals(lastFetchLeaderLogEndOffset, replicaState.lastFetchLeaderLogEndOffset, - "Unexpected Last Fetch Leader Log End Offset") - assertEquals(lastFetchTimeMs, replicaState.lastFetchTimeMs, - "Unexpected Last Fetch Time") - assertEquals(brokerEpoch, replicaState.brokerEpoch, - "Broker Epoch Mismatch") - } - - def assertReplicaStateDoesNotChange( - op: => Unit - ): Unit = { - val previousState = replica.stateSnapshot - - op - - assertReplicaState( - logStartOffset = previousState.logStartOffset, - logEndOffset = 
previousState.logEndOffset, - lastCaughtUpTimeMs = previousState.lastCaughtUpTimeMs, - lastFetchLeaderLogEndOffset = previousState.lastFetchLeaderLogEndOffset, - lastFetchTimeMs = previousState.lastFetchTimeMs - ) - } - - private def updateFetchState( - followerFetchOffset: Long, - followerStartOffset: Long, - leaderEndOffset: Long - ): Long = { - val currentTimeMs = time.milliseconds() - replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(followerFetchOffset), - followerStartOffset = followerStartOffset, - followerFetchTimeMs = currentTimeMs, - leaderEndOffset = leaderEndOffset, - brokerEpoch = 1L - ) - currentTimeMs - } - - private def resetReplicaState( - leaderEndOffset: Long, - isNewLeader: Boolean, - isFollowerInSync: Boolean - ): Long = { - val currentTimeMs = time.milliseconds() - replica.resetReplicaState( - currentTimeMs = currentTimeMs, - leaderEndOffset = leaderEndOffset, - isNewLeader = isNewLeader, - isFollowerInSync = isFollowerInSync - ) - currentTimeMs - } - - private def isCaughtUp( - leaderEndOffset: Long - ): Boolean = { - replica.stateSnapshot.isCaughtUp( - leaderEndOffset = leaderEndOffset, - currentTimeMs = time.milliseconds(), - replicaMaxLagMs = ReplicaLagTimeMaxMs - ) - } - - @Test - def testInitialState(): Unit = { - assertReplicaState( - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastCaughtUpTimeMs = 0L, - lastFetchLeaderLogEndOffset = 0L, - lastFetchTimeMs = 0L, - brokerEpoch = Option.empty - ) - } - - @Test - def testUpdateFetchState(): Unit = { - val fetchTimeMs1 = updateFetchState( - followerFetchOffset = 5L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - assertReplicaState( - logStartOffset = 1L, - logEndOffset = 5L, - lastCaughtUpTimeMs = 0L, - lastFetchLeaderLogEndOffset = 10L, - lastFetchTimeMs = fetchTimeMs1 - ) - - val fetchTimeMs2 = updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 2L, - leaderEndOffset = 15L - ) - - assertReplicaState( - logStartOffset = 2L, - logEndOffset = 10L, - lastCaughtUpTimeMs = fetchTimeMs1, - lastFetchLeaderLogEndOffset = 15L, - lastFetchTimeMs = fetchTimeMs2 - ) - - val fetchTimeMs3 = updateFetchState( - followerFetchOffset = 15L, - followerStartOffset = 3L, - leaderEndOffset = 15L - ) - - assertReplicaState( - logStartOffset = 3L, - logEndOffset = 15L, - lastCaughtUpTimeMs = fetchTimeMs3, - lastFetchLeaderLogEndOffset = 15L, - lastFetchTimeMs = fetchTimeMs3 - ) - } - - @Test - def testResetReplicaStateWhenLeaderIsReelectedAndReplicaIsInSync(): Unit = { - updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - val resetTimeMs1 = resetReplicaState( - leaderEndOffset = 11L, - isNewLeader = false, - isFollowerInSync = true - ) - - assertReplicaState( - logStartOffset = 1L, - logEndOffset = 10L, - lastCaughtUpTimeMs = resetTimeMs1, - lastFetchLeaderLogEndOffset = 11L, - lastFetchTimeMs = resetTimeMs1 - ) - } - - @Test - def testResetReplicaStateWhenLeaderIsReelectedAndReplicaIsNotInSync(): Unit = { - updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - resetReplicaState( - leaderEndOffset = 11L, - isNewLeader = false, - isFollowerInSync = false - ) - - assertReplicaState( - logStartOffset = 1L, - logEndOffset = 10L, - lastCaughtUpTimeMs = 0L, - lastFetchLeaderLogEndOffset = 11L, - lastFetchTimeMs = 0L - ) - } - - @Test - def testResetReplicaStateWhenNewLeaderIsElectedAndReplicaIsInSync(): Unit = { - 
updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - val resetTimeMs1 = resetReplicaState( - leaderEndOffset = 11L, - isNewLeader = true, - isFollowerInSync = true - ) - - assertReplicaState( - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastCaughtUpTimeMs = resetTimeMs1, - lastFetchLeaderLogEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastFetchTimeMs = 0L, - brokerEpoch = Option.empty - ) - } - - @Test - def testResetReplicaStateWhenNewLeaderIsElectedAndReplicaIsNotInSync(): Unit = { - updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - resetReplicaState( - leaderEndOffset = 11L, - isNewLeader = true, - isFollowerInSync = false - ) - - assertReplicaState( - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastCaughtUpTimeMs = 0L, - lastFetchLeaderLogEndOffset = UnifiedLog.UNKNOWN_OFFSET, - lastFetchTimeMs = 0L, - brokerEpoch = Option.empty - ) - } - - @Test - def testIsCaughtUpWhenReplicaIsCaughtUpToLogEnd(): Unit = { - assertFalse(isCaughtUp(leaderEndOffset = 10L)) - - updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - assertTrue(isCaughtUp(leaderEndOffset = 10L)) - - time.sleep(ReplicaLagTimeMaxMs + 1) - - assertTrue(isCaughtUp(leaderEndOffset = 10L)) - } - - @Test - def testIsCaughtUpWhenReplicaIsNotCaughtUpToLogEnd(): Unit = { - assertFalse(isCaughtUp(leaderEndOffset = 10L)) - - updateFetchState( - followerFetchOffset = 5L, - followerStartOffset = 1L, - leaderEndOffset = 10L - ) - - assertFalse(isCaughtUp(leaderEndOffset = 10L)) - - updateFetchState( - followerFetchOffset = 10L, - followerStartOffset = 1L, - leaderEndOffset = 15L - ) - - assertTrue(isCaughtUp(leaderEndOffset = 16L)) - - time.sleep(ReplicaLagTimeMaxMs + 1) - - assertFalse(isCaughtUp(leaderEndOffset = 16L)) - } - - @Test - def testFenceStaleUpdates(): Unit = { - val metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Optional.of(2L)) - - val replica = new Replica(BrokerId, Partition, metadataCache) - replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(5L), - followerStartOffset = 1L, - followerFetchTimeMs = 1, - leaderEndOffset = 10L, - brokerEpoch = 2L - ) - assertThrows(classOf[NotLeaderOrFollowerException], () => replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(5L), - followerStartOffset = 2L, - followerFetchTimeMs = 3, - leaderEndOffset = 10L, - brokerEpoch = 1L - )) - replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(5L), - followerStartOffset = 2L, - followerFetchTimeMs = 4, - leaderEndOffset = 10L, - brokerEpoch = -1L - ) - } -} diff --git a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala index 2eb0d7479e80f..8f10811091d70 100644 --- a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala @@ -20,20 +20,21 @@ package kafka.coordinator import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors} import java.util.{Collections, Random} import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent.locks.Lock import 
kafka.coordinator.AbstractCoordinatorConcurrencyTest._ import kafka.cluster.Partition import kafka.log.LogManager import kafka.server.QuotaFactory.QuotaManagers import kafka.server._ import kafka.utils._ -import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, RecordValidationStats} import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets, TopicPartitionOperationKey} +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation import org.apache.kafka.server.util.timer.{MockTimer, Timer} import org.apache.kafka.server.util.{MockScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, UnifiedLog, VerificationGuard} @@ -182,15 +183,14 @@ object AbstractCoordinatorConcurrencyTest { logManager, None, quotaManagers, - null, + mock(classOf[MetadataCache]), null, null, delayedProducePurgatoryParam = Some(producePurgatory), delayedFetchPurgatoryParam = Some(delayedFetchPurgatoryParam), delayedDeleteRecordsPurgatoryParam = Some(delayedDeleteRecordsPurgatoryParam), delayedRemoteFetchPurgatoryParam = Some(delayedRemoteFetchPurgatoryParam), - delayedRemoteListOffsetsPurgatoryParam = Some(delayedRemoteListOffsetsPurgatoryParam), - threadNamePrefix = Option(this.getClass.getName)) { + delayedRemoteListOffsetsPurgatoryParam = Some(delayedRemoteListOffsetsPurgatoryParam)) { @volatile var logs: mutable.Map[TopicPartition, (UnifiedLog, Long)] = _ @@ -213,10 +213,9 @@ object AbstractCoordinatorConcurrencyTest { requiredAcks: Short, internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicPartition, MemoryRecords], - responseCallback: Map[TopicPartition, PartitionResponse] => Unit, - delayedProduceLock: Option[Lock] = None, - processingStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, + processingStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty): Unit = { @@ -226,7 +225,7 @@ object AbstractCoordinatorConcurrencyTest { case (tp, _) => (tp, ProducePartitionStatus(0L, new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) }) - val delayedProduce = new DelayedProduce(5, produceMetadata, this, responseCallback, delayedProduceLock) { + val delayedProduce = new DelayedProduce(5, produceMetadata, this, responseCallback) { // Complete produce requests after a few attempts to trigger delayed produce from different threads val completeAttempts = new AtomicInteger override def tryComplete(): Boolean = { diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala deleted file mode 100644 index a0ca3f23c48fd..0000000000000 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala +++ /dev/null @@ 
-1,632 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.coordinator.group - -import kafka.server.ReplicaManager -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.errors.NotLeaderOrFollowerException -import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, FileRecords, MemoryRecords, RecordBatch, SimpleRecord} -import org.apache.kafka.common.requests.TransactionResult -import org.apache.kafka.common.utils.{MockTime, Time} -import org.apache.kafka.coordinator.common.runtime.Deserializer.UnknownRecordTypeException -import org.apache.kafka.coordinator.common.runtime.{CoordinatorPlayback, Deserializer} -import org.apache.kafka.server.storage.log.FetchIsolation -import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogOffsetMetadata, UnifiedLog} -import org.apache.kafka.test.TestUtils.assertFutureThrows -import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull} -import org.junit.jupiter.api.{Test, Timeout} -import org.mockito.ArgumentMatchers.anyLong -import org.mockito.{ArgumentCaptor, ArgumentMatchers} -import org.mockito.Mockito.{mock, times, verify, when} -import org.mockito.invocation.InvocationOnMock - -import java.nio.ByteBuffer -import java.nio.charset.Charset -import java.util.concurrent.{CountDownLatch, TimeUnit} -import scala.util.Using - -class StringKeyValueDeserializer extends Deserializer[(String, String)] { - override def deserialize(key: ByteBuffer, value: ByteBuffer): (String, String) = { - ( - Charset.defaultCharset().decode(key).toString, - Charset.defaultCharset().decode(value).toString - ) - } -} - -@Timeout(60) -class CoordinatorLoaderImplTest { - @Test - def testNonexistentPartition(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = mock(classOf[Deserializer[(String, String)]]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(None) - - val result = loader.load(tp, coordinator) - assertFutureThrows(classOf[NotLeaderOrFollowerException], result) - } - } - - @Test - def testLoadingIsRejectedWhenClosed(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = mock(classOf[Deserializer[(String, String)]]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - 
deserializer = serde, - loadBufferSize = 1000 - )) { loader => - loader.close() - - val result = loader.load(tp, coordinator) - assertFutureThrows(classOf[RuntimeException], result) - } - } - - @Test - def testLoading(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(9L)) - when(log.highWatermark).thenReturn(0L) - - val readResult1 = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult1) - - val readResult2 = logReadResult(startOffset = 2, records = Seq( - new SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes), - new SimpleRecord("k5".getBytes, "v5".getBytes) - )) - - when(log.read(2L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult2) - - val readResult3 = logReadResult(startOffset = 5, producerId = 100L, producerEpoch = 5, records = Seq( - new SimpleRecord("k6".getBytes, "v6".getBytes), - new SimpleRecord("k7".getBytes, "v7".getBytes) - )) - - when(log.read(5L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult3) - - val readResult4 = logReadResult( - startOffset = 7, - producerId = 100L, - producerEpoch = 5, - controlRecordType = ControlRecordType.COMMIT - ) - - when(log.read(7L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult4) - - val readResult5 = logReadResult( - startOffset = 8, - producerId = 500L, - producerEpoch = 10, - controlRecordType = ControlRecordType.ABORT - ) - - when(log.read(8L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult5) - - assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) - - verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) - verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) - verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) - verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) - verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) - verify(coordinator).replay(5L, 100L, 5.toShort, ("k6", "v6")) - verify(coordinator).replay(6L, 100L, 5.toShort, ("k7", "v7")) - verify(coordinator).replayEndTransactionMarker(100L, 5, TransactionResult.COMMIT) - verify(coordinator).replayEndTransactionMarker(500L, 10, TransactionResult.ABORT) - verify(coordinator).updateLastWrittenOffset(2) - verify(coordinator).updateLastWrittenOffset(5) - verify(coordinator).updateLastWrittenOffset(7) - verify(coordinator).updateLastWrittenOffset(8) - verify(coordinator).updateLastCommittedOffset(0) - } - } - - @Test - def testLoadingStoppedWhenClosed(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - 
val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(100L)) - - val readResult = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - val latch = new CountDownLatch(1) - when(log.read( - ArgumentMatchers.anyLong(), - ArgumentMatchers.eq(1000), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true) - )).thenAnswer { _ => - latch.countDown() - readResult - } - - val result = loader.load(tp, coordinator) - latch.await(10, TimeUnit.SECONDS) - loader.close() - - val ex = assertFutureThrows(classOf[RuntimeException], result) - assertEquals("Coordinator loader is closed.", ex.getMessage) - } - } - - @Test - def testUnknownRecordTypeAreIgnored(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = mock(classOf[StringKeyValueDeserializer]) - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(2L)) - - val readResult = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult) - - when(serde.deserialize(ArgumentMatchers.any(), ArgumentMatchers.any())) - .thenThrow(new UnknownRecordTypeException(1)) - .thenReturn(("k2", "v2")) - - loader.load(tp, coordinator).get(10, TimeUnit.SECONDS) - - verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) - } - } - - @Test - def testDeserializationErrorFailsTheLoading(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = mock(classOf[StringKeyValueDeserializer]) - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(2L)) - - val readResult = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult) - - when(serde.deserialize(ArgumentMatchers.any(), ArgumentMatchers.any())) - .thenThrow(new RuntimeException("Error!")) - - val ex = assertFutureThrows(classOf[RuntimeException], loader.load(tp, coordinator)) - - assertEquals(s"Deserializing record 
DefaultRecord(offset=0, timestamp=-1, key=2 bytes, value=2 bytes) from $tp failed due to: Error!", ex.getMessage) - } - } - - @Test - def testLoadGroupAndOffsetsWithCorruptedLog(): Unit = { - // Simulate a case where startOffset < endOffset but log is empty. This could theoretically happen - // when all the records are expired and the active segment is truncated or when the partition - // is accidentally corrupted. - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = mock(classOf[StringKeyValueDeserializer]) - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(10L)) - - val readResult = logReadResult(startOffset = 0, records = Seq()) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult) - - assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) - } - } - - @Test - def testLoadSummary(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - val time = new MockTime() - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - val startTimeMs = time.milliseconds() - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(5L)) - - val readResult1 = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenAnswer((_: InvocationOnMock) => { - time.sleep(1000) - readResult1 - }) - - val readResult2 = logReadResult(startOffset = 2, records = Seq( - new SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes), - new SimpleRecord("k5".getBytes, "v5".getBytes) - )) - - when(log.read(2L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult2) - - val summary = loader.load(tp, coordinator).get(10, TimeUnit.SECONDS) - assertEquals(startTimeMs, summary.startTimeMs()) - assertEquals(startTimeMs + 1000, summary.endTimeMs()) - assertEquals(5, summary.numRecords()) - assertEquals(readResult1.records.sizeInBytes() + readResult2.records.sizeInBytes(), summary.numBytes()) - } - } - - @Test - def testUpdateLastWrittenOffsetOnBatchLoaded(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - 
when(log.highWatermark).thenReturn(0L).thenReturn(0L).thenReturn(2L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(7L)) - - val readResult1 = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult1) - - val readResult2 = logReadResult(startOffset = 2, records = Seq( - new SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes), - new SimpleRecord("k5".getBytes, "v5".getBytes) - )) - - when(log.read(2L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult2) - - val readResult3 = logReadResult(startOffset = 5, records = Seq( - new SimpleRecord("k6".getBytes, "v6".getBytes), - new SimpleRecord("k7".getBytes, "v7".getBytes) - )) - - when(log.read(5L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult3) - - assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) - - verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) - verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) - verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) - verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) - verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) - verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k6", "v6")) - verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k7", "v7")) - verify(coordinator, times(0)).updateLastWrittenOffset(0) - verify(coordinator, times(1)).updateLastWrittenOffset(2) - verify(coordinator, times(1)).updateLastWrittenOffset(5) - verify(coordinator, times(1)).updateLastWrittenOffset(7) - verify(coordinator, times(1)).updateLastCommittedOffset(0) - verify(coordinator, times(1)).updateLastCommittedOffset(2) - verify(coordinator, times(0)).updateLastCommittedOffset(5) - } - } - - @Test - def testUpdateLastWrittenOffsetAndUpdateLastCommittedOffsetNoRecordsRead(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(log.highWatermark).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(0L)) - - assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) - - verify(coordinator, times(0)).updateLastWrittenOffset(anyLong()) - verify(coordinator, times(0)).updateLastCommittedOffset(anyLong()) - } - } - - @Test - def testUpdateLastWrittenOffsetOnBatchLoadedWhileHighWatermarkAhead(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, 
String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(log.highWatermark).thenReturn(5L).thenReturn(7L).thenReturn(7L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(7L)) - - val readResult1 = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult1) - - val readResult2 = logReadResult(startOffset = 2, records = Seq( - new SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes), - new SimpleRecord("k5".getBytes, "v5".getBytes) - )) - - when(log.read(2L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult2) - - val readResult3 = logReadResult(startOffset = 5, records = Seq( - new SimpleRecord("k6".getBytes, "v6".getBytes), - new SimpleRecord("k7".getBytes, "v7".getBytes) - )) - - when(log.read(5L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult3) - - assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) - - verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) - verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) - verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) - verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) - verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) - verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k6", "v6")) - verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k7", "v7")) - verify(coordinator, times(0)).updateLastWrittenOffset(0) - verify(coordinator, times(0)).updateLastWrittenOffset(2) - verify(coordinator, times(0)).updateLastWrittenOffset(5) - verify(coordinator, times(1)).updateLastWrittenOffset(7) - verify(coordinator, times(0)).updateLastCommittedOffset(0) - verify(coordinator, times(0)).updateLastCommittedOffset(2) - verify(coordinator, times(0)).updateLastCommittedOffset(5) - verify(coordinator, times(1)).updateLastCommittedOffset(7) - } - } - - @Test - def testPartitionGoesOfflineDuringLoad(): Unit = { - val tp = new TopicPartition("foo", 0) - val replicaManager = mock(classOf[ReplicaManager]) - val serde = new StringKeyValueDeserializer - val log = mock(classOf[UnifiedLog]) - val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - - Using.resource(new CoordinatorLoaderImpl[(String, String)]( - time = Time.SYSTEM, - replicaManager = replicaManager, - deserializer = serde, - loadBufferSize = 1000 - )) { loader => - when(replicaManager.getLog(tp)).thenReturn(Some(log)) - when(log.logStartOffset).thenReturn(0L) - when(log.highWatermark).thenReturn(0L) - when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(5L)).thenReturn(Some(-1L)) - - val readResult1 = logReadResult(startOffset = 0, records = Seq( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - )) - - when(log.read(0L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult1) - - val readResult2 = logReadResult(startOffset = 2, records = Seq( - new 
SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes), - new SimpleRecord("k5".getBytes, "v5".getBytes) - )) - - when(log.read(2L, 1000, FetchIsolation.LOG_END, true - )).thenReturn(readResult2) - - assertFutureThrows(classOf[NotLeaderOrFollowerException], loader.load(tp, coordinator)) - } - } - - private def logReadResult( - startOffset: Long, - producerId: Long = RecordBatch.NO_PRODUCER_ID, - producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH, - records: Seq[SimpleRecord] - ): FetchDataInfo = { - val fileRecords = mock(classOf[FileRecords]) - val memoryRecords = if (producerId == RecordBatch.NO_PRODUCER_ID) { - MemoryRecords.withRecords( - startOffset, - Compression.NONE, - records: _* - ) - } else { - MemoryRecords.withTransactionalRecords( - startOffset, - Compression.NONE, - producerId, - producerEpoch, - 0, - RecordBatch.NO_PARTITION_LEADER_EPOCH, - records: _* - ) - } - - when(fileRecords.sizeInBytes).thenReturn(memoryRecords.sizeInBytes) - - val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) - when(fileRecords.readInto( - bufferCapture.capture(), - ArgumentMatchers.anyInt()) - ).thenAnswer { _ => - val buffer = bufferCapture.getValue - buffer.put(memoryRecords.buffer.duplicate) - buffer.flip() - } - - new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecords) - } - - private def logReadResult( - startOffset: Long, - producerId: Long, - producerEpoch: Short, - controlRecordType: ControlRecordType - ): FetchDataInfo = { - val fileRecords = mock(classOf[FileRecords]) - val memoryRecords = MemoryRecords.withEndTransactionMarker( - startOffset, - 0L, - RecordBatch.NO_PARTITION_LEADER_EPOCH, - producerId, - producerEpoch, - new EndTransactionMarker(controlRecordType, 0) - ) - - when(fileRecords.sizeInBytes).thenReturn(memoryRecords.sizeInBytes) - - val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) - when(fileRecords.readInto( - bufferCapture.capture(), - ArgumentMatchers.anyInt()) - ).thenAnswer { _ => - val buffer = bufferCapture.getValue - buffer.put(memoryRecords.buffer.duplicate) - buffer.flip() - } - - new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecords) - } -} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala index 3a6ee39dcc1e5..c55d2c0da3dc1 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala @@ -17,7 +17,7 @@ package kafka.coordinator.group import kafka.server.{LogAppendResult, ReplicaManager} -import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.errors.NotLeaderOrFollowerException import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult @@ -34,9 +34,9 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{mock, verify, when} import java.nio.charset.Charset +import java.util import java.util.Optional import scala.collection.Map -import scala.jdk.CollectionConverters._ class CoordinatorPartitionWriterTest { @Test @@ -75,8 +75,8 @@ class CoordinatorPartitionWriterTest { replicaManager ) - when(replicaManager.getLogConfig(tp)).thenReturn(Some(new 
LogConfig(Map.empty.asJava))) - assertEquals(new LogConfig(Map.empty.asJava), partitionRecordWriter.config(tp)) + when(replicaManager.getLogConfig(tp)).thenReturn(Some(new LogConfig(util.Map.of))) + assertEquals(new LogConfig(util.Map.of), partitionRecordWriter.config(tp)) when(replicaManager.getLogConfig(tp)).thenReturn(None) assertThrows(classOf[NotLeaderOrFollowerException], () => partitionRecordWriter.config(tp)) @@ -86,13 +86,16 @@ class CoordinatorPartitionWriterTest { @Test def testWriteRecords(): Unit = { val tp = new TopicPartition("foo", 0) + val topicId = Uuid.fromString("TbEp6-A4s3VPT1TwiI5COw") val replicaManager = mock(classOf[ReplicaManager]) + when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicId, tp)) + val partitionRecordWriter = new CoordinatorPartitionWriter( - replicaManager + replicaManager ) - val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) when(replicaManager.appendRecordsToLeader( ArgumentMatchers.eq(1.toShort), @@ -102,7 +105,7 @@ class CoordinatorPartitionWriterTest { ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.eq(Map(tp -> VerificationGuard.SENTINEL)), - )).thenReturn(Map(tp -> LogAppendResult( + )).thenReturn(Map(new TopicIdPartition(topicId, tp) -> LogAppendResult( new LogAppendInfo( 5L, 10L, @@ -116,7 +119,7 @@ class CoordinatorPartitionWriterTest { 10L ), Option.empty, - false + hasCustomErrorMessage = false ))) val batch = MemoryRecords.withRecords( @@ -133,10 +136,9 @@ class CoordinatorPartitionWriterTest { VerificationGuard.SENTINEL, batch )) - assertEquals( batch, - recordsCapture.getValue.getOrElse(tp, throw new AssertionError(s"No records for $tp")) + recordsCapture.getValue.getOrElse(new TopicIdPartition(topicId, tp), throw new AssertionError(s"No records for $tp")) ) } @@ -191,13 +193,16 @@ class CoordinatorPartitionWriterTest { @Test def testWriteRecordsWithFailure(): Unit = { val tp = new TopicPartition("foo", 0) + val topicId = Uuid.fromString("TbEp6-A4s3VPT1TwiI5COw") val replicaManager = mock(classOf[ReplicaManager]) + when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicId, tp)) + val partitionRecordWriter = new CoordinatorPartitionWriter( replicaManager ) - val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) when(replicaManager.appendRecordsToLeader( ArgumentMatchers.eq(1.toShort), @@ -207,10 +212,10 @@ class CoordinatorPartitionWriterTest { ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.eq(Map(tp -> VerificationGuard.SENTINEL)), - )).thenReturn(Map(tp -> LogAppendResult( + )).thenReturn(Map(new TopicIdPartition(topicId, tp) -> LogAppendResult( LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(Errors.NOT_LEADER_OR_FOLLOWER.exception), - false + hasCustomErrorMessage = false ))) val batch = MemoryRecords.withRecords( diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala index cfe89a4164907..79957b01fb77b 100644 --- 
a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala @@ -17,25 +17,27 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer -import java.util.Collections +import java.util import java.util.Optional +import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.atomic.AtomicBoolean import kafka.coordinator.AbstractCoordinatorConcurrencyTest import kafka.coordinator.AbstractCoordinatorConcurrencyTest._ import kafka.coordinator.transaction.TransactionCoordinatorConcurrencyTest._ import kafka.server.KafkaConfig -import kafka.utils.{Pool, TestUtils} +import kafka.utils.TestUtils import org.apache.kafka.clients.{ClientResponse, NetworkClient} -import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME +import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch, SimpleRecord} import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.{LogContext, MockTime, ProducerIdAndEpoch} -import org.apache.kafka.common.{Node, TopicPartition} -import org.apache.kafka.coordinator.transaction.ProducerIdManager +import org.apache.kafka.common.{Node, TopicPartition, Uuid} +import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionMetadata, TransactionState} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.storage.log.FetchIsolation @@ -61,7 +63,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren private val allOperations = Seq( new InitProducerIdOperation, - new AddPartitionsToTxnOperation(Set(new TopicPartition("topic", 0))), + new AddPartitionsToTxnOperation(util.Set.of(new TopicPartition("topic", 0))), new EndTxnOperation) private val allTransactions = mutable.Set[Transaction]() @@ -85,18 +87,18 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - Collections.singletonMap(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), + util.Map.of(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), 0) } when(metadataCache.metadataVersion()) .thenReturn(MetadataVersion.latestProduction()) - + txnStateManager = new TransactionStateManager(0, scheduler, replicaManager, metadataCache, txnConfig, time, new Metrics()) txnStateManager.startup(() => numPartitions, enableTransactionalIdExpiration = true) for (i <- 0 until numPartitions) - txnStateManager.addLoadedTransactionsToCache(i, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + txnStateManager.addLoadedTransactionsToCache(i, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) val pidGenerator: ProducerIdManager = mock(classOf[ProducerIdManager]) when(pidGenerator.generateProducerId()) @@ -113,6 +115,10 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren txnStateManager, time) + val transactionStateTopicId = 
Uuid.randomUuid() + when(replicaManager.metadataCache.getTopicName(transactionStateTopicId)).thenReturn(Optional.of(Topic.TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.metadataCache.getTopicId(Topic.TRANSACTION_STATE_TOPIC_NAME)).thenReturn(transactionStateTopicId) + transactionCoordinator = new TransactionCoordinator( txnConfig, scheduler, @@ -453,7 +459,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren val partitionId = txnStateManager.partitionFor(txn.transactionalId) val txnRecords = txnRecordsByPartition(partitionId) val initPidOp = new InitProducerIdOperation() - val addPartitionsOp = new AddPartitionsToTxnOperation(Set(new TopicPartition("topic", 0))) + val addPartitionsOp = new AddPartitionsToTxnOperation(util.Set.of(new TopicPartition("topic", 0))) initPidOp.run(txn) initPidOp.awaitAndVerify(txn) addPartitionsOp.run(txn) @@ -462,7 +468,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren val txnMetadata = transactionMetadata(txn).getOrElse(throw new IllegalStateException(s"Transaction not found $txn")) txnRecords += new SimpleRecord(txn.txnMessageKeyBytes, TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TransactionVersion.TV_2)) - txnMetadata.state = PrepareCommit + txnMetadata.state(TransactionState.PREPARE_COMMIT) txnRecords += new SimpleRecord(txn.txnMessageKeyBytes, TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TransactionVersion.TV_2)) prepareTxnLog(partitionId) @@ -470,7 +476,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren private def prepareTxnLog(partitionId: Int): Unit = { val logMock: UnifiedLog = mock(classOf[UnifiedLog]) - when(logMock.config).thenReturn(new LogConfig(Collections.emptyMap())) + when(logMock.config).thenReturn(new LogConfig(util.Map.of)) val fileRecordsMock: FileRecords = mock(classOf[FileRecords]) @@ -500,17 +506,18 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren } private def prepareExhaustedEpochTxnMetadata(txn: Transaction): TransactionMetadata = { - new TransactionMetadata(transactionalId = txn.transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = (Short.MaxValue - 1).toShort, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 60000, - state = Empty, - topicPartitions = collection.mutable.Set.empty[TopicPartition], - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TransactionVersion.TV_0) + new TransactionMetadata(txn.transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + (Short.MaxValue - 1).toShort, + RecordBatch.NO_PRODUCER_EPOCH, + 60000, + TransactionState.EMPTY, + new util.HashSet[TopicPartition](), + -1, + time.milliseconds(), + TransactionVersion.TV_0) } abstract class TxnOperation[R] extends Operation { @@ -538,11 +545,11 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def awaitAndVerify(txn: Transaction): Unit = { val initPidResult = result.getOrElse(throw new IllegalStateException("InitProducerId has not completed")) assertEquals(Errors.NONE, initPidResult.error) - verifyTransaction(txn, Empty) + verifyTransaction(txn, TransactionState.EMPTY) } } - class AddPartitionsToTxnOperation(partitions: Set[TopicPartition]) extends TxnOperation[Errors] { + class AddPartitionsToTxnOperation(partitions: util.Set[TopicPartition]) extends 
TxnOperation[Errors] { override def run(txn: Transaction): Unit = { transactionMetadata(txn).foreach { txnMetadata => transactionCoordinator.handleAddPartitionsToTransaction(txn.transactionalId, @@ -558,7 +565,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def awaitAndVerify(txn: Transaction): Unit = { val error = result.getOrElse(throw new IllegalStateException("AddPartitionsToTransaction has not completed")) assertEquals(Errors.NONE, error) - verifyTransaction(txn, Ongoing) + verifyTransaction(txn, TransactionState.ONGOING) } } @@ -579,7 +586,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren if (!txn.ended) { txn.ended = true assertEquals(Errors.NONE, error) - val expectedState = if (transactionResult(txn) == TransactionResult.COMMIT) CompleteCommit else CompleteAbort + val expectedState = if (transactionResult(txn) == TransactionResult.COMMIT) TransactionState.COMPLETE_COMMIT else TransactionState.COMPLETE_ABORT verifyTransaction(txn, expectedState) } else assertEquals(Errors.INVALID_TXN_STATE, error) @@ -600,7 +607,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def await(): Unit = { allTransactions.foreach { txn => if (txnStateManager.partitionFor(txn.transactionalId) == txnTopicPartitionId) { - verifyTransaction(txn, CompleteCommit) + verifyTransaction(txn, TransactionState.COMPLETE_COMMIT) } } } @@ -623,7 +630,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def run(): Unit = { transactions.foreach { txn => transactionMetadata(txn).foreach { txnMetadata => - txnMetadata.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs + txnMetadata.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) } } txnStateManager.enableTransactionalIdExpiration() diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala index 94ccd6dc03da7..d9f6e115fbba8 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala @@ -22,7 +22,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{AddPartitionsToTxnResponse, TransactionResult} import org.apache.kafka.common.utils.{LogContext, MockTime, ProducerIdAndEpoch} -import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionStateManagerConfig} +import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} import org.apache.kafka.server.common.TransactionVersion import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} import org.apache.kafka.server.util.MockScheduler @@ -34,7 +34,8 @@ import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt} import org.mockito.Mockito._ import org.mockito.{ArgumentCaptor, ArgumentMatchers} -import scala.collection.mutable +import java.util + import scala.jdk.CollectionConverters._ class TransactionCoordinatorTest { @@ -56,7 +57,8 @@ class TransactionCoordinatorTest { private val txnTimeoutMs = 1 private val producerId2 = 11L - private val partitions = mutable.Set[TopicPartition](new TopicPartition("topic1", 0)) + 
private val partitions = new util.HashSet[TopicPartition]() + partitions.add(new TopicPartition("topic1", 0)) private val scheduler = new MockScheduler(time) val coordinator = new TransactionCoordinator( @@ -197,7 +199,7 @@ class TransactionCoordinatorTest { initPidGenericMocks(transactionalId) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_0) + (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, util.Set.of, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -229,11 +231,11 @@ class TransactionCoordinatorTest { initPidGenericMocks(transactionalId) val txnMetadata1 = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_2) - // We start with txnMetadata1 so we can transform the metadata to PrepareCommit. + (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, util.Set.of, time.milliseconds(), time.milliseconds(), TV_2) + // We start with txnMetadata1 so we can transform the metadata to TransactionState.PREPARE_COMMIT. val txnMetadata2 = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_2) - val transitMetadata = txnMetadata2.prepareAbortOrCommit(PrepareCommit, TV_2, producerId2, time.milliseconds(), false) + (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, util.Set.of, time.milliseconds(), time.milliseconds(), TV_2) + val transitMetadata = txnMetadata2.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, producerId2, time.milliseconds(), false) txnMetadata2.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata2.producerId) @@ -342,7 +344,7 @@ class TransactionCoordinatorTest { } // If producer ID is not the same, return INVALID_PRODUCER_ID_MAPPING val wrongPidTxnMetadata = new TransactionMetadata(transactionalId, 1, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, wrongPidTxnMetadata)))) @@ -353,7 +355,7 @@ class TransactionCoordinatorTest { // If producer epoch is not equal, return PRODUCER_FENCED val oldEpochTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, oldEpochTxnMetadata)))) @@ -364,7 +366,7 @@ class TransactionCoordinatorTest { // If the txn state is Prepare or AbortCommit, we 
return CONCURRENT_TRANSACTIONS val emptyTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, emptyTxnMetadata)))) @@ -375,8 +377,8 @@ class TransactionCoordinatorTest { // Pending state does not matter, we will just check if the partitions are in the txnMetadata. val ongoingTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, mutable.Set.empty, 0, 0, TV_0) - ongoingTxnMetadata.pendingState = Some(CompleteCommit) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, util.Set.of, 0, 0, TV_0) + ongoingTxnMetadata.pendingState(util.Optional.of(TransactionState.COMPLETE_COMMIT)) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, ongoingTxnMetadata)))) @@ -388,20 +390,20 @@ class TransactionCoordinatorTest { @Test def shouldRespondWithConcurrentTransactionsOnAddPartitionsWhenStateIsPrepareCommit(): Unit = { - validateConcurrentTransactions(PrepareCommit) + validateConcurrentTransactions(TransactionState.PREPARE_COMMIT) } @Test def shouldRespondWithConcurrentTransactionOnAddPartitionsWhenStateIsPrepareAbort(): Unit = { - validateConcurrentTransactions(PrepareAbort) + validateConcurrentTransactions(TransactionState.PREPARE_ABORT) } def validateConcurrentTransactions(state: TransactionState): Unit = { - // Since the clientTransactionVersion doesn't matter, use 2 since the states are PrepareCommit and PrepareAbort. + // Since the clientTransactionVersion doesn't matter, use 2 since the states are TransactionState.PREPARE_COMMIT and TransactionState.PREPARE_ABORT. when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, mutable.Set.empty, 0, 0, TV_2))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, util.Set.of, 0, 0, TV_2))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) @@ -409,11 +411,11 @@ class TransactionCoordinatorTest { @Test def shouldRespondWithProducerFencedOnAddPartitionsWhenEpochsAreDifferent(): Unit = { - // Since the clientTransactionVersion doesn't matter, use 2 since the state is PrepareCommit. + // Since the clientTransactionVersion doesn't matter, use 2 since the state is TransactionState.PREPARE_COMMIT. 
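
A pattern that runs through these TransactionCoordinatorTest hunks is the move from Scala `mutable.Set[TopicPartition]` fixtures to `java.util` collections: the shared `partitions` fixture becomes a mutable `util.HashSet`, the empty `topicPartitions` constructor arguments use the immutable `util.Set.of`, and the later `extraPartitions` hunk copies before adding so the shared fixture is left untouched, mirroring the old non-destructive `partitions ++ Set(...)`. Below is a minimal standalone sketch of that idiom; the object and value names are illustrative only and not part of the patch.

```scala
import java.util
import org.apache.kafka.common.TopicPartition

// Standalone illustration of the collection idiom used by the updated tests.
object PartitionSetIdiom {
  def main(args: Array[String]): Unit = {
    // Shared fixture: a mutable java.util.HashSet, as `partitions` is now built in the test.
    val partitions = new util.HashSet[TopicPartition]()
    partitions.add(new TopicPartition("topic1", 0))

    // Non-destructive extension: copy, then add, so the shared fixture is not mutated
    // (this replaces the old Scala `partitions ++ Set(new TopicPartition("topic2", 0))`).
    val extraPartitions = new util.HashSet[TopicPartition](partitions)
    extraPartitions.add(new TopicPartition("topic2", 0))

    // util.Set.of(...) builds an immutable set; the patch uses the zero-arg form for the
    // empty `topicPartitions` constructor arguments. Calling add() on it would throw
    // UnsupportedOperationException.
    val none: util.Set[TopicPartition] = util.Set.of()

    println(s"partitions=$partitions extraPartitions=$extraPartitions empty=$none")
  }
}
```
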
when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 10, 9, 0, PrepareCommit, mutable.Set.empty, 0, 0, TV_2))))) + 10, 9, 0, TransactionState.PREPARE_COMMIT, util.Set.of, 0, 0, TV_2))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.PRODUCER_FENCED, error) @@ -421,30 +423,30 @@ class TransactionCoordinatorTest { @Test def shouldAppendNewMetadataToLogOnAddPartitionsWhenPartitionsAdded(): Unit = { - validateSuccessfulAddPartitions(Empty, 0) + validateSuccessfulAddPartitions(TransactionState.EMPTY, 0) } @Test def shouldRespondWithSuccessOnAddPartitionsWhenStateIsOngoing(): Unit = { - validateSuccessfulAddPartitions(Ongoing, 0) + validateSuccessfulAddPartitions(TransactionState.ONGOING, 0) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteCommit(clientTransactionVersion: Short): Unit = { - validateSuccessfulAddPartitions(CompleteCommit, clientTransactionVersion) + validateSuccessfulAddPartitions(TransactionState.COMPLETE_COMMIT, clientTransactionVersion) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteAbort(clientTransactionVersion: Short): Unit = { - validateSuccessfulAddPartitions(CompleteAbort, clientTransactionVersion) + validateSuccessfulAddPartitions(TransactionState.COMPLETE_ABORT, clientTransactionVersion) } def validateSuccessfulAddPartitions(previousState: TransactionState, transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, previousState, mutable.Set.empty, time.milliseconds(), time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, previousState, util.Set.of, time.milliseconds(), time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -467,7 +469,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Empty, partitions, 0, 0, TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.EMPTY, partitions, 0, 0, TV_0))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_0) assertEquals(Errors.NONE, error) @@ -484,7 +486,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, partitions, 0, 0, TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, partitions, 0, 0, TV_0))))) coordinator.handleVerifyPartitionsInTransaction(transactionalId, 0L, 0, partitions, 
verifyPartitionsInTxnCallback) errors.foreach { case (_, error) => @@ -503,9 +505,10 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Empty, partitions, 0, 0, TV_0))))) - - val extraPartitions = partitions ++ Set(new TopicPartition("topic2", 0)) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.EMPTY, partitions, 0, 0, TV_0))))) + + val extraPartitions = new util.HashSet[TopicPartition](partitions) + extraPartitions.add(new TopicPartition("topic2", 0)) coordinator.handleVerifyPartitionsInTransaction(transactionalId, 0L, 0, extraPartitions, verifyPartitionsInTxnCallback) assertEquals(Errors.TRANSACTION_ABORTABLE, errors(new TopicPartition("topic2", 0))) @@ -532,7 +535,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 10, 10, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, util.Set.of, 0, time.milliseconds(), TV_0))))) coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error) @@ -546,7 +549,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) + (producerEpoch - 1).toShort, 1, TransactionState.ONGOING, util.Set.of, 0, time.milliseconds(), TV_0))))) coordinator.handleEndTransaction(transactionalId, producerId, 0, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.PRODUCER_FENCED, error) @@ -560,7 +563,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -587,7 +590,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, CompleteCommit, 
collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -604,7 +607,7 @@ class TransactionCoordinatorTest { def testEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV1(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -623,7 +626,7 @@ class TransactionCoordinatorTest { def shouldReturnOkOnEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV2(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -660,7 +663,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteAbortAndResultIsNotAbort(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -673,7 +676,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteCommitAndResultIsNotCommit(): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort,1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort,1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), 
clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -687,7 +690,7 @@ class TransactionCoordinatorTest { def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV1(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -706,7 +709,7 @@ class TransactionCoordinatorTest { def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV2(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -737,7 +740,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) @@ -750,7 +753,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.PREPARE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, 
producerId, requestEpoch(clientTransactionVersion), TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_TXN_STATE, error) @@ -762,7 +765,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_TXN_STATE, error) @@ -775,7 +778,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) @@ -804,7 +807,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -820,7 +823,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnV2IfNotEndTxnV2Retry(): Unit = { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) 
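For reference — not part of the patch: the hunks above and below all apply the same mechanical migration, replacing the Scala case objects (Empty, Ongoing, PrepareCommit, CompleteCommit, ...) with constants on the Java TransactionState enum, and replacing collection.mutable.Set.empty[TopicPartition] with immutable java.util sets. A minimal, self-contained sketch of the resulting construction pattern follows; the argument roles in the comments are inferred from the named TxnTransitMetadata fields that are deleted further down in this diff, and the literal values are placeholders rather than values from any one test.

import java.util
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState}
import org.apache.kafka.server.common.TransactionVersion.TV_2

object TransactionStateMigrationSketch {
  // Same 12-argument constructor order as the calls in the hunks above.
  val txnMetadata = new TransactionMetadata(
    "txn-id",                                     // transactionalId
    1000L,                                        // producerId
    1000L,                                        // prevProducerId (inferred role)
    RecordBatch.NO_PRODUCER_ID,                   // nextProducerId (inferred role)
    5.toShort,                                    // producerEpoch
    4.toShort,                                    // lastProducerEpoch
    60000,                                        // txnTimeoutMs
    TransactionState.COMPLETE_COMMIT,             // was the Scala case object CompleteCommit
    util.Set.of(new TopicPartition("topic1", 0)), // was collection.mutable.Set[TopicPartition]
    0L,                                           // txnStartTimestamp
    0L,                                           // txnLastUpdateTimestamp
    TV_2)                                         // clientTransactionVersion
}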
// If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return PRODUCER_FENCED. coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -829,7 +832,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) // If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return INVALID_TXN_STATE. coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -841,7 +844,7 @@ class TransactionCoordinatorTest { def shouldReturnOkOnEndTxnV2IfEndTxnV2RetryEpochOverflow(): Unit = { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) + producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) // Return CONCURRENT_TRANSACTIONS while transaction is still completing coordinator.handleEndTransaction(transactionalId, producerId, (Short.MaxValue - 1).toShort, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -850,7 +853,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId2, producerId, - RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) coordinator.handleEndTransaction(transactionalId, producerId, (Short.MaxValue - 1).toShort, TransactionResult.COMMIT, TV_2, endTxnCallback) assertEquals(Errors.NONE, error) @@ -863,7 +866,7 @@ class TransactionCoordinatorTest { @Test def shouldReturnConcurrentTxnOnAddPartitionsIfEndTxnV2EpochOverflowAndNotComplete(): Unit = { val prepareWithPending = new TransactionMetadata(transactionalId, producerId, producerId, - producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2) + producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2) val txnTransitMetadata = prepareWithPending.prepareComplete(time.milliseconds()) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) @@ -875,7 +878,7 @@ class TransactionCoordinatorTest { 
verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) prepareWithPending.completeTransitionTo(txnTransitMetadata) - assertEquals(CompleteCommit, prepareWithPending.state) + assertEquals(TransactionState.COMPLETE_COMMIT, prepareWithPending.state) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, prepareWithPending)))) when(transactionManager.appendTransactionToLog( @@ -897,7 +900,7 @@ class TransactionCoordinatorTest { @ValueSource(shorts = Array(0, 2)) def shouldAppendPrepareCommitToLogOnEndTxnWhenStatusIsOngoingAndResultIsCommit(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - mockPrepare(PrepareCommit, clientTransactionVersion) + mockPrepare(TransactionState.PREPARE_COMMIT, clientTransactionVersion) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) @@ -914,7 +917,7 @@ class TransactionCoordinatorTest { @ValueSource(shorts = Array(0, 2)) def shouldAppendPrepareAbortToLogOnEndTxnWhenStatusIsOngoingAndResultIsAbort(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - mockPrepare(PrepareAbort, clientTransactionVersion) + mockPrepare(TransactionState.PREPARE_ABORT, clientTransactionVersion) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) @@ -989,7 +992,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, metadataEpoch, 1, - 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.PRODUCER_FENCED, error) @@ -998,29 +1001,29 @@ class TransactionCoordinatorTest { @Test def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingEmptyTransaction(): Unit = { - validateIncrementEpochAndUpdateMetadata(Empty, 0) + validateIncrementEpochAndUpdateMetadata(TransactionState.EMPTY, 0) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteTransaction(clientTransactionVersion: Short): Unit = { - validateIncrementEpochAndUpdateMetadata(CompleteAbort, clientTransactionVersion) + validateIncrementEpochAndUpdateMetadata(TransactionState.COMPLETE_ABORT, clientTransactionVersion) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteCommitTransaction(clientTransactionVersion: Short): Unit = { - validateIncrementEpochAndUpdateMetadata(CompleteCommit, clientTransactionVersion) + validateIncrementEpochAndUpdateMetadata(TransactionState.COMPLETE_COMMIT, 
clientTransactionVersion) } @Test def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareCommitState(): Unit = { - validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareCommit) + validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(TransactionState.PREPARE_COMMIT) } @Test def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareAbortState(): Unit = { - validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareAbort) + validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(TransactionState.PREPARE_ABORT) } @ParameterizedTest(name = "enableTwoPCFlag={0}, keepPreparedTxn={1}") @@ -1030,7 +1033,7 @@ class TransactionCoordinatorTest { keepPreparedTxn: Boolean ): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1041,7 +1044,7 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), @@ -1066,7 +1069,7 @@ class TransactionCoordinatorTest { verify(transactionManager).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)), + ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)), any(), any(), any()) @@ -1075,13 +1078,13 @@ class TransactionCoordinatorTest { @Test def shouldFailToAbortTransactionOnHandleInitPidWhenProducerEpochIsSmaller(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) val bumpedTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 2).toShort, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (producerEpoch + 2).toShort, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) 
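For reference — not part of the patch: several hunks above build the expected log payload with originalMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, ...) and later apply it with completeTransitionTo once the mocked log write "succeeds". The hedged sketch below condenses that prepare -> append -> complete sequence using only the signatures visible in these hunks; values are placeholders and the appendTransactionToLog mock is omitted.

import java.util
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState}
import org.apache.kafka.server.common.TransactionVersion.TV_0

object PrepareAbortSketch {
  val now = 0L
  // An ongoing transaction with one partition registered.
  val ongoing = new TransactionMetadata("txn-id", 1000L, 1000L, RecordBatch.NO_PRODUCER_ID,
    5.toShort, RecordBatch.NO_PRODUCER_EPOCH, 60000, TransactionState.ONGOING,
    util.Set.of(new TopicPartition("topic1", 0)), now, now, TV_0)

  // The transition the coordinator is expected to hand to appendTransactionToLog ...
  val transit = ongoing.prepareAbortOrCommit(
    TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, now, false)

  // ... and which the tests apply once the simulated write succeeds.
  ongoing.completeTransitionTo(transit)
  assert(ongoing.state == TransactionState.PREPARE_ABORT)
}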
when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1106,7 +1109,7 @@ class TransactionCoordinatorTest { @Test def shouldNotRepeatedlyBumpEpochDueToInitPidDuringOngoingTxnIfAppendToLogFails(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1120,8 +1123,8 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), @@ -1131,10 +1134,10 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NOT_ENOUGH_REPLICAS) - txnMetadata.pendingState = None + txnMetadata.pendingState(util.Optional.empty()) }).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NOT_ENOUGH_REPLICAS) - txnMetadata.pendingState = None + txnMetadata.pendingState(util.Optional.empty()) }).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) @@ -1198,14 +1201,14 @@ class TransactionCoordinatorTest { @Test def shouldUseLastEpochToFenceWhenEpochsAreExhausted(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) val postFenceTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - Short.MaxValue, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions, time.milliseconds(), time.milliseconds(), TV_0) + Short.MaxValue, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) 
.thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1217,18 +1220,18 @@ class TransactionCoordinatorTest { when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(TxnTransitMetadata( - producerId = producerId, - prevProducerId = producerId, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = Short.MaxValue, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = txnTimeoutMs, - txnState = PrepareAbort, - topicPartitions = partitions.clone, - txnStartTimestamp = time.milliseconds(), - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0)), + ArgumentMatchers.eq(new TxnTransitMetadata( + producerId, + producerId, + RecordBatch.NO_PRODUCER_ID, + Short.MaxValue, + RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs, + TransactionState.PREPARE_ABORT, + partitions, + time.milliseconds(), + time.milliseconds(), + TV_0)), capturedErrorsCallback.capture(), any(), any()) @@ -1250,29 +1253,165 @@ class TransactionCoordinatorTest { verify(transactionManager).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(TxnTransitMetadata( - producerId = producerId, - prevProducerId = producerId, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = Short.MaxValue, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = txnTimeoutMs, - txnState = PrepareAbort, - topicPartitions = partitions.clone, - txnStartTimestamp = time.milliseconds(), - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0)), + ArgumentMatchers.eq(new TxnTransitMetadata( + producerId, + producerId, + RecordBatch.NO_PRODUCER_ID, + Short.MaxValue, + RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs, + TransactionState.PREPARE_ABORT, + partitions, + time.milliseconds(), + time.milliseconds(), + TV_0)), + any(), + any(), + any()) + } + + @Test + def shouldNotCauseEpochOverflowWhenInitPidDuringOngoingTxnV2(): Unit = { + // When InitProducerId is called with an ongoing transaction at epoch 32766 (Short.MaxValue - 1), + // it should not cause an epoch overflow by incrementing twice. 
+ // The only true increment happens in prepareAbortOrCommit + val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, + (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_2) + + when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + .thenReturn(true) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) + + // Capture the transition metadata to verify epoch increments + val capturedTxnTransitMetadata: ArgumentCaptor[TxnTransitMetadata] = ArgumentCaptor.forClass(classOf[TxnTransitMetadata]) + when(transactionManager.appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + capturedTxnTransitMetadata.capture(), + capturedErrorsCallback.capture(), + any(), + any()) + ).thenAnswer(invocation => { + val transitMetadata = invocation.getArgument[TxnTransitMetadata](2) + // Simulate the metadata update that would happen in the real appendTransactionToLog + txnMetadata.completeTransitionTo(transitMetadata) + capturedErrorsCallback.getValue.apply(Errors.NONE) + }) + + // Handle InitProducerId with ongoing transaction at epoch 32766 + coordinator.handleInitProducerId( + transactionalId, + txnTimeoutMs, + enableTwoPCFlag = false, + keepPreparedTxn = false, + None, + initProducerIdMockCallback + ) + + // Verify that the epoch did not overflow (should be Short.MaxValue = 32767, not negative) + assertEquals(Short.MaxValue, txnMetadata.producerEpoch) + assertEquals(TransactionState.PREPARE_ABORT, txnMetadata.state) + + verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager, times(3)).getTransactionState(ArgumentMatchers.eq(transactionalId)) + verify(transactionManager).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + any[TxnTransitMetadata], any(), any(), any()) } + @Test + def shouldHandleTimeoutAtEpochOverflowBoundaryCorrectlyTV2(): Unit = { + // Test the scenario where we have an ongoing transaction at epoch 32766 (Short.MaxValue - 1) + // and the producer crashes/times out. This test verifies that the timeout handling + // correctly manages the epoch overflow scenario without causing failures. 
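For reference — not part of the patch: the boundary that this new timeout test and shouldNotCauseEpochOverflowWhenInitPidDuringOngoingTxnV2 above both pin down is plain 16-bit overflow on the producer epoch: a single bump from 32766 lands exactly on Short.MaxValue, while a second bump would wrap negative. A REPL-pasteable illustration:

val epochAtMaxBoundary: Short = (Short.MaxValue - 1).toShort  // 32766
val singleBump: Short = (epochAtMaxBoundary + 1).toShort      // 32767 == Short.MaxValue
val doubleBump: Short = (epochAtMaxBoundary + 2).toShort      // wraps around to -32768
assert(singleBump == Short.MaxValue && doubleBump < 0)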
+ + val epochAtMaxBoundary = (Short.MaxValue - 1).toShort // 32766 + val now = time.milliseconds() + + // Create transaction metadata at the epoch boundary that would cause overflow IFF double-incremented + val txnMetadata = new TransactionMetadata( + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + epochAtMaxBoundary, + RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs, + TransactionState.ONGOING, + partitions, + now, + now, + TV_2 + ) + assertTrue(txnMetadata.isProducerEpochExhausted) + + // Mock the transaction manager to return our test transaction as timed out + when(transactionManager.timedOutTransactions()) + .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, epochAtMaxBoundary))) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) + + // Mock the append operation to simulate successful write and update the metadata + when(transactionManager.appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + any[TxnTransitMetadata], + capturedErrorsCallback.capture(), + any(), + any()) + ).thenAnswer(invocation => { + val transitMetadata = invocation.getArgument[TxnTransitMetadata](2) + // Simulate the metadata update that would happen in the real appendTransactionToLog + txnMetadata.completeTransitionTo(transitMetadata) + capturedErrorsCallback.getValue.apply(Errors.NONE) + }) + + // Track the actual behavior + var callbackInvoked = false + var resultError: Errors = null + var resultProducerId: Long = -1 + var resultEpoch: Short = -1 + + def checkOnEndTransactionComplete(txnIdAndPidEpoch: TransactionalIdAndProducerIdEpoch) + (error: Errors, newProducerId: Long, newProducerEpoch: Short): Unit = { + callbackInvoked = true + resultError = error + resultProducerId = newProducerId + resultEpoch = newProducerEpoch + } + + // Execute the timeout abort process + coordinator.abortTimedOutTransactions(checkOnEndTransactionComplete) + + assertTrue(callbackInvoked, "Callback should have been invoked") + assertEquals(Errors.NONE, resultError, "Expected no errors in the callback") + assertEquals(producerId, resultProducerId, "Expected producer ID to match") + assertEquals(Short.MaxValue, resultEpoch, "Expected producer epoch to be Short.MaxValue (32767) single epoch bump") + + // Verify the transaction metadata was correctly updated to the final epoch + assertEquals(Short.MaxValue, txnMetadata.producerEpoch, + s"Expected transaction metadata producer epoch to be ${Short.MaxValue} " + + s"after timeout handling, but was ${txnMetadata.producerEpoch}" + ) + + // Verify the basic flow was attempted + verify(transactionManager).timedOutTransactions() + verify(transactionManager, atLeast(1)).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } + @Test def testInitProducerIdWithNoLastProducerData(): Unit = { // If the metadata doesn't include the previous producer data (for example, if it was written to the log by a broker // on an old version), the retry case should fail val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, 
RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1295,7 +1434,7 @@ class TransactionCoordinatorTest { def testFenceProducerWhenMappingExistsWithDifferentProducerId(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId + 1, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1319,7 +1458,7 @@ class TransactionCoordinatorTest { mockPidGenerator() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1335,7 +1474,7 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState = None + txnMetadata.pendingState(util.Optional.empty()) }) // Re-initialization should succeed and bump the producer epoch @@ -1366,7 +1505,7 @@ class TransactionCoordinatorTest { mockPidGenerator() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1383,9 +1522,9 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState = None - txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch - txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch + txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) + txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) }) // With producer epoch at 10, new producer calls InitProducerId and should get epoch 11 @@ -1415,7 +1554,7 @@ class TransactionCoordinatorTest { def testRetryInitProducerIdAfterProducerIdRotation(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(pidGenerator.generateProducerId()) .thenReturn(producerId + 1) @@ -1434,11 
+1573,11 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState = None - txnMetadata.producerId = capturedTxnTransitMetadata.getValue.producerId - txnMetadata.prevProducerId = capturedTxnTransitMetadata.getValue.prevProducerId - txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch - txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch + txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.setProducerId(capturedTxnTransitMetadata.getValue.producerId) + txnMetadata.setPrevProducerId(capturedTxnTransitMetadata.getValue.prevProducerId) + txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) + txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) }) // Bump epoch and cause producer ID to be rotated @@ -1468,7 +1607,7 @@ class TransactionCoordinatorTest { def testInitProducerIdWithInvalidEpochAfterProducerIdRotation(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) when(pidGenerator.generateProducerId()) .thenReturn(producerId + 1) @@ -1487,11 +1626,11 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState = None - txnMetadata.producerId = capturedTxnTransitMetadata.getValue.producerId - txnMetadata.prevProducerId = capturedTxnTransitMetadata.getValue.prevProducerId - txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch - txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch + txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.setProducerId(capturedTxnTransitMetadata.getValue.producerId) + txnMetadata.setPrevProducerId(capturedTxnTransitMetadata.getValue.prevProducerId) + txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) + txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) }) // Bump epoch and cause producer ID to be rotated @@ -1528,7 +1667,7 @@ class TransactionCoordinatorTest { def shouldAbortExpiredTransactionsInOngoingStateAndBumpEpoch(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1536,8 +1675,8 @@ class TransactionCoordinatorTest { .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. 
- val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.clone, now, + val expectedTransition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1567,7 +1706,7 @@ class TransactionCoordinatorTest { def shouldNotAcceptSmallerEpochDuringTransactionExpiration(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1577,7 +1716,7 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val bumpedTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 2).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 2).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, bumpedTxnMetadata)))) @@ -1593,8 +1732,8 @@ class TransactionCoordinatorTest { @Test def shouldNotAbortExpiredTransactionsThatHaveAPendingStateTransition(): Unit = { val metadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - metadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + metadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1612,13 +1751,13 @@ class TransactionCoordinatorTest { def shouldNotBumpEpochWhenAbortingExpiredTransactionIfAppendToLogFails(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) 
.thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) val txnMetadataAfterAppendFailure = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1626,8 +1765,8 @@ class TransactionCoordinatorTest { // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. val bumpedEpoch = (producerEpoch + 1).toShort - val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, bumpedEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.clone, now, + val expectedTransition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, bumpedEpoch, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1660,8 +1799,8 @@ class TransactionCoordinatorTest { @Test def shouldNotBumpEpochWithPendingTransaction(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) @@ -1695,7 +1834,7 @@ class TransactionCoordinatorTest { coordinator.startup(() => transactionStatePartitionCount, enableTransactionalIdExpiration = false) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Dead, mutable.Set.empty, time.milliseconds(), + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.DEAD, util.Set.of, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1721,7 +1860,7 @@ class TransactionCoordinatorTest { @Test def testDescribeTransactions(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) 
when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1735,9 +1874,11 @@ class TransactionCoordinatorTest { assertEquals(txnTimeoutMs, result.transactionTimeoutMs) assertEquals(time.milliseconds(), result.transactionStartTimeMs) - val addedPartitions = result.topics.asScala.flatMap { topicData => - topicData.partitions.asScala.map(partition => new TopicPartition(topicData.topic, partition)) - }.toSet + val addedPartitions = result.topics.stream.flatMap(topicData => + topicData.partitions.stream + .map(partition => new TopicPartition(topicData.topic, partition)) + ) + .collect(util.stream.Collectors.toSet()); assertEquals(partitions, addedPartitions) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) @@ -1747,9 +1888,9 @@ class TransactionCoordinatorTest { when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) .thenReturn(true) - // Since the clientTransactionVersion doesn't matter, use 2 since the states are PrepareCommit and PrepareAbort. + // Since the clientTransactionVersion doesn't matter, use 2 since the states are TransactionState.PREPARE_COMMIT and TransactionState.PREPARE_ABORT. val metadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_EPOCH, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, mutable.Set[TopicPartition](new TopicPartition("topic", 1)), 0, 0, TV_2) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, util.Set.of[TopicPartition](new TopicPartition("topic", 1)), 0, 0, TV_2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata)))) @@ -1768,7 +1909,7 @@ class TransactionCoordinatorTest { .thenReturn(true) val metadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, state, mutable.Set.empty[TopicPartition], time.milliseconds(), time.milliseconds(), clientTransactionVersion) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, state, util.Set.of, time.milliseconds(), time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata)))) @@ -1799,10 +1940,10 @@ class TransactionCoordinatorTest { private def mockPrepare(transactionState: TransactionState, clientTransactionVersion: TransactionVersion, runCallback: Boolean = false): TransactionMetadata = { val now = time.milliseconds() val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) - val transition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions.clone, now, now, clientTransactionVersion) + val transition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions, now, now, clientTransactionVersion) 
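For reference — not part of the patch: with the Scala named-argument TxnTransitMetadata(producerId = ..., prevProducerId = ..., ...) form deleted in the hunks above, the new `new TxnTransitMetadata(...)` calls are purely positional. The order, as documented by the deleted named form, is producerId, prevProducerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, txnState, topicPartitions, txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion. The sketch below restates that order with placeholder values; the package of TxnTransitMetadata is an assumption (taken to match the TransactionMetadata/TransactionState imports added elsewhere in this diff), since its import statement is not visible in this section.

import java.util
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.coordinator.transaction.{TransactionState, TxnTransitMetadata} // package assumed, see note above
import org.apache.kafka.server.common.TransactionVersion.TV_0

object TxnTransitMetadataSketch {
  val transition = new TxnTransitMetadata(
    1000L,                                        // producerId
    1000L,                                        // prevProducerId
    RecordBatch.NO_PRODUCER_ID,                   // nextProducerId
    5.toShort,                                    // producerEpoch
    RecordBatch.NO_PRODUCER_EPOCH,                // lastProducerEpoch
    60000,                                        // txnTimeoutMs
    TransactionState.PREPARE_ABORT,               // txnState
    util.Set.of(new TopicPartition("topic1", 0)), // topicPartitions
    0L,                                           // txnStartTimestamp
    0L,                                           // txnLastUpdateTimestamp
    TV_0)                                         // clientTransactionVersion
}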
when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, originalMetadata)))) @@ -1842,4 +1983,153 @@ class TransactionCoordinatorTest { else producerEpoch } + + @Test + def testTV2AllowsEpochReBumpingAfterFailedWrite(): Unit = { + // Test the complete TV2 flow: failed write → epoch fence → abort → retry with epoch bump + // This demonstrates that TV2 allows epoch re-bumping after failed writes (unlike TV1) + val producerEpoch = 1.toShort + val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_2) + + when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + .thenReturn(true) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) + + // First attempt fails with COORDINATOR_NOT_AVAILABLE + when(transactionManager.appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + any(), + any(), + any(), + any() + )).thenAnswer(invocation => { + val callback = invocation.getArgument[Errors => Unit](3) + + // Simulate the real TransactionStateManager behavior: reset pendingState on failure + // since handleInitProducerId doesn't provide a custom retryOnError function + txnMetadata.pendingState(util.Optional.empty()) + + // For TV2, hasFailedEpochFence is NOT set to true, allowing epoch bumps on retry + // The epoch remains at its original value (1) since completeTransitionTo was never called + + callback.apply(Errors.COORDINATOR_NOT_AVAILABLE) + }) + + coordinator.handleInitProducerId( + transactionalId, + txnTimeoutMs, + enableTwoPCFlag = false, + keepPreparedTxn = false, + None, + initProducerIdMockCallback + ) + assertEquals(InitProducerIdResult(-1, -1, Errors.COORDINATOR_NOT_AVAILABLE), result) + + // After the first failed attempt, the state should be: + // - hasFailedEpochFence = false (NOT set for TV2) + // - pendingState = None (reset by TransactionStateManager) + // - producerEpoch = 1 (unchanged since completeTransitionTo was never called) + // - transaction still ONGOING + + // Second attempt: Should abort the ongoing transaction + reset(transactionManager) + when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + .thenReturn(true) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) + + // Mock the appendTransactionToLog to succeed for the endTransaction call + when(transactionManager.appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + any(), + any(), + any(), + any() + )).thenAnswer(invocation => { + val newMetadata = invocation.getArgument[TxnTransitMetadata](2) + val callback = invocation.getArgument[Errors => Unit](3) + + // Complete the transition and call the callback with success + txnMetadata.completeTransitionTo(newMetadata) + callback.apply(Errors.NONE) + }) + + // Mock the transactionMarkerChannelManager to simulate the second write (PREPARE_ABORT -> 
COMPLETE_ABORT) + doAnswer(invocation => { + val newMetadata = invocation.getArgument[TxnTransitMetadata](3) + // Simulate the completion of transaction markers and the second write + // This would normally happen asynchronously after markers are sent + txnMetadata.completeTransitionTo(newMetadata) // This transitions to COMPLETE_ABORT + txnMetadata.pendingState(util.Optional.empty()) + + null + }).when(transactionMarkerChannelManager).addTxnMarkersToSend( + ArgumentMatchers.eq(coordinatorEpoch), + ArgumentMatchers.eq(TransactionResult.ABORT), + ArgumentMatchers.eq(txnMetadata), + any() + ) + + coordinator.handleInitProducerId( + transactionalId, + txnTimeoutMs, + enableTwoPCFlag = false, + keepPreparedTxn = false, + None, + initProducerIdMockCallback + ) + + // The second attempt should return CONCURRENT_TRANSACTIONS (this is intentional) + assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) + + // The transactionMarkerChannelManager mock should have completed the transition to COMPLETE_ABORT + // Verify that hasFailedEpochFence was never set to true for TV2, allowing future epoch bumps + assertFalse(txnMetadata.hasFailedEpochFence) + + // Third attempt: Client retries after CONCURRENT_TRANSACTIONS + reset(transactionManager) + when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + .thenReturn(true) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) + + when(transactionManager.appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(coordinatorEpoch), + any(), + any(), + any(), + any() + )).thenAnswer(invocation => { + val newMetadata = invocation.getArgument[TxnTransitMetadata](2) + val callback = invocation.getArgument[Errors => Unit](3) + + // Complete the transition and call the callback with success + txnMetadata.completeTransitionTo(newMetadata) + callback.apply(Errors.NONE) + }) + + coordinator.handleInitProducerId( + transactionalId, + txnTimeoutMs, + enableTwoPCFlag = false, + keepPreparedTxn = false, + None, + initProducerIdMockCallback + ) + + // The third attempt should succeed with epoch 3 (2 + 1) + // This demonstrates that TV2 allows epoch re-bumping after failed writes + assertEquals(InitProducerIdResult(producerId, 3.toShort, Errors.NONE), result) + + // Final verification that hasFailedEpochFence was never set to true for TV2 + assertFalse(txnMetadata.hasFailedEpochFence) + } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala deleted file mode 100644 index 8a852d70cbe96..0000000000000 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.coordinator.transaction - - -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} -import org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection -import org.apache.kafka.common.protocol.types.{CompactArrayOf, Field, Schema, Struct, Type} -import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} -import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} -import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} -import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue, fail} -import org.junit.jupiter.api.Test - -import java.nio.ByteBuffer -import scala.collection.{Seq, mutable} -import scala.jdk.CollectionConverters._ - -class TransactionLogTest { - - val producerEpoch: Short = 0 - val transactionTimeoutMs: Int = 1000 - - val topicPartitions: Set[TopicPartition] = Set[TopicPartition](new TopicPartition("topic1", 0), - new TopicPartition("topic1", 1), - new TopicPartition("topic2", 0), - new TopicPartition("topic2", 1), - new TopicPartition("topic2", 2)) - - @Test - def shouldThrowExceptionWriteInvalidTxn(): Unit = { - val transactionalId = "transactionalId" - val producerId = 23423L - - val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Empty, collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) - txnMetadata.addPartitions(topicPartitions) - - assertThrows(classOf[IllegalStateException], () => TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2)) - } - - @Test - def shouldReadWriteMessages(): Unit = { - val pidMappings = Map[String, Long]("zero" -> 0L, - "one" -> 1L, - "two" -> 2L, - "three" -> 3L, - "four" -> 4L, - "five" -> 5L) - - val transactionStates = Map[Long, TransactionState](0L -> Empty, - 1L -> Ongoing, - 2L -> PrepareCommit, - 3L -> CompleteCommit, - 4L -> PrepareAbort, - 5L -> CompleteAbort) - - // generate transaction log messages - val txnRecords = pidMappings.map { case (transactionalId, producerId) => - val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, transactionStates(producerId), collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) - - if (!txnMetadata.state.equals(Empty)) - txnMetadata.addPartitions(topicPartitions) - - val keyBytes = TransactionLog.keyToBytes(transactionalId) - val valueBytes = TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2) - - new SimpleRecord(keyBytes, valueBytes) - }.toSeq - - val records = MemoryRecords.withRecords(0, Compression.NONE, txnRecords: _*) - - var count = 0 - for (record <- records.records.asScala) { - TransactionLog.readTxnRecordKey(record.key) match { - case Left(version) => fail(s"Unexpected record version: $version") - case 
Right(transactionalId) => - val txnMetadata = TransactionLog.readTxnRecordValue(transactionalId, record.value).get - - assertEquals(pidMappings(transactionalId), txnMetadata.producerId) - assertEquals(producerEpoch, txnMetadata.producerEpoch) - assertEquals(transactionTimeoutMs, txnMetadata.txnTimeoutMs) - assertEquals(transactionStates(txnMetadata.producerId), txnMetadata.state) - - if (txnMetadata.state.equals(Empty)) - assertEquals(Set.empty[TopicPartition], txnMetadata.topicPartitions) - else - assertEquals(topicPartitions, txnMetadata.topicPartitions) - - count = count + 1 - } - } - - assertEquals(pidMappings.size, count) - } - - @Test - def testSerializeTransactionLogValueToHighestNonFlexibleVersion(): Unit = { - val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, mutable.Set.empty, 500, 500, TV_0) - val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_0)) - assertEquals(0, txnLogValueBuffer.getShort) - } - - @Test - def testSerializeTransactionLogValueToFlexibleVersion(): Unit = { - val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, mutable.Set.empty, 500, 500, TV_2) - val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_2)) - assertEquals(TransactionLogValue.HIGHEST_SUPPORTED_VERSION, txnLogValueBuffer.getShort) - } - - @Test - def testDeserializeHighestSupportedTransactionLogValue(): Unit = { - val txnPartitions = new TransactionLogValue.PartitionsSchema() - .setTopic("topic") - .setPartitionIds(java.util.Collections.singletonList(0)) - - val txnLogValue = new TransactionLogValue() - .setProducerId(100) - .setProducerEpoch(50.toShort) - .setTransactionStatus(CompleteCommit.id) - .setTransactionStartTimestampMs(750L) - .setTransactionLastUpdateTimestampMs(1000L) - .setTransactionTimeoutMs(500) - .setTransactionPartitions(java.util.Collections.singletonList(txnPartitions)) - - val serialized = MessageUtil.toVersionPrefixedByteBuffer(1, txnLogValue) - val deserialized = TransactionLog.readTxnRecordValue("transactionId", serialized).get - - assertEquals(100, deserialized.producerId) - assertEquals(50, deserialized.producerEpoch) - assertEquals(CompleteCommit, deserialized.state) - assertEquals(750L, deserialized.txnStartTimestamp) - assertEquals(1000L, deserialized.txnLastUpdateTimestamp) - assertEquals(500, deserialized.txnTimeoutMs) - - val actualTxnPartitions = deserialized.topicPartitions - assertEquals(1, actualTxnPartitions.size) - assertTrue(actualTxnPartitions.contains(new TopicPartition("topic", 0))) - } - - @Test - def testDeserializeFutureTransactionLogValue(): Unit = { - // Copy of TransactionLogValue.PartitionsSchema.SCHEMA_1 with a few - // additional tagged fields. 
- val futurePartitionsSchema = new Schema( - new Field("topic", Type.COMPACT_STRING, ""), - new Field("partition_ids", new CompactArrayOf(Type.INT32), ""), - TaggedFieldsSection.of( - Int.box(100), new Field("partition_foo", Type.STRING, ""), - Int.box(101), new Field("partition_foo", Type.INT32, "") - ) - ) - - // Create TransactionLogValue.PartitionsSchema with tagged fields - val txnPartitions = new Struct(futurePartitionsSchema) - txnPartitions.set("topic", "topic") - txnPartitions.set("partition_ids", Array(Integer.valueOf(1))) - val txnPartitionsTaggedFields = new java.util.TreeMap[Integer, Any]() - txnPartitionsTaggedFields.put(100, "foo") - txnPartitionsTaggedFields.put(101, 4000) - txnPartitions.set("_tagged_fields", txnPartitionsTaggedFields) - - // Copy of TransactionLogValue.SCHEMA_1 with a few - // additional tagged fields. - val futureTransactionLogValueSchema = new Schema( - new Field("producer_id", Type.INT64, ""), - new Field("producer_epoch", Type.INT16, ""), - new Field("transaction_timeout_ms", Type.INT32, ""), - new Field("transaction_status", Type.INT8, ""), - new Field("transaction_partitions", CompactArrayOf.nullable(futurePartitionsSchema), ""), - new Field("transaction_last_update_timestamp_ms", Type.INT64, ""), - new Field("transaction_start_timestamp_ms", Type.INT64, ""), - TaggedFieldsSection.of( - Int.box(100), new Field("txn_foo", Type.STRING, ""), - Int.box(101), new Field("txn_bar", Type.INT32, "") - ) - ) - - // Create TransactionLogValue with tagged fields - val transactionLogValue = new Struct(futureTransactionLogValueSchema) - transactionLogValue.set("producer_id", 1000L) - transactionLogValue.set("producer_epoch", 100.toShort) - transactionLogValue.set("transaction_timeout_ms", 1000) - transactionLogValue.set("transaction_status", CompleteCommit.id) - transactionLogValue.set("transaction_partitions", Array(txnPartitions)) - transactionLogValue.set("transaction_last_update_timestamp_ms", 2000L) - transactionLogValue.set("transaction_start_timestamp_ms", 3000L) - val txnLogValueTaggedFields = new java.util.TreeMap[Integer, Any]() - txnLogValueTaggedFields.put(100, "foo") - txnLogValueTaggedFields.put(101, 4000) - transactionLogValue.set("_tagged_fields", txnLogValueTaggedFields) - - // Prepare the buffer. - val buffer = ByteBuffer.allocate(transactionLogValue.sizeOf() + 2) - buffer.put(0.toByte) - buffer.put(1.toByte) // Add 1 as version. - transactionLogValue.writeTo(buffer) - buffer.flip() - - // Read the buffer with the real schema and verify that tagged - // fields were read but ignored. - buffer.getShort() // Skip version. - val value = new TransactionLogValue(new ByteBufferAccessor(buffer), 1.toShort) - assertEquals(Seq(100, 101), value.unknownTaggedFields().asScala.map(_.tag)) - assertEquals(Seq(100, 101), value.transactionPartitions().get(0).unknownTaggedFields().asScala.map(_.tag)) - - // Read the buffer with readTxnRecordValue. 
- buffer.rewind() - val txnMetadata = TransactionLog.readTxnRecordValue("transaction-id", buffer).get - assertEquals(1000L, txnMetadata.producerId) - assertEquals(100, txnMetadata.producerEpoch) - assertEquals(1000L, txnMetadata.txnTimeoutMs) - assertEquals(CompleteCommit, txnMetadata.state) - assertEquals(Set(new TopicPartition("topic", 1)), txnMetadata.topicPartitions) - assertEquals(2000L, txnMetadata.txnLastUpdateTimestamp) - assertEquals(3000L, txnMetadata.txnStartTimestamp) - } - - @Test - def testReadTxnRecordKeyCanReadUnknownMessage(): Unit = { - val record = new TransactionLogKey() - val unknownRecord = MessageUtil.toVersionPrefixedBytes(Short.MaxValue, record) - TransactionLog.readTxnRecordKey(ByteBuffer.wrap(unknownRecord)) match { - case Left(version) => assertEquals(Short.MaxValue, version) - case Right(_) => fail("Expected to read unknown message") - } - } -} diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala index 321e6e793f474..7699d643a3ec0 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala @@ -17,8 +17,6 @@ package kafka.coordinator.transaction import java.util -import java.util.Arrays.asList -import java.util.Collections import java.util.Optional import java.util.concurrent.{Callable, Executors, Future} import kafka.server.KafkaConfig @@ -29,6 +27,7 @@ import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{RequestHeader, TransactionResult, WriteTxnMarkersRequest, WriteTxnMarkersResponse} import org.apache.kafka.common.utils.MockTime import org.apache.kafka.common.{Node, TopicPartition} +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{MetadataVersion, TransactionVersion} import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} @@ -42,7 +41,6 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{clearInvocations, mock, mockConstruction, times, verify, verifyNoMoreInteractions, when} import scala.jdk.CollectionConverters._ -import scala.collection.mutable import scala.util.Try class TransactionMarkerChannelManagerTest { @@ -68,9 +66,9 @@ class TransactionMarkerChannelManagerTest { private val txnTimeoutMs = 0 private val txnResult = TransactionResult.COMMIT private val txnMetadata1 = new TransactionMetadata(transactionalId1, producerId1, producerId1, RecordBatch.NO_PRODUCER_ID, - producerEpoch, lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](partition1, partition2), 0L, 0L, TransactionVersion.TV_2) + producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(partition1, partition2), 0L, 0L, TransactionVersion.TV_2) private val txnMetadata2 = new TransactionMetadata(transactionalId2, producerId2, producerId2, RecordBatch.NO_PRODUCER_ID, - producerEpoch, lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](partition1), 0L, 0L, TransactionVersion.TV_2) + producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(partition1), 0L, 0L, TransactionVersion.TV_2) private val capturedErrorsCallback: ArgumentCaptor[Errors => Unit] = 
ArgumentCaptor.forClass(classOf[Errors => Unit]) private val time = new MockTime @@ -146,33 +144,33 @@ class TransactionMarkerChannelManagerTest { var addMarkerFuture: Future[Try[Unit]] = null val executor = Executors.newFixedThreadPool(1) - txnMetadata2.lock.lock() try { - addMarkerFuture = executor.submit((() => { - Try(channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, + txnMetadata2.inLock(() => { + addMarkerFuture = executor.submit((() => { + Try(channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata2, expectedTransition)) - }): Callable[Try[Unit]]) - - val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) - val response = new WriteTxnMarkersResponse( - Collections.singletonMap(producerId2: java.lang.Long, Collections.singletonMap(partition1, Errors.NONE))) - val clientResponse = new ClientResponse(header, null, null, - time.milliseconds(), time.milliseconds(), false, null, null, - response) - - TestUtils.waitUntilTrue(() => { - val requests = channelManager.generateRequests().asScala - if (requests.nonEmpty) { - assertEquals(1, requests.size) - val request = requests.head - request.handler.onComplete(clientResponse) - true - } else { - false - } - }, "Timed out waiting for expected WriteTxnMarkers request") + }): Callable[Try[Unit]]) + + val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) + val response = new WriteTxnMarkersResponse( + util.Map.of(producerId2: java.lang.Long, util.Map.of(partition1, Errors.NONE))) + val clientResponse = new ClientResponse(header, null, null, + time.milliseconds(), time.milliseconds(), false, null, null, + response) + + TestUtils.waitUntilTrue(() => { + val requests = channelManager.generateRequests().asScala + if (requests.nonEmpty) { + assertEquals(1, requests.size) + val request = requests.head + request.handler.onComplete(clientResponse) + true + } else { + false + } + }, "Timed out waiting for expected WriteTxnMarkers request") + }) } finally { - txnMetadata2.lock.unlock() executor.shutdown() } @@ -204,7 +202,7 @@ class TransactionMarkerChannelManagerTest { // Build a successful client response. 
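Besides swapping Collections.singletonMap and asList for util.Map.of and util.List.of, the rewritten block above replaces the explicit txnMetadata2.lock.lock()/unlock() pair with a call to TransactionMetadata#inLock(...), so the test body still runs while the metadata lock is held. A sketch of that usage pattern (withMetadataLock is a hypothetical helper, not part of the codebase):

    import org.apache.kafka.coordinator.transaction.TransactionMetadata

    // Run a block while holding the transaction metadata lock, mirroring
    // the txnMetadata2.inLock(() => { ... }) call in the updated test.
    def withMetadataLock(metadata: TransactionMetadata)(body: => Unit): Unit =
      metadata.inLock(() => body)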
val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) val successfulResponse = new WriteTxnMarkersResponse( - Collections.singletonMap(producerId2: java.lang.Long, Collections.singletonMap(partition1, Errors.NONE))) + util.Map.of(producerId2: java.lang.Long, util.Map.of(partition1, Errors.NONE))) val successfulClientResponse = new ClientResponse(header, null, null, time.milliseconds(), time.milliseconds(), false, null, null, successfulResponse) @@ -301,10 +299,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition2)) val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( - asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() + util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)), + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)))).build() val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( - asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() + util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition2)))).build() val requests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => (handler.destination, handler.request.asInstanceOf[WriteTxnMarkersRequest.Builder].build()) @@ -371,10 +369,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition2)) val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( - asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() + util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)), + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)))).build() val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( - asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() + util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition2)))).build() val firstDrainedRequests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => (handler.destination, handler.request.asInstanceOf[WriteTxnMarkersRequest.Builder].build()) @@ -479,8 +477,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(None, txnMetadata2.pendingState) - assertEquals(CompleteCommit, txnMetadata2.state) + assertEquals(Optional.empty(), txnMetadata2.pendingState) + assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata2.state) } @Test 
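Taken together, these hunks migrate the tests onto the Java TransactionMetadata API: a positional constructor, java.util collections for topic partitions and expected requests, TransactionState enum constants in place of the Scala case objects, and Optional-returning accessors such as pendingState. A minimal construction sketch with illustrative values, following the argument order shown in the constructions in this diff:

    import java.util
    import java.util.Optional
    import org.apache.kafka.common.record.RecordBatch
    import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState}
    import org.apache.kafka.server.common.TransactionVersion

    val metadata = new TransactionMetadata(
      "example-txn-id",               // transactionalId (illustrative)
      1L,                             // producerId
      RecordBatch.NO_PRODUCER_ID,     // prevProducerId
      RecordBatch.NO_PRODUCER_ID,     // nextProducerId
      0.toShort,                      // producerEpoch
      RecordBatch.NO_PRODUCER_EPOCH,  // lastProducerEpoch
      30000,                          // txnTimeoutMs
      TransactionState.EMPTY,         // state
      util.Set.of(),                  // topicPartitions
      -1L,                            // txnStartTimestamp
      0L,                             // txnLastUpdateTimestamp
      TransactionVersion.TV_2)        // clientTransactionVersion

    assert(metadata.state == TransactionState.EMPTY)
    assert(metadata.pendingState == Optional.empty())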
@@ -508,7 +506,7 @@ class TransactionMarkerChannelManagerTest { any(), any())) .thenAnswer(_ => { - txnMetadata2.pendingState = None + txnMetadata2.pendingState(util.Optional.empty()) capturedErrorsCallback.getValue.apply(Errors.NOT_COORDINATOR) }) @@ -532,8 +530,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(None, txnMetadata2.pendingState) - assertEquals(PrepareCommit, txnMetadata2.state) + assertEquals(Optional.empty(), txnMetadata2.pendingState) + assertEquals(TransactionState.PREPARE_COMMIT, txnMetadata2.state) } @ParameterizedTest @@ -593,8 +591,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(None, txnMetadata2.pendingState) - assertEquals(CompleteCommit, txnMetadata2.state) + assertEquals(Optional.empty(), txnMetadata2.pendingState) + assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata2.state) } private def createPidErrorMap(errors: Errors): util.HashMap[java.lang.Long, util.Map[TopicPartition, Errors]] = { @@ -633,11 +631,11 @@ class TransactionMarkerChannelManagerTest { txnMetadata: TransactionMetadata ): Unit = { if (isTransactionV2Enabled) { - txnMetadata.clientTransactionVersion = TransactionVersion.TV_2 - txnMetadata.producerEpoch = (producerEpoch + 1).toShort - txnMetadata.lastProducerEpoch = producerEpoch + txnMetadata.clientTransactionVersion(TransactionVersion.TV_2) + txnMetadata.setProducerEpoch((producerEpoch + 1).toShort) + txnMetadata.setLastProducerEpoch(producerEpoch) } else { - txnMetadata.clientTransactionVersion = TransactionVersion.TV_1 + txnMetadata.clientTransactionVersion(TransactionVersion.TV_1) } } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala index 72ffa5629c04e..e955a9009ce9f 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala @@ -17,20 +17,18 @@ package kafka.coordinator.transaction import java.{lang, util} -import java.util.Arrays.asList import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{RequestHeader, TransactionResult, WriteTxnMarkersRequest, WriteTxnMarkersResponse} +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState} import org.apache.kafka.server.common.TransactionVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.ArgumentMatchers import org.mockito.Mockito.{mock, verify, when} -import scala.collection.mutable - class TransactionMarkerRequestCompletionHandlerTest { private val brokerId = 0 @@ -44,11 +42,11 @@ class TransactionMarkerRequestCompletionHandlerTest { private val txnResult = TransactionResult.COMMIT private val topicPartition = new TopicPartition("topic1", 0) private val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, 
lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](topicPartition), 0L, 0L, TransactionVersion.TV_2) - private val pendingCompleteTxnAndMarkers = asList( + producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(topicPartition), 0L, 0L, TransactionVersion.TV_2) + private val pendingCompleteTxnAndMarkers = util.List.of( PendingCompleteTxnAndMarkerEntry( PendingCompleteTxn(transactionalId, coordinatorEpoch, txnMetadata, txnMetadata.prepareComplete(42)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId, producerEpoch, coordinatorEpoch, txnResult, asList(topicPartition)))) + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId, producerEpoch, coordinatorEpoch, txnResult, util.List.of(topicPartition)))) private val markerChannelManager: TransactionMarkerChannelManager = mock(classOf[TransactionMarkerChannelManager]) @@ -194,7 +192,7 @@ class TransactionMarkerRequestCompletionHandlerTest { handler.onComplete(new ClientResponse(new RequestHeader(ApiKeys.PRODUCE, 0, "client", 1), null, null, 0, 0, false, null, null, response)) - assertEquals(txnMetadata.topicPartitions, mutable.Set[TopicPartition](topicPartition)) + assertEquals(txnMetadata.topicPartitions, util.Set.of(topicPartition)) verify(markerChannelManager).addTxnMarkersToBrokerQueue(producerId, producerEpoch, txnResult, pendingCompleteTxnAndMarkers.get(0).pendingCompleteTxn, Set[TopicPartition](topicPartition)) diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala index 12536cddff731..87a18b18dc09b 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala @@ -19,6 +19,7 @@ package kafka.coordinator.transaction import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} import org.apache.kafka.server.common.TransactionVersion import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} import org.apache.kafka.server.util.MockTime @@ -27,7 +28,11 @@ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource +import java.util +import java.util.Optional + import scala.collection.mutable +import scala.jdk.CollectionConverters._ class TransactionMetadataTest { @@ -40,19 +45,20 @@ class TransactionMetadataTest { val producerEpoch = RecordBatch.NO_PRODUCER_EPOCH val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, None) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + val transitMetadata = 
prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.empty()) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(0, txnMetadata.producerEpoch) @@ -64,19 +70,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, None) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.empty()) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -88,21 +95,22 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareIncrementProducerEpoch(30000, - None, time.milliseconds())) + Optional.empty, time.milliseconds())) } @Test @@ -110,20 +118,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = -1, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, 
txnMetadata.producerEpoch) @@ -135,20 +143,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = CompleteAbort, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = time.milliseconds() - 1, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.COMPLETE_ABORT, + util.Set.of, + time.milliseconds() - 1, + time.milliseconds(), + TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -160,20 +168,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = CompleteCommit, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = time.milliseconds() - 1, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.COMPLETE_COMMIT, + util.Set.of, + time.milliseconds() - 1, + time.milliseconds(), + TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -184,21 +192,21 @@ class TransactionMetadataTest { def testTolerateUpdateTimeShiftDuringEpochBump(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + 1L, + time.milliseconds(), + TV_0) // let new time be smaller - val transitMetadata = 
prepareSuccessfulIncrementProducerEpoch(txnMetadata, Option(producerEpoch), + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch), Some(time.milliseconds() - 1)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) @@ -212,21 +220,21 @@ class TransactionMetadataTest { def testTolerateUpdateTimeResetDuringProducerIdRotation(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + 1L, + time.milliseconds(), + TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareProducerIdRotation(producerId + 1, 30000, time.milliseconds() - 1, recordLastEpoch = true) + val transitMetadata = txnMetadata.prepareProducerIdRotation(producerId + 1, 30000, time.milliseconds() - 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId + 1, txnMetadata.producerId) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) @@ -239,33 +247,33 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringAddPartitions(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = time.milliseconds(), - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - // let new time be smaller; when transiting from Empty the start time would be updated to the update-time - var transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0)), time.milliseconds() - 1, TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + time.milliseconds(), + time.milliseconds(), + TV_0) + + // let new time be smaller; when transiting from TransactionState.EMPTY the start time would be updated to the update-time + var transitMetadata = txnMetadata.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0)), time.milliseconds() - 1, TV_0) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0)), txnMetadata.topicPartitions) + assertEquals(util.Set.of(new TopicPartition("topic1", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) assertEquals(time.milliseconds() - 1, txnMetadata.txnStartTimestamp) assertEquals(time.milliseconds() - 1, 
txnMetadata.txnLastUpdateTimestamp) - // add another partition, check that in Ongoing state the start timestamp would not change to update time - transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds() - 2, TV_0) + // add another partition, check that in TransactionState.ONGOING state the start timestamp would not change to update time + transitMetadata = txnMetadata.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds() - 2, TV_0) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic2", 0)), txnMetadata.topicPartitions) + assertEquals(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic2", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -277,23 +285,23 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringPrepareCommit(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + 1L, + time.milliseconds(), + TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(PrepareCommit, txnMetadata.state) + assertEquals(TransactionState.PREPARE_COMMIT, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -305,23 +313,23 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringPrepareAbort(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + 1L, + time.milliseconds(), + TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + val 
transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(PrepareAbort, txnMetadata.state) + assertEquals(TransactionState.PREPARE_ABORT, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -336,25 +344,25 @@ class TransactionMetadataTest { val producerEpoch: Short = 1 val lastProducerEpoch: Short = 0 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = lastProducerEpoch, - txnTimeoutMs = 30000, - state = PrepareCommit, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = clientTransactionVersion + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + lastProducerEpoch, + 30000, + TransactionState.PREPARE_COMMIT, + util.Set.of(), + 1L, + time.milliseconds(), + clientTransactionVersion ) // let new time be smaller val transitMetadata = txnMetadata.prepareComplete(time.milliseconds() - 1) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(CompleteCommit, txnMetadata.state) + assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -369,25 +377,25 @@ class TransactionMetadataTest { val producerEpoch: Short = 1 val lastProducerEpoch: Short = 0 val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = lastProducerEpoch, - txnTimeoutMs = 30000, - state = PrepareAbort, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = 1L, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = clientTransactionVersion + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + lastProducerEpoch, + 30000, + TransactionState.PREPARE_ABORT, + util.Set.of, + 1L, + time.milliseconds(), + clientTransactionVersion ) // let new time be smaller val transitMetadata = txnMetadata.prepareComplete(time.milliseconds() - 1) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(CompleteAbort, txnMetadata.state) + assertEquals(TransactionState.COMPLETE_ABORT, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -400,28 +408,29 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - 
txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + -1, + time.milliseconds(), + TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) val fencingTransitMetadata = txnMetadata.prepareFenceProducerEpoch() assertEquals(Short.MaxValue, fencingTransitMetadata.producerEpoch) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, fencingTransitMetadata.lastProducerEpoch) - assertEquals(Some(PrepareEpochFence), txnMetadata.pendingState) + assertEquals(Optional.of(TransactionState.PREPARE_EPOCH_FENCE), txnMetadata.pendingState) // We should reset the pending state to make way for the abort transition. - txnMetadata.pendingState = None + txnMetadata.pendingState(Optional.empty()) - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, transitMetadata.producerId) } @@ -431,17 +440,18 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = CompleteCommit, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.COMPLETE_COMMIT, + util.Set.of, + -1, + time.milliseconds(), + TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) @@ -452,17 +462,18 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = CompleteAbort, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.COMPLETE_ABORT, + util.Set.of, + -1, + time.milliseconds(), + TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) @@ -473,17 +484,18 @@ class TransactionMetadataTest { val producerEpoch = Short.MaxValue val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - 
txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + -1, + time.milliseconds(), + TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) } @@ -493,20 +505,21 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) val newProducerId = 9893L - val transitMetadata = txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), recordLastEpoch = true) + val transitMetadata = txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(newProducerId, txnMetadata.producerId) assertEquals(producerId, txnMetadata.prevProducerId) @@ -520,20 +533,20 @@ class TransactionMetadataTest { val producerEpoch = 10.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = time.milliseconds(), - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_2) - - var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + time.milliseconds(), + time.milliseconds(), + TV_2) + + var transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) @@ -552,22 +565,22 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Ongoing, - topicPartitions = mutable.Set.empty, - txnStartTimestamp = time.milliseconds(), - 
txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_2) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.ONGOING, + util.Set.of, + time.milliseconds(), + time.milliseconds(), + TV_2) assertTrue(txnMetadata.isProducerEpochExhausted) val newProducerId = 9893L - var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, newProducerId, time.milliseconds() - 1, false) + var transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, newProducerId, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(Short.MaxValue, txnMetadata.producerEpoch) @@ -584,21 +597,21 @@ class TransactionMetadataTest { @Test def testRotateProducerIdInOngoingState(): Unit = { - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(Ongoing, TV_0)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.ONGOING, TV_0)) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def testRotateProducerIdInPrepareAbortState(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(PrepareAbort, clientTransactionVersion)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.PREPARE_ABORT, clientTransactionVersion)) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def testRotateProducerIdInPrepareCommitState(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(PrepareCommit, clientTransactionVersion)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.PREPARE_COMMIT, clientTransactionVersion)) } @Test @@ -606,19 +619,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(producerEpoch)) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(0, txnMetadata.producerEpoch) @@ -630,19 +644,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = 
RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(producerEpoch)) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -655,19 +670,20 @@ class TransactionMetadataTest { val lastProducerEpoch = (producerEpoch - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = RecordBatch.NO_PRODUCER_ID, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = lastProducerEpoch, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(lastProducerEpoch)) + transactionalId, + producerId, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + lastProducerEpoch, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(lastProducerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -680,32 +696,34 @@ class TransactionMetadataTest { val lastProducerEpoch = (producerEpoch - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = producerId, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = lastProducerEpoch, - txnTimeoutMs = 30000, - state = Empty, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = TV_0) - - val result = txnMetadata.prepareIncrementProducerEpoch(30000, Some((lastProducerEpoch - 1).toShort), - time.milliseconds()) - assertEquals(Left(Errors.PRODUCER_FENCED), result) + transactionalId, + producerId, + producerId, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + lastProducerEpoch, + 30000, + TransactionState.EMPTY, + util.Set.of, + -1, + time.milliseconds(), + TV_0) + + assertThrows(Errors.PRODUCER_FENCED.exception().getClass, () => + txnMetadata.prepareIncrementProducerEpoch(30000, Optional.of((lastProducerEpoch - 1).toShort), + time.milliseconds()) + ) } @Test def testTransactionStateIdAndNameMapping(): Unit = { - for (state <- TransactionState.AllStates) { + for (state <- TransactionState.ALL_STATES.asScala) { assertEquals(state, TransactionState.fromId(state.id)) - assertEquals(Some(state), TransactionState.fromName(state.name)) + assertEquals(Optional.of(state), TransactionState.fromName(state.stateName)) - if (state != Dead) { - val 
clientTransactionState = org.apache.kafka.clients.admin.TransactionState.parse(state.name) - assertEquals(state.name, clientTransactionState.toString) + if (state != TransactionState.DEAD) { + val clientTransactionState = org.apache.kafka.clients.admin.TransactionState.parse(state.stateName) + assertEquals(state.stateName, clientTransactionState.toString) assertNotEquals(org.apache.kafka.clients.admin.TransactionState.UNKNOWN, clientTransactionState) } } @@ -714,27 +732,27 @@ class TransactionMetadataTest { @Test def testAllTransactionStatesAreMapped(): Unit = { val unmatchedStates = mutable.Set( - Empty, - Ongoing, - PrepareCommit, - PrepareAbort, - CompleteCommit, - CompleteAbort, - PrepareEpochFence, - Dead + TransactionState.EMPTY, + TransactionState.ONGOING, + TransactionState.PREPARE_COMMIT, + TransactionState.PREPARE_ABORT, + TransactionState.COMPLETE_COMMIT, + TransactionState.COMPLETE_ABORT, + TransactionState.PREPARE_EPOCH_FENCE, + TransactionState.DEAD ) // The exhaustive match is intentional here to ensure that we are // forced to update the test case if a new state is added. - TransactionState.AllStates.foreach { - case Empty => assertTrue(unmatchedStates.remove(Empty)) - case Ongoing => assertTrue(unmatchedStates.remove(Ongoing)) - case PrepareCommit => assertTrue(unmatchedStates.remove(PrepareCommit)) - case PrepareAbort => assertTrue(unmatchedStates.remove(PrepareAbort)) - case CompleteCommit => assertTrue(unmatchedStates.remove(CompleteCommit)) - case CompleteAbort => assertTrue(unmatchedStates.remove(CompleteAbort)) - case PrepareEpochFence => assertTrue(unmatchedStates.remove(PrepareEpochFence)) - case Dead => assertTrue(unmatchedStates.remove(Dead)) + TransactionState.ALL_STATES.asScala.foreach { + case TransactionState.EMPTY => assertTrue(unmatchedStates.remove(TransactionState.EMPTY)) + case TransactionState.ONGOING => assertTrue(unmatchedStates.remove(TransactionState.ONGOING)) + case TransactionState.PREPARE_COMMIT => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_COMMIT)) + case TransactionState.PREPARE_ABORT => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_ABORT)) + case TransactionState.COMPLETE_COMMIT => assertTrue(unmatchedStates.remove(TransactionState.COMPLETE_COMMIT)) + case TransactionState.COMPLETE_ABORT => assertTrue(unmatchedStates.remove(TransactionState.COMPLETE_ABORT)) + case TransactionState.PREPARE_EPOCH_FENCE => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_EPOCH_FENCE)) + case TransactionState.DEAD => assertTrue(unmatchedStates.remove(TransactionState.DEAD)) } assertEquals(Set.empty, unmatchedStates) @@ -744,27 +762,26 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId = transactionalId, - producerId = producerId, - prevProducerId = producerId, - nextProducerId = RecordBatch.NO_PRODUCER_ID, - producerEpoch = producerEpoch, - lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs = 30000, - state = state, - topicPartitions = mutable.Set.empty, - txnLastUpdateTimestamp = time.milliseconds(), - clientTransactionVersion = clientTransactionVersion) + transactionalId, + producerId, + producerId, + RecordBatch.NO_PRODUCER_ID, + producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, + 30000, + state, + util.Set.of, + -1, + time.milliseconds(), + clientTransactionVersion) val newProducerId = 9893L - txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), recordLastEpoch = false) + 
txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), false) } private def prepareSuccessfulIncrementProducerEpoch(txnMetadata: TransactionMetadata, - expectedProducerEpoch: Option[Short], + expectedProducerEpoch: Optional[java.lang.Short], now: Option[Long] = None): TxnTransitMetadata = { - val result = txnMetadata.prepareIncrementProducerEpoch(30000, expectedProducerEpoch, - now.getOrElse(time.milliseconds())) - result.getOrElse(throw new AssertionError(s"prepareIncrementProducerEpoch failed with $result")) + txnMetadata.prepareIncrementProducerEpoch(30000, expectedProducerEpoch, now.getOrElse(time.milliseconds())) } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala index e7ad10140f62a..41ee3f7f4cc90 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala @@ -18,13 +18,13 @@ package kafka.coordinator.transaction import java.lang.management.ManagementFactory import java.nio.ByteBuffer -import java.util.concurrent.CountDownLatch -import java.util.concurrent.locks.ReentrantLock +import java.util.concurrent.{ConcurrentHashMap, CountDownLatch} import javax.management.ObjectName import kafka.server.ReplicaManager -import kafka.utils.{Pool, TestUtils} -import org.apache.kafka.common.TopicPartition +import kafka.utils.TestUtils +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.errors.InvalidRegularExpression import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME import org.apache.kafka.common.metrics.{JmxReporter, KafkaMetricsContext, Metrics} import org.apache.kafka.common.protocol.{Errors, MessageUtil} @@ -32,6 +32,7 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.TransactionResult import org.apache.kafka.common.utils.MockTime +import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} @@ -48,7 +49,7 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.ArgumentMatchers.{any, anyInt, anyLong, anyShort} import org.mockito.Mockito.{atLeastOnce, mock, reset, times, verify, when} -import java.util.Collections +import java.util import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ @@ -58,6 +59,7 @@ class TransactionStateManagerTest { val numPartitions = 2 val transactionTimeoutMs: Int = 1000 val topicPartition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) + val transactionTopicId = Uuid.randomUuid() val coordinatorEpoch = 10 val txnRecords: mutable.ArrayBuffer[SimpleRecord] = mutable.ArrayBuffer[SimpleRecord]() @@ -70,7 +72,7 @@ class TransactionStateManagerTest { when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - Collections.singletonMap(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), + util.Map.of(TransactionVersion.FEATURE_NAME, 
TransactionVersion.TV_2.featureLevel()), 0) } @@ -96,6 +98,8 @@ class TransactionStateManagerTest { // make sure the transactional id hashes to the assigning partition id assertEquals(partitionId, transactionManager.partitionFor(transactionalId1)) assertEquals(partitionId, transactionManager.partitionFor(transactionalId2)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) } @AfterEach @@ -119,7 +123,7 @@ class TransactionStateManagerTest { @Test def testAddGetPids(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) assertEquals(Right(None), transactionManager.getTransactionState(transactionalId1)) assertEquals(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1)), @@ -138,10 +142,10 @@ class TransactionStateManagerTest { assertEquals(0, transactionManager.partitionFor(metadata1.transactionalId)) assertEquals(1, transactionManager.partitionFor(metadata2.transactionalId)) - transactionManager.addLoadedTransactionsToCache(0, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(0, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(metadata1) - transactionManager.addLoadedTransactionsToCache(1, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(1, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(metadata2) def cachedProducerEpoch(transactionalId: String): Option[Short] = { @@ -177,8 +181,8 @@ class TransactionStateManagerTest { ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) - txnMetadata1.state = PrepareCommit - txnMetadata1.addPartitions(Set[TopicPartition]( + txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.addPartitions(util.Set.of( new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) val records = MemoryRecords.withRecords(startOffset, Compression.NONE, @@ -236,8 +240,8 @@ class TransactionStateManagerTest { ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) - txnMetadata1.state = PrepareCommit - txnMetadata1.addPartitions(Set[TopicPartition]( + txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.addPartitions(util.Set.of( new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) val records = MemoryRecords.withRecords(startOffset, Compression.NONE, @@ -281,44 +285,44 @@ class TransactionStateManagerTest { // generate transaction log messages for two pids traces: // pid1's transaction started with two partitions - txnMetadata1.state = Ongoing - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + txnMetadata1.state(TransactionState.ONGOING) + txnMetadata1.addPartitions(util.Set.of(new 
TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid1's transaction adds three more partitions - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic2", 0), + txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic2", 0), new TopicPartition("topic2", 1), new TopicPartition("topic2", 2))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid1's transaction is preparing to commit - txnMetadata1.state = PrepareCommit + txnMetadata1.state(TransactionState.PREPARE_COMMIT) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid2's transaction started with three partitions - txnMetadata2.state = Ongoing - txnMetadata2.addPartitions(Set[TopicPartition](new TopicPartition("topic3", 0), + txnMetadata2.state(TransactionState.ONGOING) + txnMetadata2.addPartitions(util.Set.of(new TopicPartition("topic3", 0), new TopicPartition("topic3", 1), new TopicPartition("topic3", 2))) txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's transaction is preparing to abort - txnMetadata2.state = PrepareAbort + txnMetadata2.state(TransactionState.PREPARE_ABORT) txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's transaction has aborted - txnMetadata2.state = CompleteAbort + txnMetadata2.state(TransactionState.COMPLETE_ABORT) txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's epoch has advanced, with no ongoing transaction yet - txnMetadata2.state = Empty + txnMetadata2.state(TransactionState.EMPTY) txnMetadata2.topicPartitions.clear() txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) @@ -378,7 +382,7 @@ class TransactionStateManagerTest { @Test def testCompleteTransitionWhenAppendSucceeded(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) // first insert the initial transaction metadata transactionManager.putTransactionStateIfNotExists(txnMetadata1) @@ -387,7 +391,7 @@ class TransactionStateManagerTest { expectedError = Errors.NONE // update the metadata to ongoing with two partitions - val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // append the new metadata into log @@ -399,11 +403,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToCoordinatorNotAvailableError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - var failedMetadata = 
txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -411,19 +415,19 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.REQUEST_TIMED_OUT) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -432,11 +436,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToNotCoordinatorError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.NOT_COORDINATOR - var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_LEADER_OR_FOLLOWER) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -444,30 +448,30 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), 
transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch + 1, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch + 1, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) } @Test def testAppendFailToCoordinatorLoadingError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_LOAD_IN_PROGRESS - val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + val failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) @@ -477,11 +481,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToUnknownError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.UNKNOWN_SERVER_ERROR - var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.MESSAGE_TOO_LARGE) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -489,7 +493,7 @@ class TransactionStateManagerTest { 
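The hunks in this test file repeat one mechanical migration: the removed `kafka.utils.Pool` and ad-hoc Scala `Set[TopicPartition](...)` arguments give way to plain Java collections (`ConcurrentHashMap`, `util.Set.of`). A minimal, self-contained sketch of that idiom from the Scala side, using stand-in `String`/`Int` types rather than the real `TransactionMetadata`/`TopicPartition` classes:

```scala
import java.util
import java.util.concurrent.ConcurrentHashMap

object JavaCollectionIdiom {
  def main(args: Array[String]): Unit = {
    // ConcurrentHashMap stands in for the removed kafka.utils.Pool as the cache type:
    // putIfAbsent gives the same "insert only if missing" semantics.
    val cache = new ConcurrentHashMap[String, Int]()
    cache.putIfAbsent("txn-id-1", 42)
    assert(cache.containsKey("txn-id-1"))

    // Immutable util.Set.of replaces the Scala Set[...] arguments; note it rejects
    // duplicate elements at construction time.
    val partitions: util.Set[String] = util.Set.of("topic1-0", "topic1-1")
    assert(partitions.size() == 2)
  }
}
```

Because `util.Set.of` throws on duplicates where a Scala `Set` silently deduplicates, one later hunk in this file deliberately switches to `util.List.of` for a call that repeats the same partition.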
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.RECORD_LIST_TOO_LARGE) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -498,21 +502,21 @@ class TransactionStateManagerTest { @Test def testPendingStateNotResetOnRetryAppend(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + val failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, _ => true, RequestLocal.withThreadConfinedCaching) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) - assertEquals(Some(Ongoing), txnMetadata1.pendingState) + assertEquals(util.Optional.of(TransactionState.ONGOING), txnMetadata1.pendingState) } @Test def testAppendTransactionToLogWhileProducerFenced(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) // first insert the initial transaction metadata transactionManager.putTransactionStateIfNotExists(txnMetadata1) @@ -520,11 +524,11 @@ class TransactionStateManagerTest { prepareForTxnMessageAppend(Errors.NONE) expectedError = Errors.NOT_COORDINATOR - val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata - txnMetadata1.producerEpoch = (txnMetadata1.producerEpoch + 1).toShort + txnMetadata1.setProducerEpoch((txnMetadata1.producerEpoch + 1).toShort) // append the new metadata into log transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, newMetadata, assertCallback, requestLocal = RequestLocal.withThreadConfinedCaching) @@ -533,17 +537,17 @@ class TransactionStateManagerTest { @Test def testAppendTransactionToLogWhilePendingStateChanged(): Unit = { // first insert the initial transaction metadata - transactionManager.addLoadedTransactionsToCache(partitionId, 
coordinatorEpoch, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) prepareForTxnMessageAppend(Errors.NONE) expectedError = Errors.INVALID_PRODUCER_EPOCH - val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata - txnMetadata1.pendingState = None + txnMetadata1.pendingState(util.Optional.empty()) // append the new metadata into log assertThrows(classOf[IllegalStateException], () => transactionManager.appendTransactionToLog(transactionalId1, @@ -564,7 +568,8 @@ class TransactionStateManagerTest { val listResponse = transactionManager.listTransactionStates( filterProducerIds = Set.empty, filterStateNames = Set.empty, - -1L + -1L, + null ) assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, Errors.forCode(listResponse.errorCode)) } @@ -572,7 +577,7 @@ class TransactionStateManagerTest { @Test def testListTransactionsFiltering(): Unit = { for (partitionId <- 0 until numPartitions) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) } def putTransaction( @@ -586,42 +591,46 @@ class TransactionStateManagerTest { } } - putTransaction(transactionalId = "t0", producerId = 0, state = Ongoing) - putTransaction(transactionalId = "t1", producerId = 1, state = Ongoing) + putTransaction(transactionalId = "t0", producerId = 0, state = TransactionState.ONGOING) + putTransaction(transactionalId = "t1", producerId = 1, state = TransactionState.ONGOING) + putTransaction(transactionalId = "my-special-0", producerId = 0, state = TransactionState.ONGOING) // update time to create transactions with various durations time.sleep(1000) - putTransaction(transactionalId = "t2", producerId = 2, state = PrepareCommit) - putTransaction(transactionalId = "t3", producerId = 3, state = PrepareAbort) + putTransaction(transactionalId = "t2", producerId = 2, state = TransactionState.PREPARE_COMMIT) + putTransaction(transactionalId = "t3", producerId = 3, state = TransactionState.PREPARE_ABORT) + putTransaction(transactionalId = "your-special-1", producerId = 0, state = TransactionState.PREPARE_ABORT) time.sleep(1000) - putTransaction(transactionalId = "t4", producerId = 4, state = CompleteCommit) - putTransaction(transactionalId = "t5", producerId = 5, state = CompleteAbort) - putTransaction(transactionalId = "t6", producerId = 6, state = CompleteAbort) - putTransaction(transactionalId = "t7", producerId = 7, state = PrepareEpochFence) + putTransaction(transactionalId = "t4", producerId = 4, state = TransactionState.COMPLETE_COMMIT) + putTransaction(transactionalId = "t5", producerId = 5, state = TransactionState.COMPLETE_ABORT) + putTransaction(transactionalId = "t6", producerId = 6, state = TransactionState.COMPLETE_ABORT) + putTransaction(transactionalId = "t7", producerId = 7, state = TransactionState.PREPARE_EPOCH_FENCE) + putTransaction(transactionalId = "their-special-2", producerId = 7, state = TransactionState.COMPLETE_ABORT) time.sleep(1000) - // Note that `Dead` transactions are never returned. 
This is a transient state + // Note that `TransactionState.DEAD` transactions are never returned. This is a transient state // which is used when the transaction state is in the process of being deleted // (whether though expiration or coordinator unloading). - putTransaction(transactionalId = "t8", producerId = 8, state = Dead) + putTransaction(transactionalId = "t8", producerId = 8, state = TransactionState.DEAD) def assertListTransactions( expectedTransactionalIds: Set[String], filterProducerIds: Set[Long] = Set.empty, filterStates: Set[String] = Set.empty, - filterDuration: Long = -1L + filterDuration: Long = -1L, + filteredTransactionalIdPattern: String = null ): Unit = { - val listResponse = transactionManager.listTransactionStates(filterProducerIds, filterStates, filterDuration) + val listResponse = transactionManager.listTransactionStates(filterProducerIds, filterStates, filterDuration, filteredTransactionalIdPattern) assertEquals(Errors.NONE, Errors.forCode(listResponse.errorCode)) assertEquals(expectedTransactionalIds, listResponse.transactionStates.asScala.map(_.transactionalId).toSet) val expectedUnknownStates = filterStates.filter(state => TransactionState.fromName(state).isEmpty) assertEquals(expectedUnknownStates, listResponse.unknownStateFilters.asScala.toSet) } - assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7")) - assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7"), filterDuration = 0L) - assertListTransactions(Set("t0", "t1", "t2", "t3"), filterDuration = 1000L) - assertListTransactions(Set("t0", "t1"), filterDuration = 2000L) + assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "my-special-0", "your-special-1", "their-special-2")) + assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "my-special-0", "your-special-1", "their-special-2"), filterDuration = 0L) + assertListTransactions(Set("t0", "t1", "t2", "t3", "my-special-0", "your-special-1"), filterDuration = 1000L) + assertListTransactions(Set("t0", "t1", "my-special-0"), filterDuration = 2000L) assertListTransactions(Set(), filterDuration = 3000L) - assertListTransactions(Set("t0", "t1"), filterStates = Set("Ongoing")) - assertListTransactions(Set("t0", "t1"), filterStates = Set("Ongoing", "UnknownState")) + assertListTransactions(Set("t0", "t1", "my-special-0"), filterStates = Set("Ongoing")) + assertListTransactions(Set("t0", "t1", "my-special-0"), filterStates = Set("Ongoing", "UnknownState")) assertListTransactions(Set("t2", "t4"), filterStates = Set("PrepareCommit", "CompleteCommit")) assertListTransactions(Set(), filterStates = Set("UnknownState")) assertListTransactions(Set("t5"), filterProducerIds = Set(5L)) @@ -631,20 +640,29 @@ class TransactionStateManagerTest { assertListTransactions(Set(), filterProducerIds = Set(3L, 6L), filterStates = Set("UnknownState")) assertListTransactions(Set(), filterProducerIds = Set(10L), filterStates = Set("CompleteCommit")) assertListTransactions(Set(), filterStates = Set("Dead")) + assertListTransactions(Set("my-special-0", "your-special-1", "their-special-2"), filteredTransactionalIdPattern = ".*special-.*") + assertListTransactions(Set(), filteredTransactionalIdPattern = "nothing") + assertListTransactions(Set("my-special-0", "your-special-1"), filterDuration = 1000L, filteredTransactionalIdPattern = ".*special-.*") + assertListTransactions(Set("their-special-2"), filterProducerIds = Set(7L), filterStates = Set("CompleteCommit", "CompleteAbort"), filteredTransactionalIdPattern = 
".*special-.*") + } + + @Test + def testListTransactionsFilteringWithInvalidPattern(): Unit = { + assertThrows(classOf[InvalidRegularExpression], () => transactionManager.listTransactionStates(Set.empty, Set.empty, -1L, "(ab(cd")) } @Test def shouldOnlyConsiderTransactionsInTheOngoingStateToAbort(): Unit = { for (partitionId <- 0 until numPartitions) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) } - transactionManager.putTransactionStateIfNotExists(transactionMetadata("ongoing", producerId = 0, state = Ongoing)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("not-expiring", producerId = 1, state = Ongoing, txnTimeout = 10000)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-commit", producerId = 2, state = PrepareCommit)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-abort", producerId = 3, state = PrepareAbort)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-commit", producerId = 4, state = CompleteCommit)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-abort", producerId = 5, state = CompleteAbort)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("ongoing", producerId = 0, state = TransactionState.ONGOING)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("not-expiring", producerId = 1, state = TransactionState.ONGOING, txnTimeout = 10000)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-commit", producerId = 2, state = TransactionState.PREPARE_COMMIT)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-abort", producerId = 3, state = TransactionState.PREPARE_ABORT)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-commit", producerId = 4, state = TransactionState.COMPLETE_COMMIT)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-abort", producerId = 5, state = TransactionState.COMPLETE_ABORT)) time.sleep(2000) val expiring = transactionManager.timedOutTransactions() @@ -653,59 +671,59 @@ class TransactionStateManagerTest { @Test def shouldWriteTxnMarkersForTransactionInPreparedCommitState(): Unit = { - verifyWritesTxnMarkersInPrepareState(PrepareCommit) + verifyWritesTxnMarkersInPrepareState(TransactionState.PREPARE_COMMIT) } @Test def shouldWriteTxnMarkersForTransactionInPreparedAbortState(): Unit = { - verifyWritesTxnMarkersInPrepareState(PrepareAbort) + verifyWritesTxnMarkersInPrepareState(TransactionState.PREPARE_ABORT) } @Test def shouldRemoveCompleteCommitExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteCommit) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.COMPLETE_COMMIT) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldRemoveCompleteAbortExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteAbort) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.COMPLETE_ABORT) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldRemoveEmptyExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, 
Empty) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.EMPTY) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemoveExpiredTransactionalIdsIfLogAppendFails(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NOT_ENOUGH_REPLICAS, CompleteAbort) + setupAndRunTransactionalIdExpiration(Errors.NOT_ENOUGH_REPLICAS, TransactionState.COMPLETE_ABORT) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemoveOngoingTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, Ongoing) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.ONGOING) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemovePrepareAbortTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareAbort) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.PREPARE_ABORT) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemovePrepareCommitTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareCommit) + setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.PREPARE_COMMIT) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @@ -725,10 +743,12 @@ class TransactionStateManagerTest { reset(replicaManager) expectLogConfig(partitionIds, maxBatchSize) - val attemptedAppends = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] + val attemptedAppends = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.MESSAGE_TOO_LARGE, attemptedAppends) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) transactionManager.removeExpiredTransactionalIds() verify(replicaManager, atLeastOnce()).appendRecords( anyLong(), @@ -737,7 +757,6 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), - any[Option[ReentrantLock]], any(), any(), any() @@ -768,8 +787,9 @@ class TransactionStateManagerTest { // No log config returned for partition 0 since it is offline when(replicaManager.getLogConfig(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, offlinePartitionId))) .thenReturn(None) - - val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) + val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) @@ -781,7 +801,6 @@ class 
TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), - any[Option[ReentrantLock]], any(), any(), any() @@ -810,10 +829,13 @@ class TransactionStateManagerTest { reset(replicaManager) expectLogConfig(partitionIds, maxBatchSize) - val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) + transactionManager.removeExpiredTransactionalIds() verify(replicaManager, atLeastOnce()).appendRecords( anyLong(), @@ -822,7 +844,6 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), - any[Option[ReentrantLock]], any(), any(), any()) @@ -855,15 +876,17 @@ class TransactionStateManagerTest { // will be expired and it should succeed. val timestamp = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, 1, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, - RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Empty, collection.mutable.Set.empty[TopicPartition], timestamp, timestamp, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, TransactionState.EMPTY, util.Set.of, timestamp, timestamp, TV_0) transactionManager.putTransactionStateIfNotExists(txnMetadata) time.sleep(txnConfig.transactionalIdExpirationMs + 1) reset(replicaManager) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) expectLogConfig(partitionIds, maxBatchSize) - val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) transactionManager.removeExpiredTransactionalIds() @@ -874,7 +897,6 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), - any[Option[ReentrantLock]], any(), any(), any() @@ -885,7 +907,7 @@ class TransactionStateManagerTest { } private def collectTransactionalIdsFromTombstones( - appendedRecords: mutable.Map[TopicPartition, mutable.Buffer[MemoryRecords]] + appendedRecords: mutable.Map[TopicIdPartition, mutable.Buffer[MemoryRecords]] ): Set[String] = { val expiredTransactionalIds = mutable.Set.empty[String] appendedRecords.values.foreach { batches => @@ -912,7 +934,7 @@ class TransactionStateManagerTest { val txnlId = s"id_$i" val producerId = i val txnMetadata = transactionMetadata(txnlId, producerId) - txnMetadata.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs + txnMetadata.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) 
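The expiration tests in the surrounding hunks now key their captured appends by `TopicIdPartition` rather than `TopicPartition`, and stub `replicaManager.topicIdPartition(...)` to return one built from a fixed topic id. A small sketch of why that works as a map key, assuming only value equality matters for these assertions (the literal topic name below stands in for the `TRANSACTION_STATE_TOPIC_NAME` constant):

```scala
import java.util
import org.apache.kafka.common.{TopicIdPartition, Uuid}

object TopicIdPartitionKeySketch {
  def main(args: Array[String]): Unit = {
    val topicId = Uuid.randomUuid()
    // Two instances built from the same (topicId, partition, topic) triple compare equal,
    // so captured records can be keyed and looked up by TopicIdPartition just as they
    // previously were by TopicPartition.
    val a = new TopicIdPartition(topicId, 0, "__transaction_state")
    val b = new TopicIdPartition(topicId, 0, "__transaction_state")
    assert(a == b)

    val captured = new util.HashMap[TopicIdPartition, String]()
    captured.put(a, "records-for-partition-0")
    assert(captured.containsKey(b))
  }
}
```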
transactionManager.putTransactionStateIfNotExists(txnMetadata) allTransactionalIds += txnlId } @@ -920,7 +942,7 @@ class TransactionStateManagerTest { } private def listExpirableTransactionalIds(): Set[String] = { - val activeTransactionalIds = transactionManager.listTransactionStates(Set.empty, Set.empty, -1L) + val activeTransactionalIds = transactionManager.listTransactionStates(Set.empty, Set.empty, -1L, null) .transactionStates .asScala .map(_.transactionalId) @@ -940,8 +962,8 @@ class TransactionStateManagerTest { @Test def testSuccessfulReimmigration(): Unit = { - txnMetadata1.state = PrepareCommit - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1007,10 +1029,10 @@ class TransactionStateManagerTest { @Test def testLoadTransactionMetadataContainingSegmentEndingWithEmptyBatch(): Unit = { // Simulate a case where a log contains two segments and the first segment ending with an empty batch. - txnMetadata1.state = PrepareCommit - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0))) - txnMetadata2.state = Ongoing - txnMetadata2.addPartitions(Set[TopicPartition](new TopicPartition("topic2", 0))) + txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0))) + txnMetadata2.state(TransactionState.ONGOING) + txnMetadata2.addPartitions(util.Set.of(new TopicPartition("topic2", 0))) // Create the first segment which contains two batches. // The first batch has one transactional record @@ -1056,8 +1078,8 @@ class TransactionStateManagerTest { // all transactions should have been loaded val txnMetadataPool = transactionManager.transactionMetadataCache(partitionId).metadataPerTransactionalId assertEquals(2, txnMetadataPool.size) - assertTrue(txnMetadataPool.contains(transactionalId1)) - assertTrue(txnMetadataPool.contains(transactionalId2)) + assertTrue(txnMetadataPool.containsKey(transactionalId1)) + assertTrue(txnMetadataPool.containsKey(transactionalId2)) } private def verifyMetadataDoesExistAndIsUsable(transactionalId: String): Unit = { @@ -1079,10 +1101,10 @@ class TransactionStateManagerTest { private def expectTransactionalIdExpiration( appendError: Errors, - capturedAppends: mutable.Map[TopicPartition, mutable.Buffer[MemoryRecords]] + capturedAppends: mutable.Map[TopicIdPartition, mutable.Buffer[MemoryRecords]] ): Unit = { - val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) - val callbackCapture: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) + val callbackCapture: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords( anyLong(), @@ -1091,7 +1113,6 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), recordsCapture.capture(), callbackCapture.capture(), - any[Option[ReentrantLock]], any(), 
any(), any() @@ -1114,7 +1135,7 @@ class TransactionStateManagerTest { partitionIds: Seq[Int], ): Unit = { for (partitionId <- partitionIds) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) } } @@ -1137,26 +1158,26 @@ class TransactionStateManagerTest { loadTransactionsForPartitions(partitionIds) expectLogConfig(partitionIds, ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT) - txnMetadata1.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs - txnMetadata1.state = txnState + txnMetadata1.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) + txnMetadata1.state(txnState) transactionManager.putTransactionStateIfNotExists(txnMetadata1) - txnMetadata2.txnLastUpdateTimestamp = time.milliseconds() + txnMetadata2.txnLastUpdateTimestamp(time.milliseconds()) transactionManager.putTransactionStateIfNotExists(txnMetadata2) - val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(error, appendedRecords) transactionManager.removeExpiredTransactionalIds() val stateAllowsExpiration = txnState match { - case Empty | CompleteCommit | CompleteAbort => true + case TransactionState.EMPTY | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => true case _ => false } if (stateAllowsExpiration) { val partitionId = transactionManager.partitionFor(transactionalId1) - val topicPartition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) + val topicPartition = new TopicIdPartition(transactionTopicId, partitionId, TRANSACTION_STATE_TOPIC_NAME) val expectedTombstone = new SimpleRecord(time.milliseconds(), TransactionLog.keyToBytes(transactionalId1), null) val expectedRecords = MemoryRecords.withRecords(TransactionLog.EnforcedCompression, expectedTombstone) assertEquals(Set(topicPartition), appendedRecords.keySet) @@ -1167,8 +1188,8 @@ class TransactionStateManagerTest { } private def verifyWritesTxnMarkersInPrepareState(state: TransactionState): Unit = { - txnMetadata1.state = state - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + txnMetadata1.state(state) + txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1197,11 +1218,11 @@ class TransactionStateManagerTest { private def transactionMetadata(transactionalId: String, producerId: Long, - state: TransactionState = Empty, + state: TransactionState = TransactionState.EMPTY, txnTimeout: Int = transactionTimeoutMs): TransactionMetadata = { val timestamp = time.milliseconds() new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, 0.toShort, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeout, state, collection.mutable.Set.empty[TopicPartition], timestamp, timestamp, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeout, state, util.Set.of, timestamp, timestamp, TV_0) } private def prepareTxnLog(topicPartition: TopicPartition, @@ -1237,21 +1258,22 @@ class TransactionStateManagerTest { private def prepareForTxnMessageAppend(error: Errors): Unit = { reset(replicaManager) - val capturedArgument: 
ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val capturedArgument: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords(anyLong(), anyShort(), internalTopicsAllowed = ArgumentMatchers.eq(true), origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), - any[Map[TopicPartition, MemoryRecords]], + any[Map[TopicIdPartition, MemoryRecords]], capturedArgument.capture(), - any[Option[ReentrantLock]], any(), any(), any() )).thenAnswer(_ => capturedArgument.getValue.apply( - Map(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) -> + Map(new TopicIdPartition(transactionTopicId, partitionId, TRANSACTION_STATE_TOPIC_NAME) -> new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L))) ) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) + when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) } @Test @@ -1272,8 +1294,8 @@ class TransactionStateManagerTest { assertEquals(Double.NaN, partitionLoadTime("partition-load-time-avg"), 0) assertTrue(reporter.containsMbean(mBeanName)) - txnMetadata1.state = Ongoing - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 1), + txnMetadata1.state(TransactionState.ONGOING) + txnMetadata1.addPartitions(util.List.of(new TopicPartition("topic1", 1), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1291,8 +1313,8 @@ class TransactionStateManagerTest { @Test def testIgnoreUnknownRecordType(): Unit = { - txnMetadata1.state = PrepareCommit - txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), + txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1312,7 +1334,7 @@ class TransactionStateManagerTest { assertTrue(transactionManager.transactionMetadataCache.contains(partitionId)) val txnMetadataPool = transactionManager.transactionMetadataCache(partitionId).metadataPerTransactionalId assertFalse(txnMetadataPool.isEmpty) - assertTrue(txnMetadataPool.contains(transactionalId1)) + assertTrue(txnMetadataPool.containsKey(transactionalId1)) val txnMetadata = txnMetadataPool.get(transactionalId1) assertEquals(txnMetadata1.transactionalId, txnMetadata.transactionalId) assertEquals(txnMetadata1.producerId, txnMetadata.producerId) @@ -1332,7 +1354,7 @@ class TransactionStateManagerTest { when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - Collections.singletonMap(TransactionVersion.FEATURE_NAME, transactionVersion.featureLevel()), + util.Map.of(TransactionVersion.FEATURE_NAME, transactionVersion.featureLevel()), 0) } val transactionManager = new TransactionStateManager(0, scheduler, diff --git a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala index 
c45464333c0d7..7f37eeb25a15d 100755 --- a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala +++ b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala @@ -34,7 +34,7 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import java.io.File import java.time.Duration import java.util -import java.util.{Collections, Properties} +import java.util.Properties import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ import scala.util.Using @@ -364,7 +364,7 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { def changeClientIdConfig(sanitizedClientId: String, configs: Properties): Unit = { Using.resource(createAdminClient(brokers, listenerName)) { admin => { - admin.alterClientQuotas(Collections.singleton( + admin.alterClientQuotas(util.Set.of( new ClientQuotaAlteration( new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> (if (sanitizedClientId == "") null else sanitizedClientId)).asJava), configs.asScala.map { case (key, value) => new ClientQuotaAlteration.Op(key, value.toDouble) }.toList.asJava))).all().get() diff --git a/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala b/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala deleted file mode 100644 index 4bfb5a7104b00..0000000000000 --- a/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.integration - -import java.util.Properties -import kafka.server.KafkaConfig -import kafka.utils.{Logging, TestUtils} - -import scala.jdk.CollectionConverters._ -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import com.yammer.metrics.core.Gauge -import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -class MetricsDuringTopicCreationDeletionTest extends KafkaServerTestHarness with Logging { - - private val nodesNum = 3 - private val topicName = "topic" - private val topicNum = 2 - private val replicationFactor = 3 - private val partitionNum = 3 - private val createDeleteIterations = 3 - - private val overridingProps = new Properties - overridingProps.put(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, "true") - overridingProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false") - // speed up the test for UnderReplicatedPartitions, which relies on the ISR expiry thread to execute concurrently with topic creation - // But the replica.lag.time.max.ms value still need to consider the slow Jenkins testing environment - overridingProps.put(ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_CONFIG, "4000") - - private val testedMetrics = List("OfflinePartitionsCount","PreferredReplicaImbalanceCount","UnderReplicatedPartitions") - private val topics = List.tabulate(topicNum) (n => topicName + n) - - @volatile private var running = true - - override def generateConfigs = TestUtils.createBrokerConfigs(nodesNum) - .map(KafkaConfig.fromProps(_, overridingProps)) - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - // Do some Metrics Registry cleanup by removing the metrics that this test checks. - // This is a test workaround to the issue that prior harness runs may have left a populated registry. - // see https://issues.apache.org/jira/browse/KAFKA-4605 - for (m <- testedMetrics) { - val metricName = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.keys.find(_.getName.endsWith(m)) - metricName.foreach(KafkaYammerMetrics.defaultRegistry.removeMetric) - } - - super.setUp(testInfo) - } - - /* - * checking all metrics we care in a single test is faster though it would be more elegant to have 3 @Test methods - */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMetricsDuringTopicCreateDelete(quorum: String): Unit = { - - // For UnderReplicatedPartitions, because of https://issues.apache.org/jira/browse/KAFKA-4605 - // we can't access the metrics value of each server. So instead we directly invoke the method - // replicaManager.underReplicatedPartitionCount() that defines the metrics value. 
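For context on the test file being removed here: it located controller gauges by scanning the Yammer registry for a metric whose name ends with the wanted suffix, then polled the gauge from a background thread. A rough sketch of that lookup pattern, using a private `MetricsRegistry` in place of `KafkaYammerMetrics.defaultRegistry` and an illustrative metric name:

```scala
import scala.jdk.CollectionConverters._
import com.yammer.metrics.core.{Gauge, MetricName, MetricsRegistry}

object GaugeLookupSketch {
  def main(args: Array[String]): Unit = {
    val registry = new MetricsRegistry()
    // Register a gauge the way a broker component would; the constant 0 is a placeholder.
    registry.newGauge(
      new MetricName("kafka.controller", "KafkaController", "OfflinePartitionsCount"),
      new Gauge[Int] { override def value(): Int = 0 })

    // Find the gauge by name suffix and read its current value, mirroring the
    // getGauge helper of the deleted test.
    val gauge = registry.allMetrics().asScala
      .collectFirst { case (name, g: Gauge[_]) if name.getName.endsWith("OfflinePartitionsCount") => g }
      .getOrElse(throw new AssertionError("metric not registered"))
    assert(gauge.value() == 0)
  }
}
```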
- @volatile var underReplicatedPartitionCount = 0 - - // For OfflinePartitionsCount and PreferredReplicaImbalanceCount even with https://issues.apache.org/jira/browse/KAFKA-4605 - // the test has worked reliably because the metric that gets triggered is the one generated by the first started server (controller) - val offlinePartitionsCountGauge = getGauge("OfflinePartitionsCount") - @volatile var offlinePartitionsCount = offlinePartitionsCountGauge.value - assert(offlinePartitionsCount == 0) - - val preferredReplicaImbalanceCountGauge = getGauge("PreferredReplicaImbalanceCount") - @volatile var preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value - assert(preferredReplicaImbalanceCount == 0) - - // Thread checking the metric continuously - running = true - val thread = new Thread(() => { - while (running) { - for (s <- servers if running) { - underReplicatedPartitionCount = s.replicaManager.underReplicatedPartitionCount - if (underReplicatedPartitionCount > 0) { - running = false - } - } - - preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value - if (preferredReplicaImbalanceCount > 0) { - running = false - } - - offlinePartitionsCount = offlinePartitionsCountGauge.value - if (offlinePartitionsCount > 0) { - running = false - } - } - }) - thread.start() - - // breakable loop that creates and deletes topics - createDeleteTopics() - - // if the thread checking the gauge is still run, stop it - running = false - thread.join() - - assert(offlinePartitionsCount==0, s"Expect offlinePartitionsCount to be 0, but got: $offlinePartitionsCount") - assert(preferredReplicaImbalanceCount==0, s"Expect PreferredReplicaImbalanceCount to be 0, but got: $preferredReplicaImbalanceCount") - assert(underReplicatedPartitionCount==0, s"Expect UnderReplicatedPartitionCount to be 0, but got: $underReplicatedPartitionCount") - } - - private def getGauge(metricName: String) = { - KafkaYammerMetrics.defaultRegistry.allMetrics.asScala - .find { case (k, _) => k.getName.endsWith(metricName) } - .getOrElse(throw new AssertionError( "Unable to find metric " + metricName)) - ._2.asInstanceOf[Gauge[Int]] - } - - private def createDeleteTopics(): Unit = { - for (l <- 1 to createDeleteIterations if running) { - // Create topics - for (t <- topics if running) { - try { - createTopic(t, partitionNum, replicationFactor) - } catch { - case e: Exception => e.printStackTrace() - } - } - - // Delete topics - for (t <- topics if running) { - try { - deleteTopic(t) - TestUtils.verifyTopicDeletion(t, partitionNum, servers) - } catch { - case e: Exception => e.printStackTrace() - } - } - } - } -} diff --git a/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala b/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala deleted file mode 100644 index fb981369e6b66..0000000000000 --- a/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.integration - -import java.util.Properties -import scala.collection.Seq - -import kafka.server.KafkaConfig -import kafka.utils.TestUtils -import org.apache.kafka.server.config.ServerLogConfigs -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -class MinIsrConfigTest extends KafkaServerTestHarness { - val overridingProps = new Properties() - overridingProps.put(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, "5") - def generateConfigs: Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1).map(KafkaConfig.fromProps(_, overridingProps)) - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultKafkaConfig(quorum: String): Unit = { - assert(brokers.head.logManager.initialDefaultConfig.minInSyncReplicas == 5) - } -} diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index e7a8e10d80e9f..03944faaefeec 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -17,6 +17,7 @@ package kafka.integration +import java.util import java.util.Properties import java.util.concurrent.ExecutionException import scala.util.Random @@ -31,7 +32,7 @@ import org.apache.kafka.common.errors.{InvalidConfigurationException, TimeoutExc import org.apache.kafka.common.serialization.StringDeserializer import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry} +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry, FeatureUpdate, UpdateFeaturesOptions} import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.metrics.KafkaYammerMetrics @@ -42,6 +43,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import com.yammer.metrics.core.Meter import org.apache.kafka.metadata.LeaderConstants +import org.apache.kafka.server.common.MetadataVersion import org.apache.logging.log4j.core.config.Configurator class UncleanLeaderElectionTest extends QuorumTestHarness { @@ -119,6 +121,14 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { admin = TestUtils.createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), adminConfigs) } + private def disableEligibleLeaderReplicas(): Unit = { + if (metadataVersion.isAtLeast(MetadataVersion.IBP_4_1_IV0)) { + admin.updateFeatures( + util.Map.of("eligible.leader.replicas.version", new FeatureUpdate(0, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE)), + new UpdateFeaturesOptions()).all().get() + } + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) @MethodSource(Array("getTestGroupProtocolParametersAll")) def 
testUncleanLeaderElectionEnabled(groupProtocol: String): Unit = { @@ -126,6 +136,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { configProps1.put("unclean.leader.election.enable", "true") configProps2.put("unclean.leader.election.enable", "true") startBrokers(Seq(configProps1, configProps2)) + disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) @@ -137,6 +148,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { def testUncleanLeaderElectionDisabled(groupProtocol: String): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) + disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) @@ -151,6 +163,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { configProps1.put("unclean.leader.election.enable", "false") configProps2.put("unclean.leader.election.enable", "false") startBrokers(Seq(configProps1, configProps2)) + disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled val topicProps = new Properties() @@ -167,6 +180,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { configProps1.put("unclean.leader.election.enable", "true") configProps2.put("unclean.leader.election.enable", "true") startBrokers(Seq(configProps1, configProps2)) + disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled val topicProps = new Properties() @@ -180,6 +194,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { @MethodSource(Array("getTestGroupProtocolParametersAll")) def testUncleanLeaderElectionInvalidTopicOverride(groupProtocol: String): Unit = { startBrokers(Seq(configProps1)) + disableEligibleLeaderReplicas() // create topic with an invalid value for unclean leader election val topicProps = new Properties() @@ -317,7 +332,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { valueDeserializer = new StringDeserializer) try { val tp = new TopicPartition(topic, partitionId) - consumer.assign(Seq(tp).asJava) + consumer.assign(util.List.of(tp)) consumer.seek(tp, 0) TestUtils.consumeRecords(consumer, numMessages).map(_.value) } finally consumer.close() @@ -328,6 +343,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { def testTopicUncleanLeaderElectionEnableWithAlterTopicConfigs(groupProtocol: String): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) + disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) @@ -410,10 +426,14 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { } private def alterTopicConfigs(adminClient: Admin, topic: String, topicConfigs: Properties): AlterConfigsResult = { - val configEntries = topicConfigs.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava - adminClient.incrementalAlterConfigs(Map(new ConfigResource(ConfigResource.Type.TOPIC, topic) -> - configEntries.asScala.map((e: 
ConfigEntry) => new AlterConfigOp(e, AlterConfigOp.OpType.SET)).toSeq - .asJavaCollection).asJava) + val configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) + + val configEntries = topicConfigs.entrySet().stream() + .map(e => new ConfigEntry(e.getKey.toString, e.getValue.toString)) + .map(e => new AlterConfigOp(e, AlterConfigOp.OpType.SET)) + .toList + + adminClient.incrementalAlterConfigs(util.Map.of(configResource, configEntries)) } private def createAdminClient(): Admin = { @@ -427,7 +447,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { private def waitForNoLeaderAndIsrHasOldLeaderId(metadataCache: MetadataCache, leaderId: Int): Unit = { waitUntilTrue(() => metadataCache.getLeaderAndIsr(topic, partitionId).isPresent() && metadataCache.getLeaderAndIsr(topic, partitionId).get.leader() == LeaderConstants.NO_LEADER && - java.util.Arrays.asList(leaderId).equals(metadataCache.getLeaderAndIsr(topic, partitionId).get.isr()), + util.List.of(leaderId).equals(metadataCache.getLeaderAndIsr(topic, partitionId).get.isr()), "Timed out waiting for broker metadata cache updates the info for topic partition:" + topicPartition) } } diff --git a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala index acf21e69ec377..b1e161b975340 100644 --- a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala @@ -71,7 +71,7 @@ abstract class AbstractLogCleanerIntegrationTest { maxCompactionLagMs: Long = defaultMaxCompactionLagMs): Properties = { val props = new Properties() props.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageSize: java.lang.Integer) - props.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize: java.lang.Integer) + props.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentSize: java.lang.Integer) props.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 100*1024: java.lang.Integer) props.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, deleteDelay: java.lang.Integer) props.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala index f93d703f07777..dc9a1d0928a7a 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala @@ -37,8 +37,7 @@ import org.junit.jupiter.api.{AfterEach, Test} import java.lang.{Long => JLong} import java.util import java.util.concurrent.ConcurrentHashMap -import scala.collection.mutable -import scala.jdk.CollectionConverters._ +import java.util.stream.Collectors import scala.jdk.OptionConverters.RichOptional /** @@ -53,7 +52,7 @@ class LogCleanerManagerTest extends Logging { val topicPartition = new TopicPartition("log", 0) val topicPartition2 = new TopicPartition("log2", 0) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig: LogConfig = new LogConfig(logProps) @@ -61,13 +60,13 @@ class LogCleanerManagerTest extends Logging { val offset = 999 val producerStateManagerConfig = new 
ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false) - val cleanerCheckpoints: mutable.Map[TopicPartition, JLong] = mutable.Map[TopicPartition, JLong]() + val cleanerCheckpoints: util.HashMap[TopicPartition, JLong] = new util.HashMap[TopicPartition, JLong]() class LogCleanerManagerMock(logDirs: util.List[File], logs: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog], logDirFailureChannel: LogDirFailureChannel) extends LogCleanerManager(logDirs, logs, logDirFailureChannel) { override def allCleanerCheckpoints: util.Map[TopicPartition, JLong] = { - cleanerCheckpoints.toMap.asJava + cleanerCheckpoints } override def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Optional[util.Map.Entry[TopicPartition, JLong]], @@ -370,7 +369,7 @@ class LogCleanerManagerTest extends Logging { // change cleanup policy from delete to compact val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, log.config.segmentSize: Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, log.config.segmentSize(): Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, log.config.retentionMs: java.lang.Long) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) logProps.put(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, 0: Integer) @@ -382,7 +381,11 @@ class LogCleanerManagerTest extends Logging { assertEquals(0, cleanable.size, "should have 0 logs ready to be compacted") // log cleanup finished, and log can be picked up for compaction - cleanerManager.resumeCleaning(deletableLog.asScala.map(_.getKey).toSet.asJava) + cleanerManager.resumeCleaning( + deletableLog.stream() + .map[TopicPartition](entry => entry.getKey) + .collect(Collectors.toSet[TopicPartition]()) + ) val cleanable2 = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).toScala assertEquals(1, cleanable2.size, "should have 1 logs ready to be compacted") @@ -396,7 +399,7 @@ class LogCleanerManagerTest extends Logging { assertEquals(0, deletableLog2.size, "should have 0 logs ready to be deleted") // compaction done, should have 1 log eligible for log cleanup - cleanerManager.doneDeleting(Seq(cleanable2.get.topicPartition).asJava) + cleanerManager.doneDeleting(util.List.of(cleanable2.get.topicPartition)) val deletableLog3 = cleanerManager.pauseCleaningForNonCompactedPartitions() assertEquals(1, deletableLog3.size, "should have 1 logs ready to be deleted") } @@ -501,9 +504,13 @@ class LogCleanerManagerTest extends Logging { val pausedPartitions = cleanerManager.pauseCleaningForNonCompactedPartitions() // Log truncation happens due to unclean leader election cleanerManager.abortAndPauseCleaning(log.topicPartition) - cleanerManager.resumeCleaning(Set(log.topicPartition).asJava) + cleanerManager.resumeCleaning(util.Set.of(log.topicPartition)) // log cleanup finishes and pausedPartitions are resumed - cleanerManager.resumeCleaning(pausedPartitions.asScala.map(_.getKey).toSet.asJava) + cleanerManager.resumeCleaning( + pausedPartitions.stream() + .map[TopicPartition](entry => entry.getKey) + .collect(Collectors.toSet[TopicPartition]()) + ) assertEquals(Optional.empty(), cleanerManager.cleaningState(log.topicPartition)) } @@ -522,7 +529,11 @@ class LogCleanerManagerTest extends Logging { // Broker processes StopReplicaRequest with delete=true cleanerManager.abortCleaning(log.topicPartition) // log cleanup finishes and pausedPartitions are resumed - cleanerManager.resumeCleaning(pausedPartitions.asScala.map(_.getKey).toSet.asJava) + 
cleanerManager.resumeCleaning( + pausedPartitions.stream() + .map[TopicPartition](entry => entry.getKey) + .collect(Collectors.toSet[TopicPartition]()) + ) assertEquals(Optional.empty(), cleanerManager.cleaningState(log.topicPartition)) } @@ -548,7 +559,7 @@ class LogCleanerManagerTest extends Logging { @Test def testCleanableOffsetsForNone(): Unit = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -570,7 +581,7 @@ class LogCleanerManagerTest extends Logging { @Test def testCleanableOffsetsActiveSegment(): Unit = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -592,7 +603,7 @@ class LogCleanerManagerTest extends Logging { def testCleanableOffsetsForTime(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -625,7 +636,7 @@ class LogCleanerManagerTest extends Logging { def testCleanableOffsetsForShortTime(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -667,7 +678,7 @@ class LogCleanerManagerTest extends Logging { def testUndecidedTransactionalDataNotCleanable(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -711,7 +722,7 @@ class LogCleanerManagerTest extends Logging { @Test def testDoneCleaning(): Unit = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 8) log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), 0) @@ -743,17 +754,17 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) val tp = new TopicPartition("log", 0) - assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(Seq(tp).asJava)) + assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(util.List.of(tp))) cleanerManager.setCleaningState(tp, LogCleaningState.logCleaningPaused(1)) - assertThrows(classOf[IllegalStateException], () => 
cleanerManager.doneDeleting(Seq(tp).asJava)) + assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(util.List.of(tp))) cleanerManager.setCleaningState(tp, LOG_CLEANING_IN_PROGRESS) - cleanerManager.doneDeleting(Seq(tp).asJava) + cleanerManager.doneDeleting(util.List.of(tp)) assertTrue(cleanerManager.cleaningState(tp).isEmpty) cleanerManager.setCleaningState(tp, LOG_CLEANING_ABORTED) - cleanerManager.doneDeleting(Seq(tp).asJava) + cleanerManager.doneDeleting(util.List.of(tp)) assertEquals(LogCleaningState.logCleaningPaused(1), cleanerManager.cleaningState(tp).get) } @@ -771,7 +782,7 @@ class LogCleanerManagerTest extends Logging { val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()) assertEquals(Optional.empty(), filthiestLog, "Log should not be selected for cleaning") - assertEquals(20L, cleanerCheckpoints(tp), "Unselected log should have checkpoint offset updated") + assertEquals(20L, cleanerCheckpoints.get(tp), "Unselected log should have checkpoint offset updated") } /** @@ -793,17 +804,17 @@ class LogCleanerManagerTest extends Logging { val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get assertEquals(tp1, filthiestLog.topicPartition, "Dirtier log should be selected") - assertEquals(15L, cleanerCheckpoints(tp0), "Unselected log should have checkpoint offset updated") + assertEquals(15L, cleanerCheckpoints.get(tp0), "Unselected log should have checkpoint offset updated") } private def createCleanerManager(log: UnifiedLog): LogCleanerManager = { val logs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() logs.put(topicPartition, log) - new LogCleanerManager(Seq(logDir, logDir2).asJava, logs, null) + new LogCleanerManager(util.List.of(logDir, logDir2), logs, null) } private def createCleanerManagerMock(pool: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog]): LogCleanerManagerMock = { - new LogCleanerManagerMock(Seq(logDir).asJava, pool, null) + new LogCleanerManagerMock(util.List.of(logDir), pool, null) } private def createLog(segmentSize: Int, @@ -830,7 +841,7 @@ class LogCleanerManagerTest extends Logging { private def createLowRetentionLogConfig(segmentSize: Int, cleanupPolicy: String): LogConfig = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize: Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentSize: Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 1: Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, cleanupPolicy) logProps.put(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, 0.05: java.lang.Double) // small for easier and clearer tests diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index 09a9d1c40f140..5b842d60e46ba 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -56,7 +56,7 @@ class LogCleanerTest extends Logging { val tmpdir = TestUtils.tempDir() val dir = TestUtils.randomPartitionLogDir(tmpdir) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) @@ -148,7 +148,7 @@ class LogCleanerTest 
extends Logging { def testCleanSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -181,7 +181,7 @@ class LogCleanerTest extends Logging { // Construct a log instance. The replaceSegments() method of the log instance is overridden so that // it waits for another thread to execute deleteOldSegments() val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024 : java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024 : java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE) val config = LogConfig.fromProps(logConfig.originals, logProps) val topicPartition = UnifiedLog.parseTopicPartitionName(dir) @@ -271,7 +271,7 @@ class LogCleanerTest extends Logging { val originalMaxFileSize = 1024 val cleaner = makeCleaner(2) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, originalMaxFileSize: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, originalMaxFileSize: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact": java.lang.String) logProps.put(TopicConfig.PREALLOCATE_CONFIG, "true": java.lang.String) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -295,7 +295,7 @@ class LogCleanerTest extends Logging { def testDuplicateCheckAfterCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) var log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -464,7 +464,7 @@ class LogCleanerTest extends Logging { def testBasicTransactionAwareCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -497,7 +497,7 @@ class LogCleanerTest extends Logging { def testCleanWithTransactionsSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -543,7 +543,7 @@ class LogCleanerTest extends Logging { def testCommitMarkerRemoval(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -591,7 +591,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(capacity = Int.MaxValue, maxMessageSize = 100) val logProps = new Properties() 
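The edits in LogCleanerTest (and in the other log test suites above) all apply one substitution: per-test segment sizing moves from the public TopicConfig.SEGMENT_BYTES_CONFIG to the internal LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, presumably so the tests can keep using segment sizes smaller than the public config now accepts. A minimal illustrative sketch of the new form, using only names already present in the surrounding hunks (the 1024-byte value is arbitrary):

    import java.util.Properties
    import org.apache.kafka.storage.internals.log.LogConfig

    val logProps = new Properties()
    // Internal, test-only segment size override; replaces TopicConfig.SEGMENT_BYTES_CONFIG in these tests.
    logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer)
    val logConfig = new LogConfig(logProps)
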
logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, 100: java.lang.Integer) - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1000: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1000: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -619,7 +619,7 @@ class LogCleanerTest extends Logging { def testCommitMarkerRetentionWithEmptyBatch(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -682,7 +682,7 @@ class LogCleanerTest extends Logging { def testCleanEmptyControlBatch(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -714,7 +714,7 @@ class LogCleanerTest extends Logging { def testCommittedTransactionSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L @@ -736,7 +736,7 @@ class LogCleanerTest extends Logging { def testAbortedTransactionSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L @@ -765,7 +765,7 @@ class LogCleanerTest extends Logging { def testAbortMarkerRemoval(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -799,7 +799,7 @@ class LogCleanerTest extends Logging { val producerId = 1L val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val appendFirstTransaction = appendTransactionalAsLeader(log, producerId, producerEpoch, 0, AppendOrigin.REPLICATION) @@ -832,7 +832,7 @@ class LogCleanerTest extends Logging { def testAbortMarkerRetentionWithEmptyBatch(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -896,7 
+896,7 @@ class LogCleanerTest extends Logging { // Create cleaner with very small default max message size val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -913,7 +913,7 @@ class LogCleanerTest extends Logging { // clean the log val stats = new CleanerStats(Time.SYSTEM) - cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head).asJava, map, 0L, stats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), map, 0L, stats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) val shouldRemain = LogTestUtils.keysInLog(log).filterNot(keys.contains) assertEquals(shouldRemain, LogTestUtils.keysInLog(log)) } @@ -926,7 +926,7 @@ class LogCleanerTest extends Logging { val (log, offsetMap) = createLogWithMessagesLargerThanMaxSize(largeMessageSize = 1024 * 1024) val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) - cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head).asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) val shouldRemain = LogTestUtils.keysInLog(log).filter(k => !offsetMap.map.containsKey(k.toString)) assertEquals(shouldRemain, LogTestUtils.keysInLog(log)) } @@ -945,7 +945,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) assertThrows(classOf[CorruptRecordException], () => - cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head).asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) ) } @@ -962,13 +962,13 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) assertThrows(classOf[CorruptRecordException], () => - cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head).asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) ) } def createLogWithMessagesLargerThanMaxSize(largeMessageSize: Int): (UnifiedLog, FakeOffsetMap) = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, 
logProps)) @@ -994,7 +994,7 @@ class LogCleanerTest extends Logging { def testCleaningWithDeletes(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1021,7 +1021,7 @@ class LogCleanerTest extends Logging { // because loadFactor is 0.75, this means we can fit 3 messages in the map val cleaner = makeCleaner(4) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1050,7 +1050,7 @@ class LogCleanerTest extends Logging { def testLogCleanerRetainsProducerLastSequence(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0, 0), 0) // offset 0 @@ -1073,7 +1073,7 @@ class LogCleanerTest extends Logging { def testLogCleanerRetainsLastSequenceEvenIfTransactionAborted(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -1107,7 +1107,7 @@ class LogCleanerTest extends Logging { def testCleaningWithKeysConflictingWithTxnMarkerKeys(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val leaderEpoch = 5 val producerEpoch = 0.toShort @@ -1151,7 +1151,7 @@ class LogCleanerTest extends Logging { // because loadFactor is 0.75, this means we can fit 1 message in the map val cleaner = makeCleaner(2) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1182,7 +1182,7 @@ class LogCleanerTest extends Logging { def testCleaningWithUncleanableSection(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1225,7 +1225,7 @@ class LogCleanerTest extends Logging { def testLogToClean(): Unit = { // create a log with small segment size val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment @@ 
-1243,7 +1243,7 @@ class LogCleanerTest extends Logging { def testLogToCleanWithUncleanableSection(): Unit = { // create a log with small segment size val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment @@ -1276,7 +1276,7 @@ class LogCleanerTest extends Logging { // create a log with compaction turned off so we can append unkeyed messages val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1334,7 +1334,7 @@ class LogCleanerTest extends Logging { def testCleanSegmentsWithAbort(): Unit = { val cleaner = makeCleaner(Int.MaxValue, abortCheckDone) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1356,7 +1356,7 @@ class LogCleanerTest extends Logging { def testCleanSegmentsRetainingLastEmptyBatch(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1388,7 +1388,7 @@ class LogCleanerTest extends Logging { def testSegmentGrouping(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1489,7 +1489,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1541,7 +1541,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) //mimic the effect of loading an empty index file logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 400: java.lang.Integer) @@ -1636,7 +1636,7 @@ class LogCleanerTest extends Logging { // Try to clean segment with offset overflow. This will trigger log split and the cleaning itself must abort. 
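Alongside the segment-size change, these hunks replace scala.jdk.CollectionConverters conversions such as Seq(...).asJava with the immutable java.util factory methods (util.List.of, util.Set.of, util.Map.of). A small sketch of the equivalence, with an arbitrary TopicPartition as the element; the java.util factories reject null elements and return unmodifiable collections, which is fine for these call sites:

    import java.util
    import org.apache.kafka.common.TopicPartition
    import scala.jdk.CollectionConverters._

    val tp = new TopicPartition("log", 0)
    val viaConverter: util.List[TopicPartition] = Seq(tp).asJava   // old style
    val viaFactory: util.List[TopicPartition] = util.List.of(tp)   // new style
    // Both satisfy java.util.List equality, so call sites such as cleaner.cleanSegments(...) behave the same.
    assert(viaConverter == viaFactory)
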
assertThrows(classOf[LogCleaningAbortedException], () => - cleaner.cleanSegments(log, Seq(segmentWithOverflow).asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, util.List.of(segmentWithOverflow), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, segmentWithOverflow.readNextOffset) ) assertEquals(numSegmentsInitial + 1, log.logSegments.size) @@ -1646,7 +1646,7 @@ class LogCleanerTest extends Logging { // Clean each segment now that split is complete. val upperBoundOffset = log.logSegments.asScala.last.readNextOffset for (segmentToClean <- log.logSegments.asScala) - cleaner.cleanSegments(log, List(segmentToClean).asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, util.List.of(segmentToClean), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, upperBoundOffset) assertEquals(expectedKeysAfterCleaning, LogTestUtils.keysInLog(log)) assertFalse(LogTestUtils.hasOffsetOverflow(log)) @@ -1666,7 +1666,7 @@ class LogCleanerTest extends Logging { def testRecoveryAfterCrash(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) logProps.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, 10: java.lang.Integer) @@ -1797,7 +1797,7 @@ class LogCleanerTest extends Logging { def testBuildOffsetMapFakeLarge(): Unit = { val map = new FakeOffsetMap(1000) val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 120: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 120: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 120: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) @@ -1945,7 +1945,7 @@ class LogCleanerTest extends Logging { @Test def testCleaningBeyondMissingOffsets(): Unit = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024*1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024*1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) val cleaner = makeCleaner(Int.MaxValue) diff --git a/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala b/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala deleted file mode 100644 index 0da8366f443ee..0000000000000 --- a/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.log - -import java.util.{Optional, Properties} -import java.util.concurrent.{Callable, Executors} -import kafka.utils.TestUtils -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.record.SimpleRecord -import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.storage.log.FetchIsolation -import org.apache.kafka.server.util.KafkaScheduler -import org.apache.kafka.storage.internals.log.{LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} -import org.apache.kafka.storage.log.metrics.BrokerTopicStats -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} - -import scala.collection.mutable.ListBuffer -import scala.util.Random - -class LogConcurrencyTest { - private val brokerTopicStats = new BrokerTopicStats - private val random = new Random() - private val scheduler = new KafkaScheduler(1) - private val tmpDir = TestUtils.tempDir() - private val logDir = TestUtils.randomPartitionLogDir(tmpDir) - - @BeforeEach - def setup(): Unit = { - scheduler.startup() - } - - @AfterEach - def shutdown(): Unit = { - scheduler.shutdown() - Utils.delete(tmpDir) - } - - @Test - def testUncommittedDataNotConsumed(): Unit = { - testUncommittedDataNotConsumed(createLog()) - } - - @Test - def testUncommittedDataNotConsumedFrequentSegmentRolls(): Unit = { - val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 237: Integer) - val logConfig = new LogConfig(logProps) - testUncommittedDataNotConsumed(createLog(logConfig)) - } - - def testUncommittedDataNotConsumed(log: UnifiedLog): Unit = { - val executor = Executors.newFixedThreadPool(2) - try { - val maxOffset = 5000 - val consumer = new ConsumerTask(log, maxOffset) - val appendTask = new LogAppendTask(log, maxOffset) - - val consumerFuture = executor.submit(consumer) - val fetcherTaskFuture = executor.submit(appendTask) - - fetcherTaskFuture.get() - consumerFuture.get() - - validateConsumedData(log, consumer.consumedBatches) - } finally executor.shutdownNow() - } - - /** - * Simple consumption task which reads the log in ascending order and collects - * consumed batches for validation - */ - private class ConsumerTask(log: UnifiedLog, lastOffset: Int) extends Callable[Unit] { - val consumedBatches = ListBuffer.empty[FetchedBatch] - - override def call(): Unit = { - var fetchOffset = 0L - while (log.highWatermark < lastOffset) { - val readInfo = log.read(fetchOffset, 1, FetchIsolation.HIGH_WATERMARK, true) - readInfo.records.batches().forEach { batch => - consumedBatches += FetchedBatch(batch.baseOffset, batch.partitionLeaderEpoch) - fetchOffset = batch.lastOffset + 1 - } - } - } - } - - /** - * This class simulates basic leader/follower behavior. 
- */ - private class LogAppendTask(log: UnifiedLog, lastOffset: Long) extends Callable[Unit] { - override def call(): Unit = { - var leaderEpoch = 1 - var isLeader = true - - while (log.highWatermark < lastOffset) { - random.nextInt(2) match { - case 0 => - val logEndOffsetMetadata = log.logEndOffsetMetadata - val logEndOffset = logEndOffsetMetadata.messageOffset - val batchSize = random.nextInt(9) + 1 - val records = (0 to batchSize).map(i => new SimpleRecord(s"$i".getBytes)) - - if (isLeader) { - log.appendAsLeader(TestUtils.records(records), leaderEpoch) - log.maybeIncrementHighWatermark(logEndOffsetMetadata) - } else { - log.appendAsFollower( - TestUtils.records( - records, - baseOffset = logEndOffset, - partitionLeaderEpoch = leaderEpoch - ), - Int.MaxValue - ) - log.updateHighWatermark(logEndOffset) - } - - case 1 => - isLeader = !isLeader - leaderEpoch += 1 - - if (!isLeader) { - log.truncateTo(log.highWatermark) - } - } - } - } - } - - private def createLog(config: LogConfig = new LogConfig(new Properties())): UnifiedLog = { - UnifiedLog.create( - logDir, - config, - 0L, - 0L, - scheduler, - brokerTopicStats, - Time.SYSTEM, - 5 * 60 * 1000, - new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty - ) - } - - private def validateConsumedData(log: UnifiedLog, consumedBatches: Iterable[FetchedBatch]): Unit = { - val iter = consumedBatches.iterator - log.logSegments.forEach { segment => - segment.log.batches.forEach { batch => - if (iter.hasNext) { - val consumedBatch = iter.next() - try { - assertEquals(batch.partitionLeaderEpoch, - consumedBatch.epoch, "Consumed batch with unexpected leader epoch") - assertEquals(batch.baseOffset, - consumedBatch.baseOffset, "Consumed batch with unexpected base offset") - } catch { - case t: Throwable => - throw new AssertionError(s"Consumed batch $consumedBatch " + - s"does not match next expected batch in log $batch", t) - } - } - } - } - } - - private case class FetchedBatch(baseOffset: Long, epoch: Int) { - override def toString: String = { - s"FetchedBatch(baseOffset=$baseOffset, epoch=$epoch)" - } - } - -} diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index 1e26d653bbccf..e942e7e33805c 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -23,40 +23,19 @@ import org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM import org.apache.kafka.common.config.ConfigDef.Type.INT import org.apache.kafka.common.config.{ConfigException, SslConfigs, TopicConfig} import org.apache.kafka.common.errors.InvalidConfigurationException -import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test -import java.util.{Collections, Properties} +import java.util +import java.util.Properties import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.storage.internals.log.{LogConfig, ThrottledReplicaListValidator} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import scala.jdk.CollectionConverters._ - class LogConfigTest { - /** - * This test verifies that KafkaConfig object initialization does not depend on - * LogConfig 
initialization. Bad things happen due to static initialization - * order dependencies. For example, LogConfig.configDef ends up adding null - * values in serverDefaultConfigNames. This test ensures that the mapping of - * keys from LogConfig to KafkaConfig are not missing values. - */ - @Test - def ensureNoStaticInitializationOrderDependency(): Unit = { - // Access any KafkaConfig val to load KafkaConfig object before LogConfig. - assertNotNull(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG) - assertTrue(LogConfig.configNames.asScala - .filter(config => !LogConfig.CONFIGS_WITH_NO_SERVER_DEFAULTS.contains(config)) - .forall { config => - val serverConfigOpt = LogConfig.serverConfigName(config) - serverConfigOpt.isPresent && (serverConfigOpt.get != null) - }) - } - @Test def testKafkaConfigToProps(): Unit = { val millisInHour = 60L * 60L * 1000L @@ -94,6 +73,7 @@ class LogConfigTest { case TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG => assertPropertyInvalid(name, "not_a_number", "-0.1") case TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG => assertPropertyInvalid(name, "not_a_number", "remove", "0") case TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG => assertPropertyInvalid(name, "not_a_number", "remove", "0") + case LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG => // no op case _ => assertPropertyInvalid(name, "not_a_number", "-1") }) @@ -142,7 +122,7 @@ class LogConfigTest { /* Sanity check that toHtml produces one of the expected configs */ @Test def testToHtml(): Unit = { - val html = LogConfig.configDefCopy.toHtml(4, (key: String) => "prefix_" + key, Collections.emptyMap()) + val html = LogConfig.configDefCopy.toHtml(4, (key: String) => "prefix_" + key, util.Map.of) val expectedConfig = "
    file.delete.delay.ms
    " assertTrue(html.contains(expectedConfig), s"Could not find `$expectedConfig` in:\n $html") } @@ -293,29 +273,31 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(util.Map.of, props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) } @Test - def testEnableRemoteLogStorageOnCompactedTopic(): Unit = { + def testEnableRemoteLogStorageCleanupPolicy(): Unit = { val kafkaProps = TestUtils.createDummyBrokerConfig() kafkaProps.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true") val kafkaConfig = KafkaConfig.fromProps(kafkaProps) - val logProps = new Properties() + def validateCleanupPolicy(): Unit = { + LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + } logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) - + validateCleanupPolicy() logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) - assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") - assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") - assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) + logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete") + validateCleanupPolicy() + logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "") + validateCleanupPolicy() } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -328,10 +310,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => 
LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -347,8 +329,8 @@ class LogConfigTest { logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false") if (wasRemoteStorageEnabled) { val message = assertThrows(classOf[InvalidConfigurationException], - () => LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) assertTrue(message.getMessage.contains("It is invalid to disable remote storage without deleting remote data. " + "If you want to keep the remote data and turn to read only, please set `remote.storage.enable=true,remote.log.copy.disable=true`. " + "If you want to disable remote storage and delete all remote data, please set `remote.storage.enable=false,remote.log.delete.on.disable=true`.")) @@ -356,12 +338,12 @@ class LogConfigTest { // It should be able to disable the remote log storage when delete on disable is set to true logProps.put(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, "true") - LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } else { - LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) - LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), logProps, - kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), logProps, + kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } } @@ -380,12 +362,12 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } } @@ -404,12 +386,12 @@ class LogConfigTest { 
logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } } @@ -424,10 +406,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) + () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) } } @@ -446,21 +428,4 @@ class LogConfigTest { logProps.put(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, deleteOnDisable.toString) LogConfig.validate(logProps) } - - @Test - def testValidateWithMetadataVersionJbodSupport(): Unit = { - def validate(metadataVersion: MetadataVersion, jbodConfig: Boolean): Unit = - KafkaConfig.fromProps( - TestUtils.createBrokerConfig(nodeId = 0, logDirCount = if (jbodConfig) 2 else 1) - ).validateWithMetadataVersion(metadataVersion) - - validate(MetadataVersion.IBP_3_6_IV2, jbodConfig = false) - validate(MetadataVersion.IBP_3_7_IV0, jbodConfig = false) - validate(MetadataVersion.IBP_3_7_IV2, jbodConfig = false) - assertThrows(classOf[IllegalArgumentException], () => - validate(MetadataVersion.IBP_3_6_IV2, jbodConfig = true)) - assertThrows(classOf[IllegalArgumentException], () => - validate(MetadataVersion.IBP_3_7_IV0, jbodConfig = true)) - validate(MetadataVersion.IBP_3_7_IV2, jbodConfig = true) - } } diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala index 8e417a695ee36..c1d611ce6dc43 100644 --- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala @@ -21,7 +21,6 @@ import kafka.server.KafkaConfig import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, SimpleRecord, TimestampType} import org.apache.kafka.common.utils.{Time, Utils} @@ -245,7 +244,7 @@ class LogLoaderTest { @Test def testProducerSnapshotsRecoveryAfterUncleanShutdown(): Unit = { val logProps = new Properties() - 
logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "640") + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "640") val logConfig = new LogConfig(logProps) var log = createLog(logDir, logConfig) assertEquals(OptionalLong.empty(), log.oldestProducerSnapshotOffset) @@ -287,7 +286,7 @@ class LogLoaderTest { val wrapper = Mockito.spy(segment) Mockito.doAnswer { in => segmentsWithReads += wrapper - segment.read(in.getArgument(0, classOf[java.lang.Long]), in.getArgument(1, classOf[java.lang.Integer]), in.getArgument(2, classOf[java.util.Optional[java.lang.Long]]), in.getArgument(3, classOf[java.lang.Boolean])) + segment.read(in.getArgument(0, classOf[java.lang.Long]), in.getArgument(1, classOf[java.lang.Integer]), in.getArgument(2, classOf[util.Optional[java.lang.Long]]), in.getArgument(3, classOf[java.lang.Boolean])) }.when(wrapper).read(ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any()) Mockito.doAnswer { in => recoveredSegments += wrapper @@ -392,12 +391,12 @@ class LogLoaderTest { codec: Compression = Compression.NONE, timestamp: Long = RecordBatch.NO_TIMESTAMP, magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = { - val records = Seq(new SimpleRecord(timestamp, key, value)) + val records = util.List.of(new SimpleRecord(timestamp, key, value)) - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset, mockTime.milliseconds, leaderEpoch) - records.foreach(builder.append) + records.forEach(builder.append) builder.build() } @@ -560,7 +559,7 @@ class LogLoaderTest { false, LogOffsetsListener.NO_OP_OFFSETS_LISTENER) - verify(stateManager).removeStraySnapshots(any[java.util.List[java.lang.Long]]) + verify(stateManager).removeStraySnapshots(any[util.List[java.lang.Long]]) verify(stateManager, times(2)).updateMapEndOffset(0L) verify(stateManager, times(2)).takeSnapshot() verify(stateManager).isEmpty @@ -1216,11 +1215,11 @@ class LogLoaderTest { val fourthBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 3, offset = 3) log.appendAsFollower(fourthBatch, Int.MaxValue) - assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) + assertEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) // deliberately remove some of the epoch entries leaderEpochCache.truncateFromEndAsyncFlush(2) - assertNotEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) + assertNotEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) log.close() // reopen the log and recover from the beginning @@ -1228,7 +1227,7 @@ class LogLoaderTest { val recoveredLeaderEpochCache = recoveredLog.leaderEpochCache // epoch entries should be recovered - assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries) + assertEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries) recoveredLog.close() } diff --git a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala index 
5e721596ce057..a8946a3d1395f 100755 --- a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala @@ -23,15 +23,14 @@ import org.apache.directory.api.util.FileUtils import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.OffsetOutOfRangeException import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{DirectoryId, KafkaException, TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.{DirectoryId, KafkaException, TopicPartition, Uuid} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.image.{TopicImage, TopicsImage} -import org.apache.kafka.metadata.{ConfigRepository, LeaderRecoveryState, MockConfigRepository, PartitionRegistration} +import org.apache.kafka.metadata.{ConfigRepository, MockConfigRepository} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers.any -import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} +import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{doAnswer, doNothing, mock, never, spy, times, verify} import java.io._ @@ -39,12 +38,12 @@ import java.lang.{Long => JLong} import java.nio.file.Files import java.nio.file.attribute.PosixFilePermission import java.util -import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, Future} +import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.{Collections, Optional, OptionalLong, Properties} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.{FileLock, KafkaScheduler, MockTime, Scheduler} -import org.apache.kafka.storage.internals.log.{CleanerConfig, FetchDataInfo, LogConfig, LogDirFailureChannel, LogMetricNames, LogOffsetsListener, LogStartOffsetIncrementReason, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, FetchDataInfo, LogConfig, LogDirFailureChannel, LogMetricNames, LogManager => JLogManager, LogOffsetsListener, LogStartOffsetIncrementReason, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, OffsetCheckpointFile} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.function.Executable @@ -56,13 +55,12 @@ import scala.jdk.CollectionConverters._ import scala.util.{Failure, Try} class LogManagerTest { - import LogManagerTest._ val time = new MockTime() val maxRollInterval = 100 val maxLogAgeMs: Int = 10 * 60 * 1000 val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 4096: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, maxLogAgeMs: java.lang.Integer) val logConfig = new LogConfig(logProps) @@ -393,7 +391,7 @@ class LogManagerTest { logManager.shutdown() val segmentBytes = 10 * setSize val properties = new Properties() - properties.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentBytes.toString) + properties.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentBytes.toString) 
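// Editor's sketch (not part of this patch): the hunks in these test files consistently swap
// TopicConfig.SEGMENT_BYTES_CONFIG for LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG when building
// test log configs, presumably because only the internal key still accepts the tiny segment
// sizes these tests rely on. A minimal illustration of the pattern (all names appear in the
// surrounding hunks; the byte values are arbitrary):
object SmallSegmentConfigSketch {
  import java.util.Properties
  import org.apache.kafka.common.config.TopicConfig
  import org.apache.kafka.storage.internals.log.LogConfig

  def build(): LogConfig = {
    val props = new Properties()
    // internal key keeps very small segments usable in tests
    props.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer)
    props.put(TopicConfig.RETENTION_MS_CONFIG, (10 * 60 * 1000): java.lang.Integer)
    new LogConfig(props)
  }
}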
properties.put(TopicConfig.RETENTION_BYTES_CONFIG, (5L * 10L * setSize + 10L).toString) val configRepository = MockConfigRepository.forTopic(name, properties) @@ -536,7 +534,7 @@ class LogManagerTest { true } - logManager.loadLog(log.dir, hadCleanShutdown = true, Collections.emptyMap[TopicPartition, JLong], Collections.emptyMap[TopicPartition, JLong], logConfig, Map.empty, new ConcurrentHashMap[String, Integer](), providedIsStray) + logManager.loadLog(log.dir, hadCleanShutdown = true, util.Map.of[TopicPartition, JLong], util.Map.of[TopicPartition, JLong], logConfig, Map.empty, new ConcurrentHashMap[String, Integer](), providedIsStray) assertEquals(1, invokedCount) assertTrue( logDir.listFiles().toSet @@ -592,7 +590,7 @@ class LogManagerTest { } logManager.checkpointLogRecoveryOffsets() - val checkpoints = new OffsetCheckpointFile(new File(logDir, LogManager.RecoveryPointCheckpointFile), null).read() + val checkpoints = new OffsetCheckpointFile(new File(logDir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), null).read() topicPartitions.zip(logs).foreach { case (tp, log) => assertEquals(checkpoints.get(tp), log.recoveryPoint, "Recovery point should equal checkpoint") @@ -672,7 +670,7 @@ class LogManagerTest { logManager.checkpointRecoveryOffsetsInDir(logDir) - val checkpoints = new OffsetCheckpointFile(new File(logDir, LogManager.RecoveryPointCheckpointFile), null).read() + val checkpoints = new OffsetCheckpointFile(new File(logDir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), null).read() tps.zip(allLogs).foreach { case (tp, log) => assertEquals(checkpoints.get(tp), log.recoveryPoint, @@ -1094,36 +1092,6 @@ class LogManagerTest { verifyMetrics(1) } - @Test - def testWaitForAllToComplete(): Unit = { - var invokedCount = 0 - val success: Future[Boolean] = Mockito.mock(classOf[Future[Boolean]]) - Mockito.when(success.get()).thenAnswer { _ => - invokedCount += 1 - true - } - val failure: Future[Boolean] = Mockito.mock(classOf[Future[Boolean]]) - Mockito.when(failure.get()).thenAnswer{ _ => - invokedCount += 1 - throw new RuntimeException - } - - var failureCount = 0 - // all futures should be evaluated - assertFalse(LogManager.waitForAllToComplete(Seq(success, failure), _ => failureCount += 1)) - assertEquals(2, invokedCount) - assertEquals(1, failureCount) - assertFalse(LogManager.waitForAllToComplete(Seq(failure, success), _ => failureCount += 1)) - assertEquals(4, invokedCount) - assertEquals(2, failureCount) - assertTrue(LogManager.waitForAllToComplete(Seq(success, success), _ => failureCount += 1)) - assertEquals(6, invokedCount) - assertEquals(2, failureCount) - assertFalse(LogManager.waitForAllToComplete(Seq(failure, failure), _ => failureCount += 1)) - assertEquals(8, invokedCount) - assertEquals(4, failureCount) - } - @Test def testLoadDirectoryIds(): Unit = { val dirs: Seq[File] = Seq.fill(5)(TestUtils.tempDir()) @@ -1161,7 +1129,7 @@ class LogManagerTest { remoteStorageSystemEnable = true ) - val checkpointFile = new File(logDir, LogManager.LogStartOffsetCheckpointFile) + val checkpointFile = new File(logDir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE) val checkpoint = new OffsetCheckpointFile(checkpointFile, null) val topicPartition = new TopicPartition("test", 0) val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) @@ -1192,7 +1160,7 @@ class LogManagerTest { @Test def testCheckpointLogStartOffsetForNormalTopic(): Unit = { - val checkpointFile = new File(logDir, LogManager.LogStartOffsetCheckpointFile) + val checkpointFile = new File(logDir, 
JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE) val checkpoint = new OffsetCheckpointFile(checkpointFile, null) val topicPartition = new TopicPartition("test", 0) val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) @@ -1233,65 +1201,6 @@ class LogManagerTest { new File(dir, MetaPropertiesEnsemble.META_PROPERTIES_NAME).getAbsolutePath, false) } - val foo0 = new TopicIdPartition(Uuid.fromString("Sl08ZXU2QW6uF5hIoSzc8w"), new TopicPartition("foo", 0)) - val foo1 = new TopicIdPartition(Uuid.fromString("Sl08ZXU2QW6uF5hIoSzc8w"), new TopicPartition("foo", 1)) - val bar0 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 0)) - val bar1 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 1)) - val baz0 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 0)) - val baz1 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 1)) - val baz2 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 2)) - val quux0 = new TopicIdPartition(Uuid.fromString("YS9owjv5TG2OlsvBM0Qw6g"), new TopicPartition("quux", 0)) - val recreatedFoo0 = new TopicIdPartition(Uuid.fromString("_dOOzPe3TfiWV21Lh7Vmqg"), new TopicPartition("foo", 0)) - val recreatedFoo1 = new TopicIdPartition(Uuid.fromString("_dOOzPe3TfiWV21Lh7Vmqg"), new TopicPartition("foo", 1)) - - @Test - def testIsStrayKraftReplicaWithEmptyImage(): Unit = { - val image: TopicsImage = topicsImage(Seq()) - val onDisk = Seq(foo0, foo1, bar0, bar1, quux0).map(mockLog) - assertTrue(onDisk.forall(log => LogManager.isStrayKraftReplica(0, image, log))) - } - - @Test - def testIsStrayKraftReplicaInImage(): Unit = { - val image: TopicsImage = topicsImage(Seq( - topicImage(Map( - foo0 -> Seq(0, 1, 2), - )), - topicImage(Map( - bar0 -> Seq(0, 1, 2), - bar1 -> Seq(0, 1, 2), - )) - )) - val onDisk = Seq(foo0, foo1, bar0, bar1, quux0).map(mockLog) - val expectedStrays = Set(foo1, quux0).map(_.topicPartition()) - - onDisk.foreach(log => assertEquals(expectedStrays.contains(log.topicPartition), LogManager.isStrayKraftReplica(0, image, log))) - } - - @Test - def testIsStrayKraftReplicaInImageWithRemoteReplicas(): Unit = { - val image: TopicsImage = topicsImage(Seq( - topicImage(Map( - foo0 -> Seq(0, 1, 2), - )), - topicImage(Map( - bar0 -> Seq(1, 2, 3), - bar1 -> Seq(2, 3, 0), - )) - )) - val onDisk = Seq(foo0, bar0, bar1).map(mockLog) - val expectedStrays = Set(bar0).map(_.topicPartition) - - onDisk.foreach(log => assertEquals(expectedStrays.contains(log.topicPartition), LogManager.isStrayKraftReplica(0, image, log))) - } - - @Test - def testIsStrayKraftMissingTopicId(): Unit = { - val log = Mockito.mock(classOf[UnifiedLog]) - Mockito.when(log.topicId).thenReturn(Optional.empty) - assertTrue(LogManager.isStrayKraftReplica(0, topicsImage(Seq()), log)) - } - /** * Test LogManager takes file lock by default and the lock is released after shutdown. 
*/ @@ -1302,12 +1211,12 @@ class LogManagerTest { try { // ${tmpLogDir}.lock is acquired by tmpLogManager - val fileLock = new FileLock(new File(tmpLogDir, LogManager.LockFileName)) + val fileLock = new FileLock(new File(tmpLogDir, JLogManager.LOCK_FILE_NAME)) assertFalse(fileLock.tryLock()) } finally { // ${tmpLogDir}.lock is removed after shutdown tmpLogManager.shutdown() - val f = new File(tmpLogDir, LogManager.LockFileName) + val f = new File(tmpLogDir, JLogManager.LOCK_FILE_NAME) assertFalse(f.exists()) } } @@ -1376,56 +1285,3 @@ class LogManagerTest { } } } - -object LogManagerTest { - def mockLog( - topicIdPartition: TopicIdPartition - ): UnifiedLog = { - val log = Mockito.mock(classOf[UnifiedLog]) - Mockito.when(log.topicId).thenReturn(Optional.of(topicIdPartition.topicId())) - Mockito.when(log.topicPartition).thenReturn(topicIdPartition.topicPartition()) - log - } - - def topicImage( - partitions: Map[TopicIdPartition, Seq[Int]] - ): TopicImage = { - var topicName: String = null - var topicId: Uuid = null - partitions.keySet.foreach { - partition => if (topicId == null) { - topicId = partition.topicId() - } else if (!topicId.equals(partition.topicId())) { - throw new IllegalArgumentException("partition topic IDs did not match") - } - if (topicName == null) { - topicName = partition.topic() - } else if (!topicName.equals(partition.topic())) { - throw new IllegalArgumentException("partition topic names did not match") - } - } - if (topicId == null) { - throw new IllegalArgumentException("Invalid empty partitions map.") - } - val partitionRegistrations = partitions.map { case (partition, replicas) => - Int.box(partition.partition()) -> new PartitionRegistration.Builder(). - setReplicas(replicas.toArray). - setDirectories(DirectoryId.unassignedArray(replicas.size)). - setIsr(replicas.toArray). - setLeader(replicas.head). - setLeaderRecoveryState(LeaderRecoveryState.RECOVERED). - setLeaderEpoch(0). - setPartitionEpoch(0). 
- build() - } - new TopicImage(topicName, topicId, partitionRegistrations.asJava) - } - - def topicsImage( - topics: Seq[TopicImage] - ): TopicsImage = { - var retval = TopicsImage.EMPTY - topics.foreach { t => retval = retval.including(t) } - retval - } -} diff --git a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala index 04c91741037bb..0ff68988d76fb 100644 --- a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala +++ b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala @@ -75,7 +75,7 @@ object LogTestUtils { remoteLogDeleteOnDisable: Boolean = DEFAULT_REMOTE_LOG_DELETE_ON_DISABLE_CONFIG): LogConfig = { val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_MS_CONFIG, segmentMs: java.lang.Long) - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentBytes: Integer) + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentBytes: Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, retentionMs: java.lang.Long) logProps.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs: java.lang.Long) logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, retentionBytes: java.lang.Long) diff --git a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala index 3f73f8f731a71..e6fdf09331bfc 100755 --- a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala +++ b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala @@ -41,7 +41,7 @@ import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffs import org.apache.kafka.server.util.{KafkaScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, Cleaner, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, AsyncOffsetReader, Cleaner, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, UnifiedLog, VerificationGuard} import org.apache.kafka.storage.internals.utils.Throttler import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats} import org.junit.jupiter.api.Assertions._ @@ -231,7 +231,7 @@ class UnifiedLogTest { reopened.truncateFullyAndStartAt(2L, Optional.of(1L)) assertEquals(Optional.empty, reopened.firstUnstableOffset) - assertEquals(java.util.Collections.emptyMap(), reopened.producerStateManager.activeProducers) + assertEquals(util.Map.of, reopened.producerStateManager.activeProducers) assertEquals(1L, reopened.logStartOffset) assertEquals(2L, reopened.logEndOffset) } @@ -274,7 +274,7 @@ class UnifiedLogTest { truncateFunc(reopened, 0L) assertEquals(Optional.empty, reopened.firstUnstableOffset) - assertEquals(java.util.Collections.emptyMap(), reopened.producerStateManager.activeProducers) + assertEquals(util.Map.of, reopened.producerStateManager.activeProducers) } @Test @@ -1980,7 
+1980,7 @@ class UnifiedLogTest { val log = createLog(logDir, LogTestUtils.createLogConfig(maxMessageBytes = second.sizeInBytes - 1)) log.appendAsFollower(first, Int.MaxValue) - // the second record is larger then limit but appendAsFollower does not validate the size. + // the second record is larger than limit but appendAsFollower does not validate the size. log.appendAsFollower(second, Int.MaxValue) } @@ -1991,7 +1991,7 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) val previousEndOffset = log.logEndOffsetMetadata.messageOffset - if (expectedException.isPresent()) { + if (expectedException.isPresent) { assertThrows( expectedException.get(), () => log.appendAsFollower(records, Int.MaxValue) @@ -2416,6 +2416,193 @@ class UnifiedLogTest { KafkaConfig.fromProps(props) } + @Test + def testFetchEarliestPendingUploadTimestampNoRemoteStorage(): Unit = { + val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) + val log = createLog(logDir, logConfig) + + // Test initial state before any records + assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + + // Append records + val _ = prepareLogWithSequentialRecords(log, recordCount = 2) + + // Test state after records are appended + assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + } + + @Test + def testFetchEarliestPendingUploadTimestampWithRemoteStorage(): Unit = { + val logStartOffset = 0 + val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) + + val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) + val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) + val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) + + doAnswer(ans => { + val timestamp = ans.getArgument(1).asInstanceOf[Long] + Optional.of(timestamp) + .filter(_ == timestampAndEpochs.head.timestamp) + .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(timestampAndEpochs.head.leaderEpoch))) + }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) + + // Offset 0 (first timestamp) is in remote storage and deleted locally. Offset 1 (second timestamp) is in local storage. + log.updateLocalLogStartOffset(1) + log.updateHighestOffsetInRemoteStorage(0) + + // In the assertions below we test that offset 0 (first timestamp) is only in remote and offset 1 (second timestamp) is in local storage. 
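// Editor's sketch (not part of this patch): the assertFetchOffset* helpers added further down in
// this file show the two shapes a lookup can take in these tests. The special targets (EARLIEST,
// LATEST, EARLIEST_LOCAL, LATEST_TIERED, EARLIEST_PENDING_UPLOAD) are answered synchronously,
// while a lookup by an actual record timestamp that may live in tiered storage comes back as a
// future completed via the remote reader. A condensed caller, using only calls from those helpers:
object OffsetLookupSketch {
  import java.util.Optional
  import java.util.concurrent.TimeUnit
  import org.apache.kafka.storage.internals.log.{AsyncOffsetReader, UnifiedLog}

  def resolve(log: UnifiedLog, timestamp: Long, remoteReader: Optional[AsyncOffsetReader]): Unit = {
    val holder = log.fetchOffsetByTimestamp(timestamp, remoteReader)
    if (holder.futureHolderOpt.isPresent) {
      // remote lookup in flight: wait for the async task the remote log manager completes
      holder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS)
    }
  }
}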
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) + assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), + ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), + ListOffsetsRequest.LATEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + } + + @Test + def testFetchEarliestPendingUploadTimestampWithRemoteStorageNoLocalDeletion(): Unit = { + val logStartOffset = 0 + val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) + + val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) + val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) + val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) + + // Offsets upto 1 are in remote storage + doAnswer(ans => { + val timestamp = ans.getArgument(1).asInstanceOf[Long] + Optional.of( + timestamp match { + case x if x == firstTimestamp => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch)) + case x if x == secondTimestamp => new TimestampAndOffset(x, 1L, Optional.of(secondLeaderEpoch)) + case _ => null + } + ) + }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) + + // Offsets 0, 1 (first and second timestamps) are in remote storage and not deleted locally. 
+ log.updateLocalLogStartOffset(0) + log.updateHighestOffsetInRemoteStorage(1) + + // In the assertions below we test that offset 0 (first timestamp) and offset 1 (second timestamp) are on both remote and local storage + assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) + assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), + ListOffsetsRequest.LATEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(thirdLeaderEpoch)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + } + + @Test + def testFetchEarliestPendingUploadTimestampNoSegmentsUploaded(): Unit = { + val logStartOffset = 0 + val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) + + val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) + val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) + val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) + + // No offsets are in remote storage + doAnswer(_ => Optional.empty[TimestampAndOffset]()) + .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) + + // Offsets 0, 1, 2 (first, second and third timestamps) are in local storage only and not uploaded to remote storage. + log.updateLocalLogStartOffset(0) + log.updateHighestOffsetInRemoteStorage(-1) + + // In the assertions below we test that offset 0 (first timestamp), offset 1 (second timestamp) and offset 2 (third timestamp) are only on the local storage. 
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) + assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1L, Optional.of(-1)), + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), + ListOffsetsRequest.LATEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + } + + @Test + def testFetchEarliestPendingUploadTimestampStaleHighestOffsetInRemote(): Unit = { + val logStartOffset = 100 + val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) + + val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) + val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) + val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) + + // Offsets 100, 101, 102 (first, second and third timestamps) are in local storage and not uploaded to remote storage. + // Tiered storage copy was disabled and then enabled again, because of which the remote log segments are deleted but + // the highest offset in remote storage has become stale + doAnswer(_ => Optional.empty[TimestampAndOffset]()) + .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) + + log.updateLocalLogStartOffset(100) + log.updateHighestOffsetInRemoteStorage(50) + + // In the assertions below we test that offset 100 (first timestamp), offset 101 (second timestamp) and offset 102 (third timestamp) are only on the local storage. 
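// Editor's note (an inference from the four tests above, not code in this patch): taken together,
// the assertions are consistent with the following mapping for the special list-offsets targets:
//   EARLIEST_TIMESTAMP                -> log start offset
//   EARLIEST_LOCAL_TIMESTAMP          -> local log start offset
//   LATEST_TIMESTAMP                  -> log end offset
//   LATEST_TIERED_TIMESTAMP           -> highest offset in remote storage (-1 if nothing is tiered)
//   EARLIEST_PENDING_UPLOAD_TIMESTAMP -> max(log start offset, highest offset in remote storage + 1)
// The stale-remote case (start offset 100, stale remote highest 50) is what forces the max().
// A one-line helper expressing that last rule:
object PendingUploadOffsetSketch {
  def earliestPendingUpload(logStartOffset: Long, highestOffsetInRemoteStorage: Long): Long =
    math.max(logStartOffset, highestOffsetInRemoteStorage + 1)
}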
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 100L, Optional.of(firstLeaderEpoch))), firstTimestamp) + assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 101L, Optional.of(secondLeaderEpoch))), secondTimestamp) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 50L, Optional.empty()), + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 103L, Optional.of(thirdLeaderEpoch)), + ListOffsetsRequest.LATEST_TIMESTAMP) + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + } + + private def prepare(logStartOffset: Int): (RemoteLogManager, UnifiedLog, Seq[TimestampAndEpoch]) = { + val config: KafkaConfig = createKafkaConfigWithRLM + val purgatory = new DelayedOperationPurgatory[DelayedRemoteListOffsets]("RemoteListOffsets", config.brokerId) + val remoteLogManager = spy(new RemoteLogManager(config.remoteLogManagerConfig, + 0, + logDir.getAbsolutePath, + "clusterId", + mockTime, + _ => Optional.empty[UnifiedLog](), + (_, _) => {}, + brokerTopicStats, + new Metrics(), + Optional.empty)) + remoteLogManager.setDelayedOperationPurgatory(purgatory) + + val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1, remoteLogStorageEnable = true) + val log = createLog(logDir, logConfig, logStartOffset = logStartOffset, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) + + // Verify earliest pending upload offset for empty log + assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, logStartOffset, Optional.empty()), + ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) + + val timestampAndEpochs = prepareLogWithSequentialRecords(log, recordCount = 3) + (remoteLogManager, log, timestampAndEpochs) + } + /** * Test the Log truncate operations */ @@ -2734,7 +2921,7 @@ class UnifiedLogTest { @Test def testLeaderEpochCacheCreatedAfterMessageFormatUpgrade(): Unit = { val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000") + logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "1000") logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "65536") val logConfig = new LogConfig(logProps) @@ -2991,7 +3178,7 @@ class UnifiedLogTest { for (_ <- 0 until 15) log.appendAsLeader(createRecords, 0) - // mark oldest segment as older the retention.ms + // mark the oldest segment as older the retention.ms log.logSegments.asScala.head.setLastModified(mockTime.milliseconds - 20000) val segments = log.numberOfSegments @@ -3000,6 +3187,60 @@ class UnifiedLogTest { assertEquals(segments, log.numberOfSegments, "There should be 3 segments 
remaining") } + @Test + def shouldDeleteLocalLogSegmentsWhenPolicyIsEmptyWithSizeRetention(): Unit = { + def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = 10L) + val recordSize = createRecords.sizeInBytes + val logConfig = LogTestUtils.createLogConfig( + segmentBytes = recordSize * 2, + localRetentionBytes = recordSize / 2, + cleanupPolicy = "", + remoteLogStorageEnable = true + ) + val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) + + for (_ <- 0 until 10) + log.appendAsLeader(createRecords, 0) + + val segmentsBefore = log.numberOfSegments + log.updateHighWatermark(log.logEndOffset) + log.updateHighestOffsetInRemoteStorage(log.logEndOffset - 1) + val deleteOldSegments = log.deleteOldSegments() + + assertTrue(log.numberOfSegments < segmentsBefore, "Some segments should be deleted due to size retention") + assertTrue(deleteOldSegments > 0, "At least one segment should be deleted") + } + + @Test + def shouldDeleteLocalLogSegmentsWhenPolicyIsEmptyWithMsRetention(): Unit = { + val oldTimestamp = mockTime.milliseconds - 20000 + def oldRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = oldTimestamp) + val recordSize = oldRecords.sizeInBytes + val logConfig = LogTestUtils.createLogConfig( + segmentBytes = recordSize * 2, + localRetentionMs = 5000, + cleanupPolicy = "", + remoteLogStorageEnable = true + ) + val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) + + for (_ <- 0 until 10) + log.appendAsLeader(oldRecords, 0) + + def newRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = mockTime.milliseconds) + for (_ <- 0 until 5) + log.appendAsLeader(newRecords, 0) + + val segmentsBefore = log.numberOfSegments + + log.updateHighWatermark(log.logEndOffset) + log.updateHighestOffsetInRemoteStorage(log.logEndOffset - 1) + val deleteOldSegments = log.deleteOldSegments() + + assertTrue(log.numberOfSegments < segmentsBefore, "Some segments should be deleted due to time retention") + assertTrue(deleteOldSegments > 0, "At least one segment should be deleted") + } + @Test def shouldDeleteSegmentsReadyToBeDeletedWhenCleanupPolicyIsCompactAndDelete(): Unit = { def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes, timestamp = 10L) @@ -3110,7 +3351,7 @@ class UnifiedLogTest { log.deleteOldSegments() //The oldest epoch entry should have been removed - assertEquals(java.util.Arrays.asList(new EpochEntry(1, 5), new EpochEntry(2, 10)), cache.epochEntries) + assertEquals(util.List.of(new EpochEntry(1, 5), new EpochEntry(2, 10)), cache.epochEntries) } @Test @@ -3135,7 +3376,7 @@ class UnifiedLogTest { log.deleteOldSegments() //The first entry should have gone from (0,0) => (0,5) - assertEquals(java.util.Arrays.asList(new EpochEntry(0, 5), new EpochEntry(1, 7), new EpochEntry(2, 10)), cache.epochEntries) + assertEquals(util.List.of(new EpochEntry(0, 5), new EpochEntry(1, 7), new EpochEntry(2, 10)), cache.epochEntries) } @Test @@ -4008,12 +4249,13 @@ class UnifiedLogTest { @ParameterizedTest @EnumSource(value = classOf[AppendOrigin], names = Array("CLIENT", "COORDINATOR")) - def testTransactionIsOngoingAndVerificationGuard(appendOrigin: AppendOrigin): Unit = { + def testTransactionIsOngoingAndVerificationGuardTV2(appendOrigin: AppendOrigin): Unit = { val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) val producerId = 23L val producerEpoch = 1.toShort - var sequence = if (appendOrigin == 
AppendOrigin.CLIENT) 3 else 0 + // For TV2, when there's no existing producer state, sequence must be 0 for both CLIENT and COORDINATOR + var sequence = 0 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) @@ -4081,6 +4323,83 @@ class UnifiedLogTest { assertFalse(verificationGuard.verify(newVerificationGuard)) } + @ParameterizedTest + @EnumSource(value = classOf[AppendOrigin], names = Array("CLIENT", "COORDINATOR")) + def testTransactionIsOngoingAndVerificationGuardTV1(appendOrigin: AppendOrigin): Unit = { + val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, false) + + val producerId = 23L + val producerEpoch = 1.toShort + // For TV1, can start with non-zero sequences even with non-zero epoch when no existing producer state + var sequence = if (appendOrigin == AppendOrigin.CLIENT) 3 else 0 + val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) + val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) + assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) + assertFalse(log.verificationGuard(producerId).verify(VerificationGuard.SENTINEL)) + + val idempotentRecords = MemoryRecords.withIdempotentRecords( + Compression.NONE, + producerId, + producerEpoch, + sequence, + new SimpleRecord("1".getBytes), + new SimpleRecord("2".getBytes) + ) + + // Only clients have nonzero sequences + if (appendOrigin == AppendOrigin.CLIENT) + sequence = sequence + 2 + + val transactionalRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, + producerId, + producerEpoch, + sequence, + new SimpleRecord("1".getBytes), + new SimpleRecord("2".getBytes) + ) + + // For TV1, create verification guard with supportsEpochBump=false + val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) + assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) + + log.appendAsLeader(idempotentRecords, 0, appendOrigin) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) + + // Since we wrote idempotent records, we keep VerificationGuard. + assertEquals(verificationGuard, log.verificationGuard(producerId)) + + // Now write the transactional records + assertTrue(log.verificationGuard(producerId).verify(verificationGuard)) + log.appendAsLeader(transactionalRecords, 0, appendOrigin, RequestLocal.noCaching(), verificationGuard) + assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) + // VerificationGuard should be cleared now. + assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) + + // A subsequent maybeStartTransactionVerification will be empty since we are already verified. 
+ assertEquals(VerificationGuard.SENTINEL, log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false)) + + val endTransactionMarkerRecord = MemoryRecords.withEndTransactionMarker( + producerId, + producerEpoch, + new EndTransactionMarker(ControlRecordType.COMMIT, 0) + ) + + log.appendAsLeader(endTransactionMarkerRecord, 0, AppendOrigin.COORDINATOR) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) + assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) + + if (appendOrigin == AppendOrigin.CLIENT) + sequence = sequence + 1 + + // A new maybeStartTransactionVerification will not be empty, as we need to verify the next transaction. + val newVerificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) + assertNotEquals(VerificationGuard.SENTINEL, newVerificationGuard) + assertNotEquals(verificationGuard, newVerificationGuard) + assertFalse(verificationGuard.verify(newVerificationGuard)) + } + @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testEmptyTransactionStillClearsVerificationGuard(supportsEpochBump: Boolean): Unit = { @@ -4165,7 +4484,7 @@ class UnifiedLogTest { val producerId = 23L val producerEpoch = 1.toShort - val sequence = 4 + val sequence = 0 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) @@ -4191,9 +4510,10 @@ class UnifiedLogTest { assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } - @Test - def testAllowNonZeroSequenceOnFirstAppendNonZeroEpoch(): Unit = { - val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testNonZeroSequenceOnFirstAppendNonZeroEpoch(transactionVerificationEnabled: Boolean): Unit = { + val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, transactionVerificationEnabled) val producerId = 23L val producerEpoch = 1.toShort @@ -4212,9 +4532,19 @@ class UnifiedLogTest { new SimpleRecord("2".getBytes) ) - val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) - // Append should not throw error. 
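// Editor's aside condensing the comments in these hunks (illustrative, not code from the patch):
// for a producer with no existing state, the accepted first sequence differs by transaction
// version.
//   TV2 (supportsEpochBump = true):  the first sequence must be 0; a non-zero sequence is
//                                    rejected with OutOfOrderSequenceException
//   TV1 (supportsEpochBump = false): a non-zero first sequence is still accepted
object FirstSequenceRuleSketch {
  def firstSequenceAccepted(supportsEpochBump: Boolean, firstSequence: Int): Boolean =
    !supportsEpochBump || firstSequence == 0
}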
- log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard) + if (transactionVerificationEnabled) { + // TV2 behavior: Create verification state that supports epoch bumps + val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) + // Should reject non-zero sequences when there's no existing producer state + assertThrows(classOf[OutOfOrderSequenceException], () => + log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard)) + } else { + // TV1 behavior: Create verification state with supportsEpochBump=false + val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) + // Should allow non-zero sequences with non-zero epoch + log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard) + assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) + } } @Test @@ -4599,7 +4929,7 @@ class UnifiedLogTest { def testGetFirstBatchTimestampForSegments(): Unit = { val log = createLog(logDir, LogTestUtils.createLogConfig()) - val segments: java.util.List[LogSegment] = new java.util.ArrayList[LogSegment]() + val segments: util.List[LogSegment] = new util.ArrayList[LogSegment]() val seg1 = LogTestUtils.createSegment(1, logDir, 10, Time.SYSTEM) val seg2 = LogTestUtils.createSegment(2, logDir, 10, Time.SYSTEM) segments.add(seg1) @@ -4697,6 +5027,134 @@ class UnifiedLogTest { (log, segmentWithOverflow) } + + private def assertFetchOffsetByTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: Option[TimestampAndOffset], timestamp: Long): Unit = { + val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt) + val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader) + assertTrue(offsetResultHolder.futureHolderOpt.isPresent) + offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) + assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) + assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().hasTimestampAndOffset) + assertEquals(expected.get, offsetResultHolder.futureHolderOpt.get.taskFuture.get().timestampAndOffset().orElse(null)) + } + + private def assertFetchOffsetBySpecialTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: TimestampAndOffset, timestamp: Long): Unit = { + val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt) + val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader) + assertEquals(new OffsetResultHolder(expected), offsetResultHolder) + } + + private def getRemoteOffsetReader(remoteLogManagerOpt: Option[Any]): Optional[AsyncOffsetReader] = { + remoteLogManagerOpt match { + case Some(remoteLogManager) => Optional.of(remoteLogManager.asInstanceOf[AsyncOffsetReader]) + case None => Optional.empty[AsyncOffsetReader]() + } + } + + private def prepareLogWithSequentialRecords(log: UnifiedLog, recordCount: Int): Seq[TimestampAndEpoch] = { + val firstTimestamp = mockTime.milliseconds() + + (0 until recordCount).map { i => + val timestampAndEpoch = TimestampAndEpoch(firstTimestamp + i, i) + log.appendAsLeader( + TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = timestampAndEpoch.timestamp), + timestampAndEpoch.leaderEpoch + ) + timestampAndEpoch + } + } + + case class TimestampAndEpoch(timestamp: Long, leaderEpoch: Int) + + @Test + def 
testStaleProducerEpochReturnsRecoverableErrorForTV1Clients(): Unit = { + // Producer epoch gets incremented (coordinator fail over, completed transaction, etc.) + // and client has stale cached epoch. Fix prevents fatal InvalidTxnStateException. + + val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) + val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) + val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) + + val producerId = 123L + val oldEpoch = 5.toShort + val newEpoch = 6.toShort + + // Step 1: Simulate a scenario where producer epoch was incremented to fence the producer + val previousRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, producerId, newEpoch, 0, + new SimpleRecord("previous-key".getBytes, "previous-value".getBytes) + ) + val previousGuard = log.maybeStartTransactionVerification(producerId, 0, newEpoch, false) // TV1 = supportsEpochBump = false + log.appendAsLeader(previousRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, previousGuard) + + // Complete the transaction normally (commits do update producer state with current epoch) + val commitMarker = MemoryRecords.withEndTransactionMarker( + producerId, newEpoch, new EndTransactionMarker(ControlRecordType.COMMIT, 0) + ) + log.appendAsLeader(commitMarker, 0, AppendOrigin.COORDINATOR, RequestLocal.noCaching, VerificationGuard.SENTINEL) + + // Step 2: TV1 client tries to write with stale cached epoch (before learning about epoch increment) + val staleEpochRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, producerId, oldEpoch, 0, + new SimpleRecord("stale-epoch-key".getBytes, "stale-epoch-value".getBytes) + ) + + // Step 3: Verify our fix - should get InvalidProducerEpochException (recoverable), not InvalidTxnStateException (fatal) + val exception = assertThrows(classOf[InvalidProducerEpochException], () => { + val staleGuard = log.maybeStartTransactionVerification(producerId, 0, oldEpoch, false) + log.appendAsLeader(staleEpochRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, staleGuard) + }) + + // Verify the error message indicates epoch mismatch + assertTrue(exception.getMessage.contains("smaller than the last seen epoch")) + assertTrue(exception.getMessage.contains(s"$oldEpoch")) + assertTrue(exception.getMessage.contains(s"$newEpoch")) + } + + @Test + def testStaleProducerEpochReturnsRecoverableErrorForTV2Clients(): Unit = { + // Check producer epoch FIRST - if stale, return recoverable error before verification checks. 
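// Editor's sketch (not part of this patch): both stale-epoch tests in this hunk pin down the same
// contract — an append carrying a producer epoch older than the last persisted one should surface
// as InvalidProducerEpochException, which the test comments describe as recoverable, rather than
// the fatal InvalidTxnStateException. A caller-side sketch of that distinction:
object StaleEpochHandlingSketch {
  import org.apache.kafka.common.errors.{InvalidProducerEpochException, InvalidTxnStateException}

  def isRecoverable(t: Throwable): Boolean = t match {
    case _: InvalidProducerEpochException => true  // recoverable per the test comments above
    case _: InvalidTxnStateException      => false // fatal for the producer
    case _                                => false
  }
}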
+ + val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) + val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) + val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) + + val producerId = 456L + val originalEpoch = 3.toShort + val bumpedEpoch = 4.toShort + + // Step 1: Start transaction with epoch 3 (before timeout) + val initialRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, producerId, originalEpoch, 0, + new SimpleRecord("ks-initial-key".getBytes, "ks-initial-value".getBytes) + ) + val initialGuard = log.maybeStartTransactionVerification(producerId, 0, originalEpoch, true) // TV2 = supportsEpochBump = true + log.appendAsLeader(initialRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, initialGuard) + + // Step 2: Coordinator times out and aborts transaction + // TV2 (KIP-890): Coordinator bumps epoch from 3 → 4 and sends abort marker with epoch 4 + val abortMarker = MemoryRecords.withEndTransactionMarker( + producerId, bumpedEpoch, new EndTransactionMarker(ControlRecordType.ABORT, 0) + ) + log.appendAsLeader(abortMarker, 0, AppendOrigin.COORDINATOR, RequestLocal.noCaching, VerificationGuard.SENTINEL) + + // Step 3: TV2 transactional producer tries to append with stale epoch (timeout recovery scenario) + val staleEpochRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, producerId, originalEpoch, 0, + new SimpleRecord("ks-resume-key".getBytes, "ks-resume-value".getBytes) + ) + + // Step 4: Verify our fix works for TV2 - should get InvalidProducerEpochException (recoverable), not InvalidTxnStateException (fatal) + val exception = assertThrows(classOf[InvalidProducerEpochException], () => { + val staleGuard = log.maybeStartTransactionVerification(producerId, 0, originalEpoch, true) // TV2 = supportsEpochBump = true + log.appendAsLeader(staleEpochRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, staleGuard) + }) + + // Verify the error message indicates epoch mismatch (3 < 4) + assertTrue(exception.getMessage.contains("smaller than the last seen epoch")) + assertTrue(exception.getMessage.contains(s"$originalEpoch")) + assertTrue(exception.getMessage.contains(s"$bumpedEpoch")) + } } object UnifiedLogTest { diff --git a/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala b/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala index 75946f14075d9..b0dbd0a05c25f 100644 --- a/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala @@ -22,15 +22,12 @@ import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.junit.jupiter.api.Assertions.{assertEquals, assertNull} import org.junit.jupiter.api.Test -import java.util.Collections -import scala.jdk.CollectionConverters._ - class KafkaMetricsGroupTest { @Test def testUntaggedMetricName(): Unit = { val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") - val metricName = metricsGroup.metricName("TaggedMetric", Collections.emptyMap()) + val metricName = metricsGroup.metricName("TaggedMetric", java.util.Map.of) assertEquals("kafka.metrics", metricName.getGroup) assertEquals("TestMetrics", metricName.getType) @@ -42,7 +39,13 @@ class KafkaMetricsGroupTest { @Test def testTaggedMetricName(): Unit = { - val tags = Map("foo" -> "bar", "bar" -> "baz", "baz" -> "raz.taz").asJava + val tags = { + val map = new java.util.LinkedHashMap[String, String]() + map.put("foo", "bar") + 
map.put("bar", "baz") + map.put("baz", "raz.taz") + map + } val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") val metricName = metricsGroup.metricName("TaggedMetric", tags) @@ -56,7 +59,13 @@ class KafkaMetricsGroupTest { @Test def testTaggedMetricNameWithEmptyValue(): Unit = { - val tags = Map("foo" -> "bar", "bar" -> "", "baz" -> "raz.taz").asJava + val tags = { + val map = new java.util.LinkedHashMap[String, String]() + map.put("foo", "bar") + map.put("bar", "") + map.put("baz", "raz.taz") + map + } val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") val metricName = metricsGroup.metricName("TaggedMetric", tags) @@ -67,6 +76,4 @@ class KafkaMetricsGroupTest { metricName.getMBeanName) assertEquals("baz.raz_taz.foo.bar", metricName.getScope) } - - } diff --git a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala index 0b83829681138..5d13fc97bfb16 100644 --- a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala @@ -18,6 +18,7 @@ package kafka.metrics import java.lang.management.ManagementFactory +import java.util import java.util.Properties import javax.management.ObjectName import com.yammer.metrics.core.MetricPredicate @@ -36,9 +37,9 @@ import org.apache.kafka.common.utils.Time import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics -import org.junit.jupiter.api.Timeout +import org.junit.jupiter.api.{Test, Timeout} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{MethodSource, ValueSource} +import org.junit.jupiter.params.provider.MethodSource @Timeout(120) class MetricsTest extends KafkaServerTestHarness with Logging { @@ -56,9 +57,8 @@ class MetricsTest extends KafkaServerTestHarness with Logging { val nMessages = 2 - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMetricsReporterAfterDeletingTopic(quorum: String): Unit = { + @Test + def testMetricsReporterAfterDeletingTopic(): Unit = { val topic = "test-topic-metric" createTopic(topic) deleteTopic(topic) @@ -66,9 +66,8 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testBrokerTopicMetricsUnregisteredAfterDeletingTopic(quorum: String): Unit = { + @Test + def testBrokerTopicMetricsUnregisteredAfterDeletingTopic(): Unit = { val topic = "test-broker-topic-metric" createTopic(topic, 2) // Produce a few messages to create the metrics @@ -81,33 +80,29 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterIdMetric(quorum: String): Unit = { + @Test + def testClusterIdMetric(): Unit = { // Check if clusterId metric exists. 
val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=ClusterId"), 1) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testBrokerStateMetric(quorum: String): Unit = { + @Test + def testBrokerStateMetric(): Unit = { // Check if BrokerState metric exists. val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=BrokerState"), 1) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testYammerMetricsCountMetric(quorum: String): Unit = { + @Test + def testYammerMetricsCountMetric(): Unit = { // Check if yammer-metrics-count metric exists. val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=yammer-metrics-count"), 1) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testLinuxIoMetrics(quorum: String): Unit = { + @Test + def testLinuxIoMetrics(): Unit = { // Check if linux-disk-{read,write}-bytes metrics either do or do not exist depending on whether we are or are not // able to collect those metrics on the platform where this test is running. val usable = new LinuxIoMetricsCollector("/proc", Time.SYSTEM).usable() @@ -117,9 +112,8 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=$name"), expectedCount)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testJMXFilter(quorum: String): Unit = { + @Test + def testJMXFilter(): Unit = { // Check if cluster id metrics is not exposed in JMX assertTrue(ManagementFactory.getPlatformMBeanServer .isRegistered(new ObjectName("kafka.controller:type=KafkaController,name=ActiveControllerCount"))) @@ -127,12 +121,11 @@ class MetricsTest extends KafkaServerTestHarness with Logging { .isRegistered(new ObjectName(s"$requiredKafkaServerPrefix=ClusterId"))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUpdateJMXFilter(quorum: String): Unit = { + @Test + def testUpdateJMXFilter(): Unit = { // verify previously exposed metrics are removed and existing matching metrics are added brokers.foreach(broker => broker.kafkaYammerMetrics.reconfigure( - Map(JmxReporter.EXCLUDE_CONFIG -> "kafka.controller:type=KafkaController,name=ActiveControllerCount").asJava + util.Map.of(JmxReporter.EXCLUDE_CONFIG, "kafka.controller:type=KafkaController,name=ActiveControllerCount") )) assertFalse(ManagementFactory.getPlatformMBeanServer .isRegistered(new ObjectName("kafka.controller:type=KafkaController,name=ActiveControllerCount"))) @@ -140,9 +133,8 @@ class MetricsTest extends KafkaServerTestHarness with Logging { .isRegistered(new ObjectName(s"$requiredKafkaServerPrefix=ClusterId"))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGeneralBrokerTopicMetricsAreGreedilyRegistered(quorum: String): Unit = { + @Test + def testGeneralBrokerTopicMetricsAreGreedilyRegistered(): Unit = { val topic = "test-broker-topic-metric" createTopic(topic, 2) @@ -156,13 +148,16 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(topicMetricGroups(topic).nonEmpty, "Topic metrics aren't registered") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testWindowsStyleTagNames(quorum: String): Unit = { + @Test + def testWindowsStyleTagNames(): Unit = { val 
path = "C:\\windows-path\\kafka-logs" - val tags = Map("dir" -> path) - val expectedMBeanName = Set(tags.keySet.head, ObjectName.quote(path)).mkString("=") - val metric = new KafkaMetricsGroup(this.getClass).metricName("test-metric", tags.asJava) + val tags = util.Map.of("dir", path) + val expectedMBeanName = Set(tags.keySet().iterator().next(), ObjectName.quote(path)).mkString("=") + + // Changing the package or class name may cause incompatibility with existing code and metrics configuration + val metricsPackage = "kafka.metrics" + val metricsClassName = "MetricsTest" + val metric = new KafkaMetricsGroup(metricsPackage, metricsClassName).metricName("test-metric", tags) assert(metric.getMBeanName.endsWith(expectedMBeanName)) } @@ -213,9 +208,8 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(TestUtils.meterCount(bytesOut) > initialBytesOut) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testKRaftControllerMetrics(quorum: String): Unit = { + @Test + def testKRaftControllerMetrics(): Unit = { val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics Set( "kafka.controller:type=KafkaController,name=ActiveControllerCount", diff --git a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala index 3906011a20380..2d81c2a773bdf 100644 --- a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala @@ -20,7 +20,7 @@ package kafka.network import java.net.InetAddress import java.util import java.util.concurrent.{Callable, ExecutorService, Executors, TimeUnit} -import java.util.{Collections, Properties} +import java.util.Properties import com.yammer.metrics.core.Meter import kafka.network.Processor.ListenerMetricTag import kafka.server.KafkaConfig @@ -37,7 +37,6 @@ import org.apache.kafka.server.util.MockTime import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api._ -import scala.jdk.CollectionConverters._ import scala.collection.{Map, mutable} import scala.concurrent.TimeoutException @@ -89,14 +88,17 @@ class ConnectionQuotasTest { // Clean-up any metrics left around by previous tests TestUtils.clearYammerMetrics() + val metricsPackage = "kafka.network" + val metricsClassName = "ConnectionQuotasTest" + listeners.keys.foreach { name => - blockedPercentMeters.put(name, new KafkaMetricsGroup(this.getClass).newMeter( - s"${name}BlockedPercent", "blocked time", TimeUnit.NANOSECONDS, Map(ListenerMetricTag -> name).asJava)) + blockedPercentMeters.put(name, new KafkaMetricsGroup(metricsPackage, metricsClassName).newMeter( + s"${name}BlockedPercent", "blocked time", TimeUnit.NANOSECONDS, util.Map.of(ListenerMetricTag, name))) } // use system time, because ConnectionQuota causes the current thread to wait with timeout, which waits based on // system time; so using mock time will likely result in test flakiness due to a mixed use of mock and system time time = Time.SYSTEM - metrics = new Metrics(new MetricConfig(), Collections.emptyList(), time) + metrics = new Metrics(new MetricConfig(), util.List.of, time) executor = Executors.newFixedThreadPool(listeners.size) } @@ -282,7 +284,7 @@ class ConnectionQuotasTest { addListenersAndVerify(config, connectionQuotas) - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTIONS_CONFIG -> listenerMaxConnections.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, listenerMaxConnections.toString) 
listeners.values.foreach { listener => connectionQuotas.maxConnectionsPerListener(listener.listenerName).configure(listenerConfig) } @@ -374,7 +376,7 @@ class ConnectionQuotasTest { val config = KafkaConfig.fromProps(props) connectionQuotas = new ConnectionQuotas(config, time, metrics) - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) addListenersAndVerify(config, listenerConfig, connectionQuotas) // create connections with the rate < listener quota on every listener, and verify there is no throttling @@ -400,7 +402,7 @@ class ConnectionQuotasTest { val config = KafkaConfig.fromProps(props) connectionQuotas = new ConnectionQuotas(config, time, metrics) - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) addListenersAndVerify(config, listenerConfig, connectionQuotas) // create connections with the rate > listener quota on every listener @@ -498,7 +500,7 @@ class ConnectionQuotasTest { // with a default per-IP limit of 25 and a listener rate of 30, only one IP should be able to saturate their IP rate // limit, the other IP will hit listener rate limits and block connectionQuotas.updateIpConnectionRateQuota(None, Some(ipConnectionRateLimit)) - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) addListenersAndVerify(config, listenerConfig, connectionQuotas) val listener = listeners("EXTERNAL").listenerName // use a small number of connections because a longer-running test will have both IPs throttle at different times @@ -556,7 +558,7 @@ class ConnectionQuotasTest { connectionQuotas.addListener(config, listeners("EXTERNAL").listenerName) val maxListenerConnectionRate = 0 - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> maxListenerConnectionRate.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, maxListenerConnectionRate.toString) assertThrows(classOf[ConfigException], () => connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).validateReconfiguration(listenerConfig) ) @@ -569,11 +571,11 @@ class ConnectionQuotasTest { connectionQuotas.addListener(config, listeners("EXTERNAL").listenerName) val listenerRateLimit = 20 - val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava + val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).configure(listenerConfig) // remove connection rate limit - connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(Map.empty.asJava) + connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(util.Map.of) // create connections as fast as possible, will timeout if connections get throttled with previous rate // (50s to create 1000 connections) @@ -586,7 +588,7 @@ class ConnectionQuotasTest { // configure 100 connection/second 
rate limit val newMaxListenerConnectionRate = 10 - val newListenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> newMaxListenerConnectionRate.toString).asJava + val newListenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, newMaxListenerConnectionRate.toString) connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(newListenerConfig) // verify rate limit @@ -750,7 +752,7 @@ class ConnectionQuotasTest { } private def addListenersAndVerify(config: KafkaConfig, connectionQuotas: ConnectionQuotas) : Unit = { - addListenersAndVerify(config, Map.empty.asJava, connectionQuotas) + addListenersAndVerify(config, util.Map.of, connectionQuotas) } private def addListenersAndVerify(config: KafkaConfig, @@ -829,7 +831,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "connection-accept-throttle-time", SocketServer.MetricsGroup, - Collections.singletonMap(Processor.ListenerMetricTag, listener)) + util.Map.of(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -837,7 +839,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "ip-connection-accept-throttle-time", SocketServer.MetricsGroup, - Collections.singletonMap(Processor.ListenerMetricTag, listener)) + util.Map.of(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -845,7 +847,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "connection-accept-rate", SocketServer.MetricsGroup, - Collections.singletonMap(Processor.ListenerMetricTag, listener)) + util.Map.of(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -860,7 +862,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( s"connection-accept-rate", SocketServer.MetricsGroup, - Collections.singletonMap("ip", ip)) + util.Map.of("ip", ip)) metrics.metric(metricName) } diff --git a/core/src/test/scala/unit/kafka/network/ProcessorTest.scala b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala index 575f004fe0fce..54bbd0bf2018a 100644 --- a/core/src/test/scala/unit/kafka/network/ProcessorTest.scala +++ b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala @@ -31,8 +31,9 @@ import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable import org.mockito.Mockito.mock +import java.util import java.util.function.Supplier -import java.util.{Collections, Optional} +import java.util.Optional class ProcessorTest { @@ -41,7 +42,7 @@ class ProcessorTest { val requestHeader = RequestTestUtils.serializeRequestHeader( new RequestHeader(ApiKeys.INIT_PRODUCER_ID, 0, "clientid", 0)) val apiVersionManager = new SimpleApiVersionManager(ListenerType.CONTROLLER, true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) val e = assertThrows(classOf[InvalidRequestException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, "INIT_PRODUCER_ID with listener type CONTROLLER should throw InvalidRequestException exception") @@ -95,5 +96,4 @@ class ProcessorTest { assertTrue(e.toString.contains("unsupported version")) } } - } diff --git a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala index 1ab0f0ae8e80c..8dbfa808d7f45 100644 --- a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala 
+++ b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala @@ -17,7 +17,6 @@ package kafka.network - import com.fasterxml.jackson.databind.ObjectMapper import kafka.network import kafka.server.EnvelopeUtils @@ -47,9 +46,9 @@ import org.mockito.Mockito.mock import java.io.IOException import java.net.InetAddress import java.nio.ByteBuffer -import java.util.Collections +import java.util import java.util.concurrent.atomic.AtomicReference -import scala.collection.{Map, Seq} +import scala.collection.Map import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOption @@ -64,9 +63,9 @@ class RequestChannelTest { def testAlterRequests(): Unit = { val sensitiveValue = "secret" - def verifyConfig(resource: ConfigResource, entries: Seq[ConfigEntry], expectedValues: Map[String, String]): Unit = { + def verifyConfig(resource: ConfigResource, entries: util.List[ConfigEntry], expectedValues: Map[String, String]): Unit = { val alterConfigs = request(new AlterConfigsRequest.Builder( - Collections.singletonMap(resource, new Config(entries.asJavaCollection)), true).build()) + util.Map.of(resource, new Config(entries)), true).build()) val loggableAlterConfigs = alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest] val loggedConfig = loggableAlterConfigs.configs.get(resource) @@ -77,37 +76,37 @@ class RequestChannelTest { val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "1") val keystorePassword = new ConfigEntry(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sensitiveValue) - verifyConfig(brokerResource, Seq(keystorePassword), Map(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) + verifyConfig(brokerResource, util.List.of(keystorePassword), Map(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) val keystoreLocation = new ConfigEntry(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/path/to/keystore") - verifyConfig(brokerResource, Seq(keystoreLocation), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore")) - verifyConfig(brokerResource, Seq(keystoreLocation, keystorePassword), + verifyConfig(brokerResource, util.List.of(keystoreLocation), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore")) + verifyConfig(brokerResource, util.List.of(keystoreLocation, keystorePassword), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore", SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) val listenerKeyPassword = new ConfigEntry(s"listener.name.internal.${SslConfigs.SSL_KEY_PASSWORD_CONFIG}", sensitiveValue) - verifyConfig(brokerResource, Seq(listenerKeyPassword), Map(listenerKeyPassword.name -> Password.HIDDEN)) + verifyConfig(brokerResource, util.List.of(listenerKeyPassword), Map(listenerKeyPassword.name -> Password.HIDDEN)) val listenerKeystore = new ConfigEntry(s"listener.name.internal.${SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG}", "/path/to/keystore") - verifyConfig(brokerResource, Seq(listenerKeystore), Map(listenerKeystore.name -> "/path/to/keystore")) + verifyConfig(brokerResource, util.List.of(listenerKeystore), Map(listenerKeystore.name -> "/path/to/keystore")) val plainJaasConfig = new ConfigEntry(s"listener.name.internal.plain.${SaslConfigs.SASL_JAAS_CONFIG}", sensitiveValue) - verifyConfig(brokerResource, Seq(plainJaasConfig), Map(plainJaasConfig.name -> Password.HIDDEN)) + verifyConfig(brokerResource, util.List.of(plainJaasConfig), Map(plainJaasConfig.name -> Password.HIDDEN)) val plainLoginCallback = new 
ConfigEntry(s"listener.name.internal.plain.${SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS}", "test.LoginClass") - verifyConfig(brokerResource, Seq(plainLoginCallback), Map(plainLoginCallback.name -> plainLoginCallback.value)) + verifyConfig(brokerResource, util.List.of(plainLoginCallback), Map(plainLoginCallback.name -> plainLoginCallback.value)) val customConfig = new ConfigEntry("custom.config", sensitiveValue) - verifyConfig(brokerResource, Seq(customConfig), Map(customConfig.name -> Password.HIDDEN)) + verifyConfig(brokerResource, util.List.of(customConfig), Map(customConfig.name -> Password.HIDDEN)) val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "testTopic") val compressionType = new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") - verifyConfig(topicResource, Seq(compressionType), Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> "lz4")) - verifyConfig(topicResource, Seq(customConfig), Map(customConfig.name -> Password.HIDDEN)) + verifyConfig(topicResource, util.List.of(compressionType), Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> "lz4")) + verifyConfig(topicResource, util.List.of(customConfig), Map(customConfig.name -> Password.HIDDEN)) // Verify empty request val alterConfigs = request(new AlterConfigsRequest.Builder( - Collections.emptyMap[ConfigResource, Config], true).build()) - assertEquals(Collections.emptyMap, alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest].configs) + util.Map.of[ConfigResource, Config], true).build()) + assertEquals(util.Map.of, alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest].configs) } @Test @@ -131,11 +130,21 @@ class RequestChannelTest { op: OpType, entries: Map[String, String], expectedValues: Map[String, String]): Unit = { - val alterConfigs = request(incrementalAlterConfigs(resource, entries, op)) - val loggableAlterConfigs = alterConfigs.loggableRequest.asInstanceOf[IncrementalAlterConfigsRequest] + val alterConfigs = incrementalAlterConfigs(resource, entries, op) + val alterConfigsString = alterConfigs.toString + entries.foreach { entry => + if (!alterConfigsString.contains(entry._1)) { + fail("Config names should be in the request string") + } + if (entry._2 != null && alterConfigsString.contains(entry._2)) { + fail("Config values should not be in the request string") + } + } + val req = request(alterConfigs) + val loggableAlterConfigs = req.loggableRequest.asInstanceOf[IncrementalAlterConfigsRequest] val loggedConfig = loggableAlterConfigs.data.resources.find(resource.`type`.id, resource.name).configs assertEquals(expectedValues, toMap(loggedConfig)) - val alterConfigsDesc = RequestConvertToJson.requestDesc(alterConfigs.header, alterConfigs.requestLog.toJava, alterConfigs.isForwarded).toString + val alterConfigsDesc = RequestConvertToJson.requestDesc(req.header, req.requestLog.toJava, req.isForwarded).toString assertFalse(alterConfigsDesc.contains(sensitiveValue), s"Sensitive config logged $alterConfigsDesc") } @@ -179,7 +188,7 @@ class RequestChannelTest { @Test def testNonAlterRequestsNotTransformed(): Unit = { - val metadataRequest = request(new MetadataRequest.Builder(List("topic").asJava, true).build()) + val metadataRequest = request(new MetadataRequest.Builder(util.List.of("topic"), true).build()) assertSame(metadataRequest.body[MetadataRequest], metadataRequest.loggableRequest) } @@ -188,10 +197,10 @@ class RequestChannelTest { val sensitiveValue = "secret" val resource = new ConfigResource(ConfigResource.Type.BROKER, "1") val keystorePassword = new 
ConfigEntry(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sensitiveValue) - val entries = Seq(keystorePassword) + val entries = util.List.of(keystorePassword) - val alterConfigs = request(new AlterConfigsRequest.Builder(Collections.singletonMap(resource, - new Config(entries.asJavaCollection)), true).build()) + val alterConfigs = request(new AlterConfigsRequest.Builder(util.Map.of(resource, + new Config(entries)), true).build()) assertTrue(isValidJson(RequestConvertToJson.request(alterConfigs.loggableRequest).toString)) } diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index 1d793e726b81d..6a4b8d8ca672e 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -22,6 +22,7 @@ import com.yammer.metrics.core.{Gauge, Meter} import kafka.server._ import kafka.utils.Implicits._ import kafka.utils.TestUtils +import org.apache.kafka.common.Endpoint import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.{ProduceRequestData, SaslAuthenticateRequestData, SaslHandshakeRequestData, VoteRequestData} @@ -36,7 +37,6 @@ import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.utils._ import org.apache.kafka.network.RequestConvertToJson import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.network.EndPoint import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.{ApiVersionManager, SimpleApiVersionManager} import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} @@ -59,7 +59,7 @@ import java.security.cert.X509Certificate import java.util import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent._ -import java.util.{Collections, Properties, Random} +import java.util.{Properties, Random} import javax.net.ssl._ import scala.collection.mutable import scala.collection.mutable.ArrayBuffer @@ -85,16 +85,16 @@ class SocketServerTest { TestUtils.clearYammerMetrics() private val apiVersionManager = new SimpleApiVersionManager(ListenerType.BROKER, true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) var server: SocketServer = _ val sockets = new ArrayBuffer[Socket] private val kafkaLogger = LogManager.getLogger("kafka") private var logLevelToRestore: Level = _ - def endpoint: EndPoint = { + def endpoint: Endpoint = { KafkaConfig.fromProps(props, doLog = false).dataPlaneListeners.head } - def listener: String = endpoint.listenerName.value + def listener: String = endpoint.listener val uncaughtExceptions = new AtomicInteger(0) @BeforeEach @@ -840,7 +840,7 @@ class SocketServerTest { // same as SocketServer.createAcceptor, // except the Acceptor overriding a method to inject the exception - override protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { + override protected def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { new DataPlaneAcceptor(this, endPoint, this.config, nodeId, connectionQuotas, time, false, requestChannel, serverMetrics, this.credentialProvider, new LogContext(), 
MemoryPool.NONE, this.apiVersionManager) { override protected def configureAcceptedSocketChannel(socketChannel: SocketChannel): Unit = { @@ -1722,7 +1722,7 @@ class SocketServerTest { val testableServer = new TestableSocketServer(KafkaConfig.fromProps(props), connectionQueueSize = 1) testableServer.enableRequestProcessing(Map.empty).get(1, TimeUnit.MINUTES) val testableSelector = testableServer.testableSelector - val errors = new mutable.HashSet[String] + val errors = new util.HashSet[String]() def acceptorStackTraces: scala.collection.Map[Thread, String] = { Thread.getAllStackTraces.asScala.collect { @@ -1746,7 +1746,7 @@ class SocketServerTest { // Block selector until Acceptor is blocked while connections are pending testableSelector.pollCallback = () => { try { - TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked, + TestUtils.waitUntilTrue(() => !errors.isEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked, "Acceptor not blocked", waitTimeMs = 10000) } catch { case _: Throwable => errors.add(s"Acceptor not blocked: $acceptorStackTraces") @@ -1754,9 +1754,9 @@ class SocketServerTest { } testableSelector.operationCounts.clear() val sockets = (1 to numConnections).map(_ => connect(testableServer)) - TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount == numConnections, + TestUtils.waitUntilTrue(() => !errors.isEmpty || registeredConnectionCount == numConnections, "Connections not registered", waitTimeMs = 15000) - assertEquals(Set.empty, errors) + assertEquals(util.Set.of, errors) testableSelector.waitForOperations(SelectorOperation.Register, numConnections) // In each iteration, SocketServer processes at most connectionQueueSize (1 in this test) @@ -1858,7 +1858,7 @@ class SocketServerTest { val failedFuture = new CompletableFuture[Void]() failedFuture.completeExceptionally(new RuntimeException("authorizer startup failed")) assertThrows(classOf[ExecutionException], () => { - newServer.enableRequestProcessing(Map(endpoint.toPublic -> failedFuture)).get() + newServer.enableRequestProcessing(Map(endpoint -> failedFuture)).get() }) } finally { shutdownServerAndMetrics(newServer) @@ -1891,7 +1891,7 @@ class SocketServerTest { val authorizerFuture = new CompletableFuture[Void]() val enableFuture = newServer.enableRequestProcessing( newServer.dataPlaneAcceptors.keys().asScala. 
- map(_.toPublic).map(k => k -> authorizerFuture).toMap) + map(k => k -> authorizerFuture).toMap) assertFalse(authorizerFuture.isDone) assertFalse(enableFuture.isDone) newServer.dataPlaneAcceptors.values().forEach(a => assertNull(a.serverChannel)) @@ -1992,7 +1992,7 @@ class SocketServerTest { } class TestableAcceptor(socketServer: SocketServer, - endPoint: EndPoint, + endPoint: Endpoint, cfg: KafkaConfig, nodeId: Int, connectionQuotas: ConnectionQuotas, @@ -2061,7 +2061,7 @@ class SocketServerTest { private var conn: Option[Socket] = None override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = { - new TestableSelector(config, channelBuilder, time, metrics, metricTags.asScala) + new TestableSelector(config, channelBuilder, time, metrics, metricTags) } override private[network] def processException(errorMessage: String, throwable: Throwable): Unit = { @@ -2098,7 +2098,7 @@ class SocketServerTest { connectionDisconnectListeners = connectionDisconnectListeners ) { - override def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel) : DataPlaneAcceptor = { + override def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel) : DataPlaneAcceptor = { new TestableAcceptor(this, endPoint, this.config, 0, connectionQuotas, time, isPrivilegedListener, requestChannel, this.metrics, this.credentialProvider, new LogContext, MemoryPool.NONE, this.apiVersionManager, connectionQueueSize) } @@ -2159,9 +2159,9 @@ class SocketServerTest { case object CloseSelector extends SelectorOperation } - class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: mutable.Map[String, String] = mutable.Map.empty) + class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: util.Map[String, String] = new util.HashMap()) extends Selector(config.socketRequestMaxBytes, config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, - metrics, time, "socket-server", metricTags.asJava, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) { + metrics, time, "socket-server", metricTags, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) { val failures = mutable.Map[SelectorOperation, Throwable]() val operationCounts = mutable.Map[SelectorOperation, Int]().withDefaultValue(0) diff --git a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala index 4255648347cfc..117c2b63978d8 100644 --- a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala +++ b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala @@ -22,19 +22,21 @@ import java.nio.channels.OverlappingFileLockException import java.nio.file.{Files, Path, StandardOpenOption} import java.util.Properties import java.util.concurrent.CompletableFuture -import kafka.log.LogManager import kafka.server.KafkaConfig import kafka.tools.TestRaftServer.ByteArraySerde import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.Uuid import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.utils.Time +import org.apache.kafka.common.utils.Utils import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.{Endpoints, MetadataLogConfig, QuorumConfig} import org.apache.kafka.server.ProcessRole import 
org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.FaultHandler +import org.apache.kafka.storage.internals.log.LogManager import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest @@ -88,7 +90,7 @@ class RaftManagerTest { val endpoints = Endpoints.fromInetSocketAddresses( config.effectiveAdvertisedControllerListeners .map { endpoint => - (endpoint.listenerName, InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) + (ListenerName.normalised(endpoint.listener), InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) } .toMap .asJava @@ -125,17 +127,21 @@ class RaftManagerTest { val logDir = TestUtils.tempDir() val nodeId = 1 - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - processRolesSet, - nodeId, - Seq(logDir.toPath), - None + try { + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + processRolesSet, + nodeId, + Seq(logDir.toPath), + None + ) ) - ) - assertEquals(nodeId, raftManager.client.nodeId.getAsInt) - raftManager.shutdown() + assertEquals(nodeId, raftManager.client.nodeId.getAsInt) + raftManager.shutdown() + } finally { + Utils.delete(logDir) + } } @ParameterizedTest @@ -154,22 +160,27 @@ class RaftManagerTest { } val nodeId = 1 - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - Set(ProcessRole.ControllerRole), - nodeId, - logDir, - metadataDir + try { + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + Set(ProcessRole.ControllerRole), + nodeId, + logDir, + metadataDir + ) ) - ) - val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LockFileName) - assertTrue(fileLocked(lockPath)) + val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME) + assertTrue(fileLocked(lockPath)) - raftManager.shutdown() + raftManager.shutdown() - assertFalse(fileLocked(lockPath)) + assertFalse(fileLocked(lockPath)) + } finally { + logDir.foreach(p => Utils.delete(p.toFile)) + metadataDir.foreach(p => Utils.delete(p.toFile)) + } } @Test @@ -178,22 +189,27 @@ class RaftManagerTest { val metadataDir = Some(TestUtils.tempDir().toPath) val nodeId = 1 - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - Set(ProcessRole.BrokerRole), - nodeId, - logDir, - metadataDir + try { + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + Set(ProcessRole.BrokerRole), + nodeId, + logDir, + metadataDir + ) ) - ) - val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LockFileName) - assertTrue(fileLocked(lockPath)) + val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME) + assertTrue(fileLocked(lockPath)) - raftManager.shutdown() + raftManager.shutdown() - assertFalse(fileLocked(lockPath)) + assertFalse(fileLocked(lockPath)) + } finally { + logDir.foreach(p => Utils.delete(p.toFile)) + metadataDir.foreach(p => Utils.delete(p.toFile)) + } } def createMetadataLog(config: KafkaConfig): Unit = { diff --git a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala index 0e2105f85d2d1..0839990868fdd 100644 --- a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala +++ 
b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala @@ -41,18 +41,15 @@ import org.apache.kafka.server.authorizer._ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Test -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import java.net.InetAddress import java.util -import java.util.{Collections, Properties, UUID} +import java.util.{Properties, UUID} import scala.jdk.CollectionConverters._ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { private final val PLAINTEXT = new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "127.0.0.1", 9020) - private final val KRAFT = "kraft" private val allowReadAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, READ, ALLOW) private val allowWriteAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, WRITE, ALLOW) @@ -104,32 +101,28 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAuthorizeThrowsOnNonLiteralResource(quorum: String): Unit = { + @Test + def testAuthorizeThrowsOnNonLiteralResource(): Unit = { assertThrows(classOf[IllegalArgumentException], () => authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "something", PREFIXED))) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAuthorizeWithEmptyResourceName(quorum: String): Unit = { + @Test + def testAuthorizeWithEmptyResourceName(): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL))) addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(GROUP, WILDCARD_RESOURCE, LITERAL)) assertTrue(authorize(authorizer1, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL))) } // Authorizing the empty resource is not supported because empty resource name is invalid. 
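A minimal standalone sketch of the annotation change running through AuthorizerTest (and MetricsTest earlier): the ValueSource only ever supplied the single value "kraft", so the quorum parameter carries no information and each method becomes a plain JUnit 5 @Test. The class and method names below are hypothetical.

import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource

class QuorumAnnotationSketch {
  // Old shape: a one-value parameterization that always receives "kraft".
  @ParameterizedTest
  @ValueSource(strings = Array("kraft"))
  def oldStyle(quorum: String): Unit = assertEquals("kraft", quorum)

  // New shape: the synthetic parameter is dropped, along with the ParameterizedTest/ValueSource imports.
  @Test
  def newStyle(): Unit = assertEquals(2, 1 + 1)
}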
- @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testEmptyAclThrowsException(quorum: String): Unit = { + @Test + def testEmptyAclThrowsException(): Unit = { assertThrows(classOf[ApiException], () => addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(GROUP, "", LITERAL))) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testTopicAcl(quorum: String): Unit = { + @Test + def testTopicAcl(): Unit = { val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "rob") val user3 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "batman") @@ -183,9 +176,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { /** * CustomPrincipals should be compared with their principal type and name */ - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAllowAccessWithCustomPrincipal(quorum: String): Unit = { + @Test + def testAllowAccessWithCustomPrincipal(): Unit = { val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val customUserPrincipal = new CustomPrincipal(KafkaPrincipal.USER_TYPE, username) val host1 = InetAddress.getByName("192.168.1.1") @@ -204,9 +196,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, host1Context, READ, resource), "User1 should not have READ access from host1 due to denyAcl") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testDenyTakesPrecedence(quorum: String): Unit = { + @Test + def testDenyTakesPrecedence(): Unit = { val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val host = InetAddress.getByName("192.168.2.1") val session = newRequestContext(user, host) @@ -220,9 +211,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, session, READ, resource), "deny should take precedence over allow.") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAllowAllAccess(quorum: String): Unit = { + @Test + def testAllowAllAccess(): Unit = { val allowAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, ALLOW) changeAclAndVerify(Set.empty, Set(allowAllAcl), Set.empty) @@ -231,9 +221,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, context, READ, resource), "allow all acl should allow access to all.") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testSuperUserHasAccess(quorum: String): Unit = { + @Test + def testSuperUserHasAccess(): Unit = { val denyAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, DENY) changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty) @@ -248,9 +237,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { /** * CustomPrincipals should be compared with their principal type and name */ - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testSuperUserWithCustomPrincipalHasAccess(quorum: String): Unit = { + @Test + def testSuperUserWithCustomPrincipalHasAccess(): Unit = { val denyAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, DENY) changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty) @@ -259,9 +247,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, session, READ, resource), "superuser with custom principal always has access, no matter 
what acls.") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testWildCardAcls(quorum: String): Unit = { + @Test + def testWildCardAcls(): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, resource), "when acls = [], authorizer should fail close.") val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) @@ -284,15 +271,13 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, host1Context, WRITE, resource), "User1 should not have WRITE access from host1") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testNoAclFound(quorum: String): Unit = { + @Test + def testNoAclFound(): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, resource), "when acls = [], authorizer should deny op.") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testNoAclFoundOverride(quorum: String): Unit = { + @Test + def testNoAclFoundOverride(): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true") @@ -307,9 +292,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAclConfigWithWhitespace(quorum: String): Unit = { + @Test + def testAclConfigWithWhitespace(): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, " true") // replace all property values with leading & trailing whitespaces @@ -325,9 +309,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAclManagementAPIs(quorum: String): Unit = { + @Test + def testAclManagementAPIs(): Unit = { val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob") val host1 = "host1" @@ -393,9 +376,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { /** * Test ACL inheritance, as described in #{org.apache.kafka.common.acl.AclOperation} */ - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAclInheritance(quorum: String): Unit = { + @Test + def testAclInheritance(): Unit = { testImplicationsOfAllow(AclOperation.ALL, Set(READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, CREATE_TOKENS, DESCRIBE_TOKENS, TWO_PHASE_COMMIT)) testImplicationsOfDeny(AclOperation.ALL, Set(READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, @@ -442,17 +424,15 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { removeAcls(authorizer1, acls, clusterResource) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAccessAllowedIfAllowAclExistsOnWildcardResource(quorum: String): Unit = { + @Test + def testAccessAllowedIfAllowAclExistsOnWildcardResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl), wildCardResource) assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testDeleteAclOnWildcardResource(quorum: String): Unit = { + @Test + def testDeleteAclOnWildcardResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), wildCardResource) removeAcls(authorizer1, Set(allowReadAcl), wildCardResource) @@ -460,9 +440,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set(allowWriteAcl), getAcls(authorizer1, 
wildCardResource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testDeleteAllAclOnWildcardResource(quorum: String): Unit = { + @Test + def testDeleteAllAclOnWildcardResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl), wildCardResource) removeAcls(authorizer1, Set.empty, wildCardResource) @@ -470,17 +449,15 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAccessAllowedIfAllowAclExistsOnPrefixedResource(quorum: String): Unit = { + @Test + def testAccessAllowedIfAllowAclExistsOnPrefixedResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl), prefixedResource) assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testDeleteAclOnPrefixedResource(quorum: String): Unit = { + @Test + def testDeleteAclOnPrefixedResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) removeAcls(authorizer1, Set(allowReadAcl), prefixedResource) @@ -488,9 +465,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set(allowWriteAcl), getAcls(authorizer1, prefixedResource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testDeleteAllAclOnPrefixedResource(quorum: String): Unit = { + @Test + def testDeleteAllAclOnPrefixedResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) removeAcls(authorizer1, Set.empty, prefixedResource) @@ -498,9 +474,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAddAclsOnLiteralResource(quorum: String): Unit = { + @Test + def testAddAclsOnLiteralResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), resource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), resource) @@ -509,9 +484,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, prefixedResource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAddAclsOnWildcardResource(quorum: String): Unit = { + @Test + def testAddAclsOnWildcardResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), wildCardResource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), wildCardResource) @@ -520,9 +494,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, prefixedResource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAddAclsOnPrefixedResource(quorum: String): Unit = { + @Test + def testAddAclsOnPrefixedResource(): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), prefixedResource) @@ -531,9 +504,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, resource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAuthorizeWithPrefixedResource(quorum: String): Unit = { + @Test + def testAuthorizeWithPrefixedResource(): Unit = { addAcls(authorizer1, Set(denyReadAcl), new ResourcePattern(TOPIC, "a_other", LITERAL)) addAcls(authorizer1, Set(denyReadAcl), new ResourcePattern(TOPIC, "a_other", PREFIXED)) addAcls(authorizer1, 
Set(denyReadAcl), new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), PREFIXED)) @@ -552,9 +524,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testSingleCharacterResourceAcls(quorum: String): Unit = { + @Test + def testSingleCharacterResourceAcls(): Unit = { addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(TOPIC, "f", LITERAL)) assertTrue(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "f", LITERAL))) assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "foo", LITERAL))) @@ -565,9 +536,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "foo_", LITERAL))) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testGetAclsPrincipal(quorum: String): Unit = { + @Test + def testGetAclsPrincipal(): Unit = { val aclOnSpecificPrincipal = new AccessControlEntry(principal.toString, WILDCARD_HOST, WRITE, ALLOW) addAcls(authorizer1, Set(aclOnSpecificPrincipal), resource) @@ -586,9 +556,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(0, getAcls(authorizer1, principal).size, "acl on wildcard should not be returned for specific request") } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAclsFilter(quorum: String): Unit = { + @Test + def testAclsFilter(): Unit = { val resource1 = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL) val resource2 = new ResourcePattern(TOPIC, "bar-" + UUID.randomUUID(), LITERAL) val prefixedResource = new ResourcePattern(TOPIC, "bar-", PREFIXED) @@ -598,7 +567,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { val acl3 = new AclBinding(resource2, new AccessControlEntry(principal.toString, WILDCARD_HOST, DESCRIBE, ALLOW)) val acl4 = new AclBinding(prefixedResource, new AccessControlEntry(wildcardPrincipal.toString, WILDCARD_HOST, READ, ALLOW)) - authorizer1.createAcls(requestContext, List(acl1, acl2, acl3, acl4).asJava) + authorizer1.createAcls(requestContext, util.List.of(acl1, acl2, acl3, acl4)) assertEquals(Set(acl1, acl2, acl3, acl4), authorizer1.acls(AclBindingFilter.ANY).asScala.toSet) assertEquals(Set(acl1, acl2), authorizer1.acls(new AclBindingFilter(resource1.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet) assertEquals(Set(acl4), authorizer1.acls(new AclBindingFilter(prefixedResource.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet) @@ -622,9 +591,8 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, deleteResults(3).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet) } - @ParameterizedTest - @ValueSource(strings = Array(KRAFT)) - def testAuthorizeByResourceTypeNoAclFoundOverride(quorum: String): Unit = { + @Test + def testAuthorizeByResourceTypeNoAclFoundOverride(): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true") @@ -664,7 +632,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { private def authorize(authorizer: Authorizer, requestContext: RequestContext, operation: AclOperation, resource: ResourcePattern): Boolean = { val action = new Action(operation, resource, 1, true, true) - authorizer.authorize(requestContext, List(action).asJava).asScala.head == 
AuthorizationResult.ALLOWED + authorizer.authorize(requestContext, util.List.of(action)).asScala.head == AuthorizationResult.ALLOWED } private def getAcls(authorizer: Authorizer, resourcePattern: ResourcePattern): Set[AccessControlEntry] = { @@ -701,7 +669,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { pluginMetrics: PluginMetrics): Unit = { standardAuthorizer.configure(configs) standardAuthorizer.withPluginMetrics(pluginMetrics) - initializeStandardAuthorizer(standardAuthorizer, new AuthorizerTestServerInfo(Collections.singletonList(PLAINTEXT))) + initializeStandardAuthorizer(standardAuthorizer, new AuthorizerTestServerInfo(util.List.of(PLAINTEXT))) } def initializeStandardAuthorizer(standardAuthorizer: StandardAuthorizer, diff --git a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala index 88d25b65d934c..958c8440c2cf9 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala @@ -25,29 +25,20 @@ import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse, RequestUtils} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.utils.Utils -import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, ShareVersion, TransactionVersion} +import org.apache.kafka.server.IntegrationTestUtils +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, ShareVersion, StreamsVersion, TransactionVersion} import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Tag import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOptional @Tag("integration") abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { - def sendApiVersionsRequest(request: ApiVersionsRequest, listenerName: ListenerName): ApiVersionsResponse = { - val socket = if (cluster.controllerListenerName().toScala.contains(listenerName)) { - cluster.controllerSocketServers().asScala.head - } else { - cluster.brokerSocketServers().asScala.head - } - IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, socket, listenerName) - } - def sendUnsupportedApiVersionRequest(request: ApiVersionsRequest): ApiVersionsResponse = { val overrideHeader = IntegrationTestUtils.nextRequestHeader(ApiKeys.API_VERSIONS, Short.MaxValue) - val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) + val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) try { val serializedBytes = Utils.toArray( RequestUtils.serialize(overrideHeader.data, overrideHeader.headerVersion, request.data, request.version)) @@ -64,11 +55,11 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { apiVersion: Short = ApiKeys.API_VERSIONS.latestVersion ): Unit = { if (apiVersion >= 3) { - assertEquals(5, apiVersionsResponse.data().finalizedFeatures().size()) + assertEquals(6, apiVersionsResponse.data().finalizedFeatures().size()) assertEquals(MetadataVersion.latestTesting().featureLevel(), apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).minVersionLevel()) assertEquals(MetadataVersion.latestTesting().featureLevel(), 
apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).maxVersionLevel()) - assertEquals(6, apiVersionsResponse.data().supportedFeatures().size()) + assertEquals(7, apiVersionsResponse.data().supportedFeatures().size()) assertEquals(MetadataVersion.MINIMUM_VERSION.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(MetadataVersion.FEATURE_NAME).minVersion()) if (apiVersion < 4) { assertEquals(1, apiVersionsResponse.data().supportedFeatures().find("kraft.version").minVersion()) @@ -88,8 +79,11 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(ShareVersion.FEATURE_NAME).minVersion()) assertEquals(ShareVersion.SV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(ShareVersion.FEATURE_NAME).maxVersion()) + + assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(StreamsVersion.FEATURE_NAME).minVersion()) + assertEquals(StreamsVersion.SV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(StreamsVersion.FEATURE_NAME).maxVersion()) } - val expectedApis = if (cluster.controllerListenerName().toScala.contains(listenerName)) { + val expectedApis = if (cluster.controllerListenerName() == listenerName) { ApiVersionsResponse.collectApis( ApiMessageType.ListenerType.CONTROLLER, ApiKeys.apisForListener(ApiMessageType.ListenerType.CONTROLLER), @@ -107,7 +101,7 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(expectedApis.size, apiVersionsResponse.data.apiKeys.size, "API keys in ApiVersionsResponse must match API keys supported by broker.") - val defaultApiVersionsResponse = if (cluster.controllerListenerName().toScala.contains(listenerName)) { + val defaultApiVersionsResponse = if (cluster.controllerListenerName() == listenerName) { TestUtils.defaultApiVersionsResponse(0, ListenerType.CONTROLLER, enableUnstableLastVersion) } else { TestUtils.createApiVersionsResponse(0, expectedApis) diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala index 12bf55e42a603..5577dc9bd38fd 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala @@ -17,16 +17,22 @@ package kafka.server import com.yammer.metrics.core.Gauge -import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.TestUtils +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.FetchResponseData.PartitionData import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset +import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.requests.FetchRequest -import org.apache.kafka.common.utils.Utils +import org.apache.kafka.common.utils.{MockTime, Utils} import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.common.{DirectoryEventHandler, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.ReplicaFetch +import org.apache.kafka.server.ReplicaState +import org.apache.kafka.server.ResultWithPartitions +import org.apache.kafka.server.PartitionFetchState +import 
org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ @@ -36,6 +42,7 @@ import org.mockito.Mockito.{mock, verify, when} import java.util.Optional import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ class AbstractFetcherManagerTest { @@ -61,9 +68,9 @@ class AbstractFetcherManagerTest { val fetchOffset = 10L val leaderEpoch = 15 val tp = new TopicPartition("topic", 0) - val topicId = Some(Uuid.randomUuid()) + val topicId = Uuid.randomUuid() val initialFetchState = InitialFetchState( - topicId = topicId, + topicId = Some(topicId), leader = new BrokerEndPoint(0, "localhost", 9092), currentLeaderEpoch = leaderEpoch, initOffset = fetchOffset) @@ -73,7 +80,7 @@ class AbstractFetcherManagerTest { when(fetcher.addPartitions(Map(tp -> initialFetchState))) .thenReturn(Set(tp)) when(fetcher.fetchState(tp)) - .thenReturn(Some(PartitionFetchState(topicId, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = Optional.empty))) + .thenReturn(Some(new PartitionFetchState(Optional.of(topicId), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) .thenReturn(None) when(fetcher.removePartitions(Set(tp))).thenReturn(Map.empty[TopicPartition, PartitionFetchState]) @@ -123,9 +130,9 @@ class AbstractFetcherManagerTest { val fetchOffset = 10L val leaderEpoch = 15 val tp = new TopicPartition("topic", 0) - val topicId = Some(Uuid.randomUuid()) + val topicId = Uuid.randomUuid() val initialFetchState = InitialFetchState( - topicId = topicId, + topicId = Some(topicId), leader = new BrokerEndPoint(0, "localhost", 9092), currentLeaderEpoch = leaderEpoch, initOffset = fetchOffset) @@ -159,8 +166,8 @@ class AbstractFetcherManagerTest { val tp1 = new TopicPartition("topic1", 0) val tp2 = new TopicPartition("topic2", 0) val unknownTp = new TopicPartition("topic2", 1) - val topicId1 = Some(Uuid.randomUuid()) - val topicId2 = Some(Uuid.randomUuid()) + val topicId1 = Uuid.randomUuid() + val topicId2 = Uuid.randomUuid() // Start out with no topic ID. 
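A minimal standalone sketch of the Option/Optional bridging these fetcher tests now need: the Java-side fetch state exposes java.util.Optional fields, and scala.jdk.OptionConverters (imported above) moves values across the boundary, as verifyFetchState below does with topicId. The String id is a stand-in for the real Uuid.

import java.util.Optional
import scala.jdk.OptionConverters._

object OptionBridgeSketch extends App {
  val topicId: Option[String] = Some("hypothetical-topic-id")
  val asJava: Optional[String] = topicId.toJava     // Scala Option -> java.util.Optional
  val roundTrip: Option[String] = asJava.toScala    // java.util.Optional -> Scala Option
  assert(roundTrip == topicId)
  assert(Optional.empty[String]().toScala.isEmpty)
}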
val initialFetchState1 = InitialFetchState( @@ -185,13 +192,13 @@ class AbstractFetcherManagerTest { .thenReturn(Set(tp2)) when(fetcher.fetchState(tp1)) - .thenReturn(Some(PartitionFetchState(None, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = Optional.empty))) - .thenReturn(Some(PartitionFetchState(topicId1, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = Optional.empty))) + .thenReturn(Some(new PartitionFetchState(Optional.empty, fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) + .thenReturn(Some(new PartitionFetchState(Optional.of(topicId1), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) when(fetcher.fetchState(tp2)) - .thenReturn(Some(PartitionFetchState(None, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = Optional.empty))) - .thenReturn(Some(PartitionFetchState(topicId2, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = Optional.empty))) + .thenReturn(Some(new PartitionFetchState(Optional.empty, fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) + .thenReturn(Some(new PartitionFetchState(Optional.of(topicId2), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) - val topicIds = Map(tp1.topic -> topicId1, tp2.topic -> topicId2) + val topicIds = Map(tp1.topic -> Some(topicId1), tp2.topic -> Some(topicId2)) // When targeting a fetcher that doesn't exist, we will not see fetcher.maybeUpdateTopicIds called. // We will see it for a topic partition that does not exist. @@ -200,7 +207,7 @@ class AbstractFetcherManagerTest { def verifyFetchState(fetchState: Option[PartitionFetchState], expectedTopicId: Option[Uuid]): Unit = { assertTrue(fetchState.isDefined) - assertEquals(expectedTopicId, fetchState.get.topicId) + assertEquals(expectedTopicId, fetchState.get.topicId.toScala) } fetcherManager.addFetcherForPartitions(Map(tp1 -> initialFetchState1, tp2 -> initialFetchState2)) @@ -209,8 +216,8 @@ class AbstractFetcherManagerTest { val partitionsToUpdate = Map(tp1 -> initialFetchState1.leader.id, tp2 -> initialFetchState2.leader.id) fetcherManager.maybeUpdateTopicIds(partitionsToUpdate, topicIds) - verifyFetchState(fetcher.fetchState(tp1), topicId1) - verifyFetchState(fetcher.fetchState(tp2), topicId2) + verifyFetchState(fetcher.fetchState(tp1), Some(topicId1)) + verifyFetchState(fetcher.fetchState(tp2), Some(topicId2)) // Try an invalid fetcher and an invalid topic partition val invalidPartitionsToUpdate = Map(tp1 -> 2, unknownTp -> initialFetchState1.leader.id) @@ -298,15 +305,15 @@ class AbstractFetcherManagerTest { override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = Map.empty + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = java.util.Map.of() override def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = new OffsetAndEpoch(1L, 0) override def fetchLatestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = new OffsetAndEpoch(1L, 0) - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = Map.empty + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, 
OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = java.util.Map.of() - override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = ResultWithPartitions(None, Set.empty) + override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = new ResultWithPartitions(java.util.Optional.empty[ReplicaFetch](), java.util.Set.of()) override val isTruncationOnFetchSupported: Boolean = false @@ -349,4 +356,24 @@ class AbstractFetcherManagerTest { override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] = Optional.of(new OffsetAndEpoch(1, 0)) } + @Test + def testMetricsClassName(): Unit = { + val registry = KafkaYammerMetrics.defaultRegistry() + val config = mock(classOf[KafkaConfig]) + val replicaManager = mock(classOf[ReplicaManager]) + val quotaManager = mock(classOf[ReplicationQuotaManager]) + val brokerTopicStats = new BrokerTopicStats() + val directoryEventHandler = DirectoryEventHandler.NOOP + val metrics = new Metrics() + val time = new MockTime() + val metadataVersionSupplier = () => MetadataVersion.LATEST_PRODUCTION + val brokerEpochSupplier = () => 1L + + val _ = new ReplicaAlterLogDirsManager(config, replicaManager, quotaManager, brokerTopicStats, directoryEventHandler) + val _ = new ReplicaFetcherManager(config, replicaManager, metrics, time, quotaManager, metadataVersionSupplier, brokerEpochSupplier) + val existReplicaAlterLogDirsManager = registry.allMetrics.entrySet().stream().filter(metric => metric.getKey.getType == "ReplicaAlterLogDirsManager").findFirst() + val existReplicaFetcherManager = registry.allMetrics.entrySet().stream().filter(metric => metric.getKey.getType == "ReplicaFetcherManager").findFirst() + assertTrue(existReplicaAlterLogDirsManager.isPresent) + assertTrue(existReplicaFetcherManager.isPresent) + } } diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala index ef06ffcc10a02..046ef52a7de90 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala @@ -30,6 +30,8 @@ import org.apache.kafka.storage.internals.log.LogAppendInfo import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} import kafka.server.FetcherThreadTestUtils.{initialFetchState, mkBatch} +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} +import org.apache.kafka.server.{PartitionFetchState, ReplicaState} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -38,6 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Set} import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ class AbstractFetcherThreadTest { @@ -138,7 +141,7 @@ class AbstractFetcherThreadTest { val fetchBackOffMs = 250 val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { throw new UnknownTopicIdException("Topic ID was unknown as 
expected for this test") } } @@ -180,10 +183,10 @@ class AbstractFetcherThreadTest { val fetchBackOffMs = 250 val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { - Map(partition1 -> new FetchData().setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), - partition2 -> new FetchData().setErrorCode(Errors.INCONSISTENT_TOPIC_ID.code), - partition3 -> new FetchData().setErrorCode(Errors.NONE.code)) + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + Map(partition1 -> new FetchResponseData.PartitionData().setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), + partition2 -> new FetchResponseData.PartitionData().setErrorCode(Errors.INCONSISTENT_TOPIC_ID.code), + partition3 -> new FetchResponseData.PartitionData().setErrorCode(Errors.NONE.code)).asJava } } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) @@ -301,7 +304,7 @@ class AbstractFetcherThreadTest { // Not data has been fetched and the follower is still truncating assertEquals(0, replicaState.logEndOffset) - assertEquals(Some(Truncating), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.TRUNCATING), fetcher.fetchState(partition).map(_.state)) // Bump the epoch on the leader fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch += 1 @@ -310,7 +313,7 @@ class AbstractFetcherThreadTest { fetcher.doWork() assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) } @Test @@ -320,7 +323,7 @@ class AbstractFetcherThreadTest { val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - // This test is contrived because it shouldn't be possible to to see unknown leader epoch + // This test is contrived because it shouldn't be possible to see unknown leader epoch // in the Fetching state as the leader must validate the follower's epoch when it checks // the truncation offset. 
@@ -339,7 +342,7 @@ class AbstractFetcherThreadTest { // We have fetched one batch and gotten out of the truncation phase assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) // Somehow the leader epoch rewinds fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch = 0 @@ -347,13 +350,13 @@ class AbstractFetcherThreadTest { // We are stuck at the current offset fetcher.doWork() assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) // After returning to the right epoch, we can continue fetching fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch = 1 fetcher.doWork() assertEquals(2, replicaState.logEndOffset) - assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) } @Test @@ -396,7 +399,7 @@ class AbstractFetcherThreadTest { val highWatermark = 2L val partition = new TopicPartition("topic", 0) val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = throw new UnsupportedOperationException } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) @@ -575,7 +578,7 @@ class AbstractFetcherThreadTest { // initial truncation and verify that the log end offset is updated fetcher.doWork() assertEquals(3L, replicaState.logEndOffset) - assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) // To hit this case, we have to change the leader log without going through the truncation phase leaderState.log.clear() @@ -662,7 +665,7 @@ class AbstractFetcherThreadTest { // Second iteration required here since first iteration is required to // perform initial truncation based on diverging epoch. 
fetcher.doWork() - assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) assertEquals(2, replicaState.logStartOffset) assertEquals(List(), replicaState.log.toList) @@ -710,7 +713,7 @@ class AbstractFetcherThreadTest { // initial truncation and initial error response handling fetcher.doWork() - assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) TestUtils.waitUntilTrue(() => { fetcher.doWork() @@ -748,7 +751,7 @@ class AbstractFetcherThreadTest { fetcher.mockLeader.setLeaderState(partition, leaderState) fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) assertEquals(0, replicaState.logStartOffset) assertEquals(List(), replicaState.log.toList) @@ -768,8 +771,8 @@ class AbstractFetcherThreadTest { val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { var fetchedOnce = false - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { - val fetchedData = super.fetch(fetchRequest) + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + val fetchedData = super.fetch(fetchRequest).asScala if (!fetchedOnce) { val records = fetchedData.head._2.records.asInstanceOf[MemoryRecords] val buffer = records.buffer() @@ -778,7 +781,7 @@ class AbstractFetcherThreadTest { fetchedOnce = true } fetchedData - } + }.asJava } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) @@ -821,7 +824,7 @@ class AbstractFetcherThreadTest { val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { var fetchEpochsFromLeaderOnce = false - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { val fetchedEpochs = super.fetchEpochEndOffsets(partitions) if (!fetchEpochsFromLeaderOnce) { responseCallback.apply() @@ -854,7 +857,7 @@ class AbstractFetcherThreadTest { // Since leader epoch changed, fetch epochs response is ignored due to partition being in // truncating state with the updated leader epoch - assertEquals(Option(Truncating), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(ReplicaState.TRUNCATING), fetcher.fetchState(partition).map(_.state)) assertEquals(Option(nextLeaderEpochOnFollower), fetcher.fetchState(partition).map(_.currentLeaderEpoch)) if (leaderEpochOnLeader < nextLeaderEpochOnFollower) { @@ -875,7 +878,7 @@ class AbstractFetcherThreadTest { val nextLeaderEpochOnFollower = initialLeaderEpochOnFollower + 1 val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset]= { val fetchedEpochs = 
super.fetchEpochEndOffsets(partitions) responseCallback.apply() fetchedEpochs @@ -921,14 +924,14 @@ class AbstractFetcherThreadTest { def testTruncationThrowsExceptionIfLeaderReturnsPartitionsNotRequestedInFetchEpochs(): Unit = { val partition = new TopicPartition("topic", 0) val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { val unrequestedTp = new TopicPartition("topic2", 0) - super.fetchEpochEndOffsets(partitions).toMap + (unrequestedTp -> new EpochEndOffset() + super.fetchEpochEndOffsets(partitions).asScala + (unrequestedTp -> new EpochEndOffset() .setPartition(unrequestedTp.partition) .setErrorCode(Errors.NONE.code) .setLeaderEpoch(0) .setEndOffset(0)) - } + }.asJava } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) @@ -999,7 +1002,7 @@ class AbstractFetcherThreadTest { // make sure the fetcher continues to work with rest of the partitions fetcher.doWork() - assertEquals(Some(Fetching), fetcher.fetchState(partition2).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition2).map(_.state)) assertFalse(failedPartitions.contains(partition2)) // simulate a leader change @@ -1008,7 +1011,7 @@ class AbstractFetcherThreadTest { fetcher.addPartitions(Map(partition1 -> initialFetchState(topicIds.get(partition1.topic), 0L, leaderEpoch = 1)), forceTruncation = true) // partition1 added back - assertEquals(Some(Truncating), fetcher.fetchState(partition1).map(_.state)) + assertEquals(Some(ReplicaState.TRUNCATING), fetcher.fetchState(partition1).map(_.state)) assertFalse(failedPartitions.contains(partition1)) } @@ -1138,7 +1141,7 @@ class AbstractFetcherThreadTest { def verifyFetchState(fetchState: Option[PartitionFetchState], expectedTopicId: Option[Uuid]): Unit = { assertTrue(fetchState.isDefined) - assertEquals(expectedTopicId, fetchState.get.topicId) + assertEquals(expectedTopicId, fetchState.get.topicId.toScala) } verifyFetchState(fetcher.fetchState(partition), None) @@ -1177,4 +1180,564 @@ class AbstractFetcherThreadTest { fetcher.processFetchRequest(partitionData, fetchRequestOpt) assertEquals(0, replicaState.logEndOffset, "FetchResponse should be ignored when leader epoch does not match") } + + private def emptyReplicaState(rlmEnabled: Boolean, partition: TopicPartition, fetcher: MockFetcherThread): PartitionState = { + // Follower begins with an empty log + val replicaState = PartitionState(Seq(), leaderEpoch = 0, highWatermark = 0L, rlmEnabled = rlmEnabled) + fetcher.setReplicaState(partition, replicaState) + fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), fetchOffset = 0, leaderEpoch = 0))) + replicaState + } + + /** + * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset = 0 + * + * Purpose: + * - Simulate a leader with logs starting at offset 0 and validate how the follower + * behaves when TieredStorage is disabled. + * + * Conditions: + * - TieredStorage: **Disabled** + * - Leader LogStartOffset: **0** + * + * Scenario: + * - The leader starts with a log at offset 0, containing three record batches offset at 0, 150, and 199. 
+ * - The follower begins fetching, and we validate the correctness of its replica state as it fetches. + * + * Expected Outcomes: + * 1. The follower fetch state should transition to `FETCHING` initially. + * 2. After the first poll, one record batch is fetched. + * 3. After subsequent polls, the entire leader log is fetched: + * - Replica log size: 3 + * - Replica LogStartOffset: 0 + * - Replica LogEndOffset: 200 + * - Replica HighWatermark: 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetZero(): Unit = { + val rlmEnabled = false + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LogStartOffset = LocalLogStartOffset = 0 + mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled + ) + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(1, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(1, replicaState.logEndOffset) + assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 2) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset != 0 + * + * Purpose: + * - Validate follower behavior when the leader's log starts at a non-zero offset (10). + * + * Conditions: + * - TieredStorage: **Disabled** + * - Leader LogStartOffset: **10** + * + * Scenario: + * - The leader log starts at offset 10 with batches at 10, 150, and 199. + * - The follower starts fetching from offset 10. + * + * Expected Outcomes: + * 1. The follower's initial log is empty. + * 2. 
Replica offsets after polls: + * - LogStartOffset = 10 + * - LogEndOffset = 200 + * - HighWatermark = 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetNonZero(): Unit = { + val rlmEnabled = false + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LogStartOffset = LocalLogStartOffset = 10 + mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled + ) + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + // Follower gets out-of-range error (no messages received), fetch offset is updated from 0 to 10 + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(10, replicaState.logStartOffset) + assertEquals(10, replicaState.logEndOffset) + assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 3) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(10, replicaState.logStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and No Local Deletions + * + * Purpose: + * - Simulate TieredStorage enabled and validate follower fetching behavior when the leader + * log starts at 0 and no segments have been uploaded or deleted locally. + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **0** + * - Leader LocalLogStartOffset: **0** (No local segments deleted). + * + * Scenario: + * - The leader log contains three record batches at offsets 0, 150, and 199. + * - The follower starts fetching from offset 0. + * + * Expected Outcomes: + * 1. 
The replica log accurately reflects the leader's log: + * - LogStartOffset = 0 + * - LocalLogStartOffset = 0 + * - LogEndOffset = 200 + * - HighWatermark = 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroNoLocalDeletions(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LogStartOffset = LocalLogStartOffset = 0 + mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled + ) + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(1, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(0, replicaState.localLogStartOffset) + assertEquals(1, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 2) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(0, replicaState.localLogStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and Local Deletions + * + * Purpose: + * - Simulate TieredStorage enabled with some segments uploaded and deleted locally, causing + * a difference between the leader's LogStartOffset (0) and LocalLogStartOffset (> 0). + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **0** + * - Leader LocalLogStartOffset: **100** (Some segments deleted locally). + * + * Scenario: + * - The leader log starts at offset 0 but the local leader log starts at offset 100. + * - The follower fetch operation begins from offset 0. + * + * Expected Outcomes: + * 1. 
After offset adjustments for local deletions: + * - LogStartOffset = 0 + * - LocalLogStartOffset = 100 + * - LogEndOffset = 200 + * - HighWatermark = 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroWithLocalDeletions(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LocalLogStartOffset = 100 + mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled + ) + leaderState.logStartOffset = 0 + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(100, replicaState.localLogStartOffset) + assertEquals(100, replicaState.logEndOffset) + assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 3) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(100, replicaState.localLogStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and No Local Deletions + * + * Purpose: + * - Simulate TieredStorage enabled and validate follower fetch behavior when the leader's log + * starts at a non-zero offset and no local deletions have occurred. + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **10** + * - Leader LocalLogStartOffset: **10** (No deletions). + * + * Scenario: + * - The leader log starts at offset 10 with batches at 10, 150, and 199. + * - The follower starts fetching from offset 10. + * + * Expected Outcomes: + * 1. 
After fetching, the replica log matches the leader: + * - LogStartOffset = 10 + * - LocalLogStartOffset = 10 + * - LogEndOffset = 200 + * - HighWatermark = 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroNoLocalDeletions(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LogStartOffset = LocalLogStartOffset = 10 + mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled, + ) + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(10, replicaState.localLogStartOffset) + assertEquals(10, replicaState.logEndOffset) + assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 3) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(10, replicaState.logStartOffset) + assertEquals(10, replicaState.localLogStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and Local Deletions + * + * Purpose: + * - Validate follower adjustments when the leader has log deletions causing + * LocalLogStartOffset > LogStartOffset. + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **10** + * - Leader LocalLogStartOffset: **100** (All older segments deleted locally). + * + * Scenario: + * - The leader log starts at offset 10 but the local log starts at offset 100. + * - The follower fetch starts at offset 10 but adjusts for local deletions. + * + * Expected Outcomes: + * 1. Initial fetch offset adjustments: + * - First adjustment: LogEndOffset = 10 (after offset-out-of-range error) + * - Second adjustment: LogEndOffset = 100 (after offset-moved-to-tiered-storage error) + * 2. 
After successful fetches: + * - 3 record batches fetched + * - LogStartOffset = 10 + * - LocalLogStartOffset = 100 + * - LogEndOffset = 200 + * - HighWatermark = 199 + */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroWithLocalDeletions(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LocalLogStartOffset = 100 + mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 199L, + rlmEnabled = rlmEnabled, + ) + leaderState.logStartOffset = 10 + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + // On offset-out-of-range error, fetch offset is updated + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(10, replicaState.localLogStartOffset) + assertEquals(10, replicaState.logEndOffset) + assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) + + fetcher.doWork() + // On offset-moved-to-tiered-storage error, fetch offset is updated + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(100, replicaState.localLogStartOffset) + assertEquals(100, replicaState.logEndOffset) + assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. + for (_ <- 1 to 3) fetcher.doWork() + assertEquals(3, replicaState.log.size) + assertEquals(10, replicaState.logStartOffset) + assertEquals(100, replicaState.localLogStartOffset) + assertEquals(200, replicaState.logEndOffset) + assertEquals(199, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, All Local Segments Deleted + * + * Purpose: + * - Handle scenarios where all local segments have been deleted: + * - LocalLogStartOffset > LogStartOffset. + * - LocalLogStartOffset = LogEndOffset. + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **0 or > 0** + * - Leader LocalLogStartOffset: Leader LogEndOffset (all segments deleted locally). + * + * Expected Outcomes: + * 1. Follower state is adjusted to reflect local deletions: + * - LocalLogStartOffset = LogEndOffset. + * - No new data remains to fetch. 
+ */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroAllLocalSegmentsDeleted(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LocalLogStartOffset = 100 + mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 151L, + rlmEnabled = rlmEnabled + ) + leaderState.logStartOffset = 0 + // Set Local Log Start Offset to Log End Offset + leaderState.localLogStartOffset = 151 + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + + // On offset-moved-to-tiered-storage error, fetch offset is updated + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(151, replicaState.localLogStartOffset) + assertEquals(151, replicaState.logEndOffset) + assertEquals(151, replicaState.highWatermark) + assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Call once again to see if new data is received + fetcher.doWork() + // No metadata update expected + assertEquals(0, replicaState.log.size) + assertEquals(0, replicaState.logStartOffset) + assertEquals(151, replicaState.localLogStartOffset) + assertEquals(151, replicaState.logEndOffset) + assertEquals(151, replicaState.highWatermark) + } + + /** + * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and All Local Segments Deleted + * + * Purpose: + * - Validate follower behavior when TieredStorage is enabled, the leader's log starts at a non-zero offset, + * and all local log segments have been deleted. + * + * Conditions: + * - TieredStorage: **Enabled** + * - Leader LogStartOffset: **10** + * - Leader LocalLogStartOffset: **151** (all older segments deleted locally). + * + * Scenario: + * - The leader log contains record batches from offset 100, but all local segments up to offset 151 are deleted. + * - The follower starts at LogStartOffset = 10 and adjusts for local segment deletions. + * + * Expected Outcomes: + * 1. Follower detects offset adjustments due to local deletions: + * - LogStartOffset remains 10. + * - LocalLogStartOffset updates to 151. + * - LogEndOffset updates to 151. + * 2. HighWatermark aligns with the leader (151). + * 3. No new data is fetched since all relevant segments are deleted. 
+ */ + @Test + def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroAllLocalSegmentsDeleted(): Unit = { + val rlmEnabled = true + val partition = new TopicPartition("topic1", 0) + val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) + val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) + + val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) + + val leaderLog = Seq( + // LocalLogStartOffset = 100 + mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), + mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), + ) + + val leaderState = PartitionState( + leaderLog, + leaderEpoch = 0, + highWatermark = 151L, + rlmEnabled = rlmEnabled + ) + leaderState.logStartOffset = 10 + // Set Local Log Start Offset to Log End Offset + leaderState.localLogStartOffset = 151 + fetcher.mockLeader.setLeaderState(partition, leaderState) + fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) + + fetcher.doWork() + + // On offset-out-of-range error, fetch offset is updated + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(10, replicaState.localLogStartOffset) + assertEquals(10, replicaState.logEndOffset) + assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) + + // On offset-moved-to-tiered-storage error, fetch offset is updated + fetcher.doWork() + assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(0, replicaState.log.size) + assertEquals(151, replicaState.localLogStartOffset) + assertEquals(151, replicaState.logEndOffset) + assertEquals(151, replicaState.highWatermark) + assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset())) + + // Call once again to see if new data is received + fetcher.doWork() + // No metadata update expected + assertEquals(0, replicaState.log.size) + assertEquals(10, replicaState.logStartOffset) + assertEquals(151, replicaState.localLogStartOffset) + assertEquals(151, replicaState.logEndOffset) + assertEquals(151, replicaState.highWatermark) + } } \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala deleted file mode 100644 index 939d63789ff6d..0000000000000 --- a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala +++ /dev/null @@ -1,463 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import com.yammer.metrics.core.{Histogram, Meter} -import kafka.utils.TestUtils -import org.apache.kafka.clients.{ClientResponse, NetworkClient} -import org.apache.kafka.common.errors.{AuthenticationException, SaslAuthenticationException, UnsupportedVersionException} -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} -import org.apache.kafka.common.message.AddPartitionsToTxnResponseData -import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection -import org.apache.kafka.common.{Node, TopicPartition} -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.{AbstractResponse, AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, MetadataResponse} -import org.apache.kafka.common.utils.MockTime -import org.apache.kafka.metadata.{LeaderAndIsr, MetadataCache} -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.util.RequestAndCompletionHandler -import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource -import org.mockito.ArgumentMatchers -import org.mockito.ArgumentMatchers.{any, anyLong, anyString} -import org.mockito.MockedConstruction.Context -import org.mockito.Mockito.{mock, mockConstruction, times, verify, verifyNoMoreInteractions, when} - -import java.util -import java.util.concurrent.TimeUnit -import scala.collection.mutable -import scala.jdk.CollectionConverters._ - -class AddPartitionsToTxnManagerTest { - private val networkClient: NetworkClient = mock(classOf[NetworkClient]) - private val metadataCache: MetadataCache = mock(classOf[MetadataCache]) - private val partitionFor: String => Int = mock(classOf[String => Int]) - - private val time = new MockTime - - private var addPartitionsToTxnManager: AddPartitionsToTxnManager = _ - - private val topic = "foo" - private val topicPartitions = List(new TopicPartition(topic, 1), new TopicPartition(topic, 2), new TopicPartition(topic, 3)) - - private val node0 = new Node(0, "host1", 0) - private val node1 = new Node(1, "host2", 1) - private val node2 = new Node(2, "host2", 2) - - private val transactionalId1 = "txn1" - private val transactionalId2 = "txn2" - private val transactionalId3 = "txn3" - - private val producerId1 = 0L - private val producerId2 = 1L - private val producerId3 = 2L - - private val authenticationErrorResponse = clientResponse(null, authException = new SaslAuthenticationException("")) - private val versionMismatchResponse = clientResponse(null, mismatchException = new UnsupportedVersionException("")) - private val disconnectedResponse = clientResponse(null, disconnected = true) - private val transactionSupportedOperation = genericErrorSupported - - private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) - - @BeforeEach - def setup(): Unit = { - addPartitionsToTxnManager = new AddPartitionsToTxnManager( - config, - networkClient, - metadataCache, - partitionFor, - time - ) - } - - @AfterEach - def teardown(): Unit = { - addPartitionsToTxnManager.shutdown() - } - - private def setErrors(errors: mutable.Map[TopicPartition, Errors])(callbackErrors: Map[TopicPartition, Errors]): Unit = { - 
callbackErrors.foreachEntry(errors.put) - } - - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testAddTxnData(isAddPartition: Boolean): Unit = { - val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported - when(partitionFor.apply(transactionalId1)).thenReturn(0) - when(partitionFor.apply(transactionalId2)).thenReturn(1) - when(partitionFor.apply(transactionalId3)).thenReturn(0) - mockTransactionStateMetadata(0, 0, Some(node0)) - mockTransactionStateMetadata(1, 1, Some(node1)) - - val transaction1Errors = mutable.Map[TopicPartition, Errors]() - val transaction2Errors = mutable.Map[TopicPartition, Errors]() - val transaction3Errors = mutable.Map[TopicPartition, Errors]() - - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transaction3Errors), transactionSupportedOperation) - - // We will try to add transaction1 3 more times (retries). One will have the same epoch, one will have a newer epoch, and one will have an older epoch than the new one we just added. - val transaction1RetryWithSameEpochErrors = mutable.Map[TopicPartition, Errors]() - val transaction1RetryWithNewerEpochErrors = mutable.Map[TopicPartition, Errors]() - val transaction1RetryWithOldEpochErrors = mutable.Map[TopicPartition, Errors]() - - // Trying to add more transactional data for the same transactional ID, producer ID, and epoch should simply replace the old data and send a retriable response. - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithSameEpochErrors), transactionSupportedOperation) - val expectedNetworkErrors = topicPartitions.map(_ -> Errors.NETWORK_EXCEPTION).toMap - assertEquals(expectedNetworkErrors, transaction1Errors) - - // Trying to add more transactional data for the same transactional ID and producer ID, but new epoch should replace the old data and send an error response for it. - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 1, topicPartitions, setErrors(transaction1RetryWithNewerEpochErrors), transactionSupportedOperation) - val expectedEpochErrors = topicPartitions.map(_ -> Errors.INVALID_PRODUCER_EPOCH).toMap - assertEquals(expectedEpochErrors, transaction1RetryWithSameEpochErrors) - - // Trying to add more transactional data for the same transactional ID and producer ID, but an older epoch should immediately return with error and keep the old data queued to send. 
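// Aside, illustrative only (hypothetical helper, not the actual AddPartitionsToTxnManager logic):
// the epoch comparison exercised by the three same-transactional-id retries in this test,
// with outcomes matching the assertions around them.
def retryDecisionSketch(queuedEpoch: Short, retryEpoch: Short): String = {
  if (retryEpoch < queuedEpoch)
    "fail the retry immediately with INVALID_PRODUCER_EPOCH and keep the queued data"
  else if (retryEpoch == queuedEpoch)
    "replace the queued data and complete its callback with retriable NETWORK_EXCEPTION"
  else
    "replace the queued data and complete its callback with INVALID_PRODUCER_EPOCH"
}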
- addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithOldEpochErrors), transactionSupportedOperation) - assertEquals(expectedEpochErrors, transaction1RetryWithOldEpochErrors) - - val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala - requestsAndHandlers.foreach { requestAndHandler => - if (requestAndHandler.destination == node0) { - assertEquals(time.milliseconds(), requestAndHandler.creationTimeMs) - assertEquals( - AddPartitionsToTxnRequest.Builder.forBroker( - new AddPartitionsToTxnTransactionCollection(Seq( - transactionData(transactionalId3, producerId3, verifyOnly = !isAddPartition), - transactionData(transactionalId1, producerId1, producerEpoch = 1, verifyOnly = !isAddPartition) - ).iterator.asJava) - ).data, - requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data // insertion order - ) - } else { - verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) - } - } - } - - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testGenerateRequests(isAddPartition: Boolean): Unit = { - when(partitionFor.apply(transactionalId1)).thenReturn(0) - when(partitionFor.apply(transactionalId2)).thenReturn(1) - when(partitionFor.apply(transactionalId3)).thenReturn(2) - mockTransactionStateMetadata(0, 0, Some(node0)) - mockTransactionStateMetadata(1, 1, Some(node1)) - mockTransactionStateMetadata(2, 2, Some(node2)) - val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported - - val transactionErrors = mutable.Map[TopicPartition, Errors]() - - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - - val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala - assertEquals(2, requestsAndHandlers.size) - // Note: handlers are tested in testAddPartitionsToTxnHandlerErrorHandling - requestsAndHandlers.foreach { requestAndHandler => - if (requestAndHandler.destination == node0) verifyRequest(node0, transactionalId1, producerId1, !isAddPartition, requestAndHandler) - else verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) - } - - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - - // Test creationTimeMs increases too. - time.sleep(10) - - val requestsAndHandlers2 = addPartitionsToTxnManager.generateRequests().asScala - // The request for node1 should not be added because one request is already inflight. - assertEquals(1, requestsAndHandlers2.size) - requestsAndHandlers2.foreach { requestAndHandler => - verifyRequest(node2, transactionalId3, producerId3, !isAddPartition, requestAndHandler) - } - - // Complete the request for node1 so the new one can go through. 
- requestsAndHandlers.filter(_.destination == node1).head.handler.onComplete(authenticationErrorResponse) - val requestsAndHandlers3 = addPartitionsToTxnManager.generateRequests().asScala - assertEquals(1, requestsAndHandlers3.size) - requestsAndHandlers3.foreach { requestAndHandler => - verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) - } - } - - @Test - def testTransactionCoordinatorResolution(): Unit = { - when(partitionFor.apply(transactionalId1)).thenReturn(0) - - def checkError(): Unit = { - val errors = mutable.Map[TopicPartition, Errors]() - - addPartitionsToTxnManager.addOrVerifyTransaction( - transactionalId1, - producerId1, - producerEpoch = 0, - topicPartitions, - setErrors(errors), - transactionSupportedOperation - ) - - assertEquals(topicPartitions.map(tp => tp -> Errors.COORDINATOR_NOT_AVAILABLE).toMap, errors) - } - - // The transaction state topic does not exist. - when(metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, 0)) - .thenReturn(util.Optional.empty()) - checkError() - - // The partition has no leader. - mockTransactionStateMetadata(0, -1, Option.empty) - checkError() - - // The leader is not available. - mockTransactionStateMetadata(0, 0, Option.empty) - checkError() - } - - @Test - def testAddPartitionsToTxnHandlerErrorHandling(): Unit = { - when(partitionFor.apply(transactionalId1)).thenReturn(0) - when(partitionFor.apply(transactionalId2)).thenReturn(0) - mockTransactionStateMetadata(0, 0, Some(node0)) - - val transaction1Errors = mutable.Map[TopicPartition, Errors]() - val transaction2Errors = mutable.Map[TopicPartition, Errors]() - - def addTransactionsToVerify(): Unit = { - transaction1Errors.clear() - transaction2Errors.clear() - - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) - } - - def addTransactionsToVerifyRequestVersion(operationExpected: TransactionSupportedOperation): Unit = { - transaction1Errors.clear() - transaction2Errors.clear() - - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), operationExpected) - addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), operationExpected) - } - - val expectedAuthErrors = topicPartitions.map(_ -> Errors.SASL_AUTHENTICATION_FAILED).toMap - addTransactionsToVerify() - receiveResponse(authenticationErrorResponse) - assertEquals(expectedAuthErrors, transaction1Errors) - assertEquals(expectedAuthErrors, transaction2Errors) - - // On version mismatch we ignore errors and keep handling. 
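// Aside, illustrative only (mirrors the expectations asserted in this test, not the production
// handler): how each kind of client response surfaces in the per-partition callback errors.
def callbackErrorSketch(response: String): Option[String] = response match {
  case "authentication failure"          => Some("SASL_AUTHENTICATION_FAILED")
  case "unsupported version"             => None // ignored, as noted above
  case "disconnected"                    => Some("NETWORK_EXCEPTION")
  case "top-level response error"        => Some("INVALID_TXN_STATE")
  case "partition-level PRODUCER_FENCED" => Some("INVALID_PRODUCER_EPOCH")
  case "partition-level NONE"            => None // successes are dropped from the error map
  case other                             => Some(other)
}
// TRANSACTION_ABORTABLE is additionally downgraded to INVALID_TXN_STATE unless the operation
// supports it (compare the defaultError and genericErrorSupported assertions below).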
- val expectedVersionMismatchErrors = mutable.HashMap[TopicPartition, Errors]() - addTransactionsToVerify() - receiveResponse(versionMismatchResponse) - assertEquals(expectedVersionMismatchErrors, transaction1Errors) - assertEquals(expectedVersionMismatchErrors, transaction2Errors) - - val expectedDisconnectedErrors = topicPartitions.map(_ -> Errors.NETWORK_EXCEPTION).toMap - addTransactionsToVerify() - receiveResponse(disconnectedResponse) - assertEquals(expectedDisconnectedErrors, transaction1Errors) - assertEquals(expectedDisconnectedErrors, transaction2Errors) - - val expectedTopLevelErrors = topicPartitions.map(_ -> Errors.INVALID_TXN_STATE).toMap - val topLevelErrorAddPartitionsResponse = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData().setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())) - val topLevelErrorResponse = clientResponse(topLevelErrorAddPartitionsResponse) - addTransactionsToVerify() - receiveResponse(topLevelErrorResponse) - assertEquals(expectedTopLevelErrors, transaction1Errors) - assertEquals(expectedTopLevelErrors, transaction2Errors) - - val preConvertedTransaction1Errors = topicPartitions.map(_ -> Errors.PRODUCER_FENCED).toMap - val expectedTransaction1Errors = topicPartitions.map(_ -> Errors.INVALID_PRODUCER_EPOCH).toMap - val preConvertedTransaction2Errors = Map(new TopicPartition("foo", 1) -> Errors.NONE, - new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE, - new TopicPartition("foo", 3) -> Errors.NONE) - val expectedTransaction2Errors = Map(new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE) - - val transaction1ErrorResponse = AddPartitionsToTxnResponse.resultForTransaction(transactionalId1, preConvertedTransaction1Errors.asJava) - val transaction2ErrorResponse = AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, preConvertedTransaction2Errors.asJava) - val mixedErrorsAddPartitionsResponse = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData() - .setResultsByTransaction(new AddPartitionsToTxnResultCollection(Seq(transaction1ErrorResponse, transaction2ErrorResponse).iterator.asJava))) - val mixedErrorsResponse = clientResponse(mixedErrorsAddPartitionsResponse) - - addTransactionsToVerify() - receiveResponse(mixedErrorsResponse) - assertEquals(expectedTransaction1Errors, transaction1Errors) - assertEquals(expectedTransaction2Errors, transaction2Errors) - - val preConvertedTransactionAbortableErrorsTxn1 = topicPartitions.map(_ -> Errors.TRANSACTION_ABORTABLE).toMap - val preConvertedTransactionAbortableErrorsTxn2 = Map(new TopicPartition("foo", 1) -> Errors.NONE, - new TopicPartition("foo", 2) -> Errors.TRANSACTION_ABORTABLE, - new TopicPartition("foo", 3) -> Errors.NONE) - val transactionAbortableErrorResponseTxn1 = AddPartitionsToTxnResponse.resultForTransaction(transactionalId1, preConvertedTransactionAbortableErrorsTxn1.asJava) - val transactionAbortableErrorResponseTxn2 = AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, preConvertedTransactionAbortableErrorsTxn2.asJava) - val mixedErrorsAddPartitionsResponseAbortableError = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData() - .setResultsByTransaction(new AddPartitionsToTxnResultCollection(Seq(transactionAbortableErrorResponseTxn1, transactionAbortableErrorResponseTxn2).iterator.asJava))) - val mixedAbortableErrorsResponse = clientResponse(mixedErrorsAddPartitionsResponseAbortableError) - - val expectedTransactionAbortableErrorsTxn1LowerVersion = topicPartitions.map(_ -> Errors.INVALID_TXN_STATE).toMap - val 
expectedTransactionAbortableErrorsTxn2LowerVersion = Map(new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE) - - val expectedTransactionAbortableErrorsTxn1HigherVersion = topicPartitions.map(_ -> Errors.TRANSACTION_ABORTABLE).toMap - val expectedTransactionAbortableErrorsTxn2HigherVersion = Map(new TopicPartition("foo", 2) -> Errors.TRANSACTION_ABORTABLE) - - addTransactionsToVerifyRequestVersion(defaultError) - receiveResponse(mixedAbortableErrorsResponse) - assertEquals(expectedTransactionAbortableErrorsTxn1LowerVersion, transaction1Errors) - assertEquals(expectedTransactionAbortableErrorsTxn2LowerVersion, transaction2Errors) - - addTransactionsToVerifyRequestVersion(genericErrorSupported) - receiveResponse(mixedAbortableErrorsResponse) - assertEquals(expectedTransactionAbortableErrorsTxn1HigherVersion, transaction1Errors) - assertEquals(expectedTransactionAbortableErrorsTxn2HigherVersion, transaction2Errors) - } - - @Test - def testAddPartitionsToTxnManagerMetrics(): Unit = { - val startTime = time.milliseconds() - val transactionErrors = mutable.Map[TopicPartition, Errors]() - - var maxVerificationTime: Long = 0 - val mockVerificationFailureMeter = mock(classOf[Meter]) - val mockVerificationTime = mock(classOf[Histogram]) - - when(partitionFor.apply(transactionalId1)).thenReturn(0) - when(partitionFor.apply(transactionalId2)).thenReturn(1) - mockTransactionStateMetadata(0, 0, Some(node0)) - mockTransactionStateMetadata(1, 1, Some(node1)) - - // Update max verification time when we see a higher verification time. - when(mockVerificationTime.update(anyLong())).thenAnswer { invocation => - val newTime = invocation.getArgument(0).asInstanceOf[Long] - if (newTime > maxVerificationTime) - maxVerificationTime = newTime - } - - val mockMetricsGroupCtor = mockConstruction(classOf[KafkaMetricsGroup], (mock: KafkaMetricsGroup, context: Context) => { - when(mock.newMeter(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationFailureRateMetricName), anyString(), any(classOf[TimeUnit]))).thenReturn(mockVerificationFailureMeter) - when(mock.newHistogram(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationTimeMsMetricName))).thenReturn(mockVerificationTime) - }) - - val addPartitionsManagerWithMockedMetrics = new AddPartitionsToTxnManager( - config, - networkClient, - metadataCache, - partitionFor, - time - ) - - try { - addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - - time.sleep(100) - - val requestsAndHandlers = addPartitionsManagerWithMockedMetrics.generateRequests() - var requestsHandled = 0 - - requestsAndHandlers.forEach { requestAndCompletionHandler => - time.sleep(100) - requestAndCompletionHandler.handler.onComplete(authenticationErrorResponse) - requestsHandled += 1 - verify(mockVerificationTime, times(requestsHandled)).update(anyLong()) - assertEquals(maxVerificationTime, time.milliseconds() - startTime) - verify(mockVerificationFailureMeter, times(requestsHandled)).mark(3) // since there are 3 partitions - } - - // shutdown the manager so that metrics are removed. 
- addPartitionsManagerWithMockedMetrics.shutdown() - - val mockMetricsGroup = mockMetricsGroupCtor.constructed.get(0) - - verify(mockMetricsGroup).newMeter(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationFailureRateMetricName), anyString(), any(classOf[TimeUnit])) - verify(mockMetricsGroup).newHistogram(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationTimeMsMetricName)) - verify(mockMetricsGroup).removeMetric(AddPartitionsToTxnManager.VerificationFailureRateMetricName) - verify(mockMetricsGroup).removeMetric(AddPartitionsToTxnManager.VerificationTimeMsMetricName) - - // assert that we have verified all invocations on the metrics group. - verifyNoMoreInteractions(mockMetricsGroup) - } finally { - if (mockMetricsGroupCtor != null) { - mockMetricsGroupCtor.close() - } - if (addPartitionsManagerWithMockedMetrics.isRunning) { - addPartitionsManagerWithMockedMetrics.shutdown() - } - } - } - - private def mockTransactionStateMetadata(partitionIndex: Int, leaderId: Int, leaderNode: Option[Node]): Unit = { - when(metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionIndex)) - .thenReturn(util.Optional.of(new LeaderAndIsr(leaderId, util.Arrays.asList(leaderId)))) - if (leaderId != MetadataResponse.NO_LEADER_ID) { - when(metadataCache.getAliveBrokerNode(leaderId, config.interBrokerListenerName)) - .thenReturn(util.Optional.ofNullable(leaderNode.orNull)) - } - } - - private def clientResponse( - response: AbstractResponse, - authException: AuthenticationException = null, - mismatchException: UnsupportedVersionException = null, - disconnected: Boolean = false - ): ClientResponse = { - new ClientResponse(null, null, null, 0, 0, disconnected, mismatchException, authException, response) - } - - private def transactionData( - transactionalId: String, - producerId: Long, - producerEpoch: Short = 0, - verifyOnly: Boolean, - ): AddPartitionsToTxnTransaction = { - new AddPartitionsToTxnTransaction() - .setTransactionalId(transactionalId) - .setProducerId(producerId) - .setProducerEpoch(producerEpoch) - .setVerifyOnly(verifyOnly) - .setTopics(new AddPartitionsToTxnTopicCollection( - Seq(new AddPartitionsToTxnTopic() - .setName(topic) - .setPartitions(Seq[Integer](1, 2, 3).asJava)).iterator.asJava)) - } - - private def receiveResponse(response: ClientResponse): Unit = { - addPartitionsToTxnManager.generateRequests().asScala.head.handler.onComplete(response) - } - - private def verifyRequest( - expectedDestination: Node, - transactionalId: String, - producerId: Long, - verifyOnly: Boolean, - requestAndHandler: RequestAndCompletionHandler - ): Unit = { - assertEquals(time.milliseconds(), requestAndHandler.creationTimeMs) - assertEquals(expectedDestination, requestAndHandler.destination) - assertEquals( - AddPartitionsToTxnRequest.Builder.forBroker( - new AddPartitionsToTxnTransactionCollection( - Seq(transactionData(transactionalId, producerId, verifyOnly = verifyOnly)).iterator.asJava - ) - ).data, - requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data - ) - } -} diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala index 1b35f93961946..406609239a002 100644 --- a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala @@ -32,9 +32,9 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType import 
org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, FindCoordinatorRequest, FindCoordinatorResponse, InitProducerIdRequest, InitProducerIdResponse} import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{Arguments, MethodSource, ValueSource} +import org.junit.jupiter.params.provider.{Arguments, MethodSource} import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -55,7 +55,7 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { @ParameterizedTest @MethodSource(value = Array("parameters")) - def shouldReceiveOperationNotAttemptedWhenOtherPartitionHasError(quorum: String, version: Short): Unit = { + def shouldReceiveOperationNotAttemptedWhenOtherPartitionHasError(version: Short): Unit = { // The basic idea is that we have one unknown topic and one created topic. We should get the 'UNKNOWN_TOPIC_OR_PARTITION' // error for the unknown topic and the 'OPERATION_NOT_ATTEMPTED' error for the known and authorized topic. val nonExistentTopic = new TopicPartition("unknownTopic", 0) @@ -110,9 +110,8 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, errors.get(nonExistentTopic)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testOneSuccessOneErrorInBatchedRequest(quorum: String): Unit = { + @Test + def testOneSuccessOneErrorInBatchedRequest(): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId1 = "foobar" val transactionalId2 = "barfoo" // "barfoo" maps to the same transaction coordinator @@ -149,9 +148,8 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertEquals(expectedErrors, errors) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testVerifyOnly(quorum: String): Unit = { + @Test + def testVerifyOnly(): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId = "foobar" @@ -209,7 +207,7 @@ object AddPartitionsToTxnRequestServerTest { def parameters: JStream[Arguments] = { val arguments = mutable.ListBuffer[Arguments]() ApiKeys.ADD_PARTITIONS_TO_TXN.allVersions().forEach { version => - arguments += Arguments.of("kraft", version) + arguments += Arguments.of(version) } arguments.asJava.stream() } diff --git a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala index d54e3227f80c4..16a82fdca8b30 100644 --- a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala @@ -17,12 +17,13 @@ package unit.kafka.server import kafka.network.SocketServer -import kafka.server.{BrokerServer, ControllerServer, IntegrationTestUtils} +import kafka.server.{BrokerServer, ControllerServer} import org.apache.kafka.common.test.api.{ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.message.AllocateProducerIdsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.IntegrationTestUtils import org.apache.kafka.server.common.ProducerIdsBlock import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} @@ 
-33,7 +34,7 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { def testAllocateProducersIdSentToController(): Unit = { val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] - val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt + val controllerId = sourceBroker.raftManager.client.leaderAndEpoch.leaderId().getAsInt val controllerServer = cluster.controllers.values().stream() .filter(_.config.nodeId == controllerId) .findFirst() @@ -49,7 +50,7 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { def testAllocateProducersIdSentToNonController(): Unit = { val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] - val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt + val controllerId = sourceBroker.raftManager.client.leaderAndEpoch.leaderId().getAsInt val controllerServer = cluster.controllers().values().stream() .filter(_.config.nodeId != controllerId) .findFirst() @@ -81,9 +82,7 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { ): AllocateProducerIdsResponse = { IntegrationTestUtils.connectAndReceive[AllocateProducerIdsResponse]( request, - controllerSocketServer, - cluster.controllerListenerName.get + controllerSocketServer.boundPort(cluster.controllerListenerName()) ) } - } diff --git a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala index b27d03919689b..4793723bc6aea 100644 --- a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala @@ -505,7 +505,7 @@ class AlterPartitionManagerTest { null, // Response is serialized and deserialized to ensure that it does // not contain ignorable fields used by other versions. 
- AlterPartitionResponse.parse(MessageUtil.toByteBufferAccessor(response.data, version).buffer(), version) + AlterPartitionResponse.parse(MessageUtil.toByteBufferAccessor(response.data, version), version) ) } diff --git a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala index 8e2698b0842cf..16cce3ed81a44 100644 --- a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala @@ -25,10 +25,9 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AlterReplicaLogDirsRequest, AlterReplicaLogDirsResponse} import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.storage.internals.log.LogFileUtils +import org.apache.kafka.storage.internals.log.{LogConfig, LogFileUtils} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import java.util.Properties import scala.jdk.CollectionConverters._ @@ -52,13 +51,12 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { .find(p => p.partitionIndex == tp.partition).get.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterReplicaLogDirsRequest(quorum: String): Unit = { + @Test + def testAlterReplicaLogDirsRequest(): Unit = { val partitionNum = 5 // Alter replica dir before topic creation - val logDir1 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs.get(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) @@ -75,7 +73,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { } // Alter replica dir again after topic creation - val logDir2 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs.get(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs2 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir2).toMap val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(partitionDirs2) // The response should succeed for all partitions @@ -88,13 +86,12 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterReplicaLogDirsRequestErrorCode(quorum: String): Unit = { - val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath - val validDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath - val validDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath - val validDir3 = new File(brokers.head.config.logDirs(3)).getAbsolutePath + @Test + def testAlterReplicaLogDirsRequestErrorCode(): Unit = { + val offlineDir = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath + val validDir1 = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath + val validDir2 = new File(brokers.head.config.logDirs.get(2)).getAbsolutePath + val validDir3 = new File(brokers.head.config.logDirs.get(3)).getAbsolutePath // Test 
AlterReplicaDirRequest before topic creation val partitionDirs1 = mutable.Map.empty[TopicPartition, String] @@ -127,13 +124,12 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { assertEquals(Errors.KAFKA_STORAGE_ERROR, findErrorForPartition(alterReplicaDirResponse3, new TopicPartition(topic, 2))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterReplicaLogDirsRequestWithRetention(quorum: String): Unit = { + @Test + def testAlterReplicaLogDirsRequestWithRetention(): Unit = { val partitionNum = 1 // Alter replica dir before topic creation - val logDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) @@ -148,7 +144,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { // We don't want files with the `.deleted` suffix to be removed too quickly, // so we can validate that there will be orphan files and that they will eventually be removed. topicProperties.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, "10000") - topicProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1024") + topicProperties.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "1024") createTopic(topic, partitionNum, 1, topicProperties) assertEquals(logDir1, brokers.head.logManager.getLog(tp).get.dir.getParent) @@ -166,7 +162,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { }, "timed out waiting for log segment to retention") // Alter replica dir again after topic creation - val logDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs.get(2)).getAbsolutePath val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(Map(tp -> logDir2)) // The response should succeed for all partitions assertEquals(Errors.NONE, findErrorForPartition(alterReplicaLogDirsResponse2, tp)) diff --git a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala index 7ea3052925be9..99231470b12b0 100644 --- a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala @@ -25,9 +25,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AlterUserScramCredentialsRequest, AlterUserScramCredentialsResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.TestInfo -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{Test, TestInfo} import java.util import java.util.Properties @@ -54,9 +52,8 @@ class AlterUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTest private val user1 = "user1" private val user2 = "user2" - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterNothingNotAuthorized(quorum: String): Unit = { + @Test + def testAlterNothingNotAuthorized(): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(new util.ArrayList[AlterUserScramCredentialsRequestData.ScramCredentialDeletion]) @@ 
-67,9 +64,8 @@ class AlterUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTest assertEquals(0, results.size) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterSomethingNotAuthorized(quorum: String): Unit = { + @Test + def testAlterSomethingNotAuthorized(): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) diff --git a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala index ced7887351082..16c6203bac579 100644 --- a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala @@ -34,7 +34,7 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.config.ServerConfigs -import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -79,9 +79,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { private val user3 = "user3@user3.com" private val unknownUser = "unknownUser" - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterNothing(quorum: String): Unit = { + @Test + def testAlterNothing(): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(new util.ArrayList[AlterUserScramCredentialsRequestData.ScramCredentialDeletion]) @@ -92,9 +91,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(0, results.size) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterSameThingTwice(quorum: String): Unit = { + @Test + def testAlterSameThingTwice(): Unit = { val deletion1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val deletion2 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user2).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val upsertion1 = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) @@ -133,9 +131,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { }) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterEmptyUser(quorum: String): Unit = { + @Test + def testAlterEmptyUser(): Unit = { val deletionEmpty = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName("").setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val upsertionEmpty = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName("").setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) .setIterations(4096).setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -162,9 +159,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { }) } - @ParameterizedTest - 
@ValueSource(strings = Array("kraft")) - def testAlterUnknownMechanism(quorum: String): Unit = { + @Test + def testAlterUnknownMechanism(): Unit = { val deletionUnknown1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.UNKNOWN.`type`) val deletionValid1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val deletionUnknown2 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user2).setMechanism(10.toByte) @@ -190,9 +186,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { results.asScala.foreach(result => assertEquals("Unknown SCRAM mechanism", result.errorMessage)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterTooFewIterations(quorum: String): Unit = { + @Test + def testAlterTooFewIterations(): Unit = { val upsertionTooFewIterations = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1) .setMechanism(ScramMechanism.SCRAM_SHA_256.`type`).setIterations(1) .setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -207,9 +202,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals("Too few iterations", results.get(0).errorMessage) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterTooManyIterations(quorum: String): Unit = { + @Test + def testAlterTooManyIterations(): Unit = { val upsertionTooFewIterations = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1) .setMechanism(ScramMechanism.SCRAM_SHA_256.`type`).setIterations(Integer.MAX_VALUE) .setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -224,9 +218,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals("Too many iterations", results.get(0).errorMessage) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteSomethingThatDoesNotExist(quorum: String): Unit = { + @Test + def testDeleteSomethingThatDoesNotExist(): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) @@ -238,12 +231,14 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { checkAllErrorsAlteringCredentials(results, Errors.RESOURCE_NOT_FOUND, "when deleting a non-existing credential") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterAndDescribe(quorum: String): Unit = { + @Test + def testAlterAndDescribe(): Unit = { // create a bunch of credentials val request1_0 = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() + .setDeletions(util.Arrays.asList( + new AlterUserScramCredentialsRequestData.ScramCredentialDeletion() + .setName(user2).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) .setUpsertions(util.Arrays.asList( new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion() .setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) @@ -251,10 +246,15 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { .setSalt(saltBytes) .setSaltedPassword(saltedPasswordBytes), ))).build() + assertEquals("AlterUserScramCredentialsRequestData(" + + "deletions=[ScramCredentialDeletion(name='" + user2 + "', mechanism=" + 
ScramMechanism.SCRAM_SHA_256.`type` + ")], " + + "upsertions=[ScramCredentialUpsertion(name='" + user1 + "', mechanism=" + ScramMechanism.SCRAM_SHA_256.`type` + + ", iterations=4096, salt=[], saltedPassword=[])])", request1_0.toString) val results1_0 = sendAlterUserScramCredentialsRequest(request1_0).data.results - assertEquals(1, results1_0.size) - checkNoErrorsAlteringCredentials(results1_0) + assertEquals(2, results1_0.size) + assertEquals(1, results1_0.asScala.count(_.errorCode == Errors.RESOURCE_NOT_FOUND.code())) checkUserAppearsInAlterResults(results1_0, user1) + checkUserAppearsInAlterResults(results1_0, user2) // When creating credentials, do not update the same user more than once per request val request1_1 = new AlterUserScramCredentialsRequest.Builder( @@ -276,6 +276,8 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { .setSalt(saltBytes) .setSaltedPassword(saltedPasswordBytes), ))).build() + assertFalse(request1_1.toString.contains(saltBytes)) + assertFalse(request1_1.toString.contains(saltedPasswordBytes)) val results1_1 = sendAlterUserScramCredentialsRequest(request1_1).data.results assertEquals(3, results1_1.size) checkNoErrorsAlteringCredentials(results1_1) diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index 6e32cfc01f8be..9b58207d2f368 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -19,9 +19,10 @@ package kafka.server import org.apache.kafka.common.message.ApiVersionsRequestData import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.ApiVersionsRequest +import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Type} +import org.apache.kafka.server.IntegrationTestUtils import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ @@ -33,7 +34,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequest(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = sendApiVersionsRequest(request, cluster.clientListener()) + val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.brokerBoundPorts().get(0)) validateApiVersionsResponse(apiVersionsResponse) } @@ -43,15 +44,15 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequestIncludesUnreleasedApis(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = sendApiVersionsRequest(request, cluster.clientListener()) + val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.brokerBoundPorts().get(0)) validateApiVersionsResponse(apiVersionsResponse, enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT)) def testApiVersionsRequestThroughControllerListener(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = sendApiVersionsRequest(request, cluster.controllerListenerName.get()) - validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName.get(), enableUnstableLastVersion = true) + val apiVersionsResponse = 
IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.controllerBoundPorts().get(0)) + validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName(), enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT)) @@ -73,7 +74,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequestValidationV0(): Unit = { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0.asInstanceOf[Short]) - val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.clientListener()) + val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.brokerBoundPorts().get(0)) validateApiVersionsResponse(apiVersionsResponse, apiVersion = 0, enableUnstableLastVersion = !"false".equals( cluster.config().serverProperties().get("unstable.api.versions.enable"))) @@ -82,15 +83,15 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio @ClusterTest(types = Array(Type.KRAFT)) def testApiVersionsRequestValidationV0ThroughControllerListener(): Unit = { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0.asInstanceOf[Short]) - val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.controllerListenerName.get()) - validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName.get(), apiVersion = 0, enableUnstableLastVersion = true) + val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.controllerBoundPorts().get(0)) + validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName(), apiVersion = 0, enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT)) def testApiVersionsRequestValidationV3(): Unit = { // Invalid request because Name and Version are empty by default val apiVersionsRequest = new ApiVersionsRequest(new ApiVersionsRequestData(), 3.asInstanceOf[Short]) - val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.clientListener()) + val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.brokerBoundPorts().get(0)) assertEquals(Errors.INVALID_REQUEST.code(), apiVersionsResponse.data.errorCode()) } } diff --git a/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala b/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala index 45b470f402cf8..b226b58c81650 100644 --- a/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala @@ -33,25 +33,29 @@ import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopi import org.apache.kafka.common.network.{ClientInformation, ListenerName} import org.apache.kafka.common.protocol.{ApiKeys, ByteBufferAccessor, Errors} import org.apache.kafka.common.requests._ +import org.apache.kafka.common.requests.RequestUtils import org.apache.kafka.common.security.auth.{KafkaPrincipal, KafkaPrincipalSerde, SecurityProtocol} import org.apache.kafka.common.utils.{SecurityUtils, Utils} +import org.apache.kafka.server.util.MockTime import org.apache.kafka.coordinator.group.{GroupCoordinator, GroupCoordinatorConfig} import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorConfig} import org.apache.kafka.metadata.MetadataCache import 
org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} +import org.apache.kafka.server.quota.ControllerMutationQuota import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} import org.junit.jupiter.api.{BeforeEach, Test} import org.mockito.ArgumentMatchers.any -import org.mockito.Mockito.never import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} +import org.mockito.Mockito.never import scala.collection.{Map, Seq} class AutoTopicCreationManagerTest { private val requestTimeout = 100 + private val testCacheCapacity = 3 private var config: KafkaConfig = _ private val metadataCache = Mockito.mock(classOf[MetadataCache]) private val brokerToController = Mockito.mock(classOf[NodeToControllerChannelManager]) @@ -59,6 +63,7 @@ class AutoTopicCreationManagerTest { private val transactionCoordinator = Mockito.mock(classOf[TransactionCoordinator]) private val shareCoordinator = Mockito.mock(classOf[ShareCoordinator]) private var autoTopicCreationManager: AutoTopicCreationManager = _ + private val mockTime = new MockTime(0L, 0L) private val internalTopicPartitions = 2 private val internalTopicReplicationFactor: Short = 2 @@ -75,6 +80,8 @@ class AutoTopicCreationManagerTest { props.setProperty(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) props.setProperty(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) props.setProperty(ShareCoordinatorConfig.STATE_TOPIC_NUM_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) + // Set a short group max session timeout for testing TTL (1 second) + props.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, "1000") config = KafkaConfig.fromProps(props) val aliveBrokers = util.List.of(new Node(0, "host0", 0), new Node(1, "host1", 1)) @@ -114,7 +121,9 @@ class AutoTopicCreationManagerTest { brokerToController, groupCoordinator, transactionCoordinator, - Some(shareCoordinator)) + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) val topicsCollection = new CreateTopicsRequestData.CreatableTopicCollection topicsCollection.add(getNewTopic(topicName, numPartitions, replicationFactor)) @@ -150,7 +159,7 @@ class AutoTopicCreationManagerTest { val requestContext = initializeRequestContext(userPrincipal, Optional.of(principalSerde)) autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, Some(requestContext)) assertTrue(serializeIsCalled.get()) @@ -170,7 +179,7 @@ class AutoTopicCreationManagerTest { // Throw upon undefined principal serde when building the forward request assertThrows(classOf[IllegalArgumentException], () => autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext))) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, Some(requestContext))) } @Test @@ -186,9 +195,9 @@ class AutoTopicCreationManagerTest { val requestContext = initializeRequestContext(KafkaPrincipal.ANONYMOUS, Optional.of(principalSerde)) autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, 
Some(requestContext)) autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, Some(requestContext)) // Should only trigger once val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) @@ -208,7 +217,7 @@ class AutoTopicCreationManagerTest { // Could do the send again as inflight topics are cleared. autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, Some(requestContext)) Mockito.verify(brokerToController, Mockito.times(2)).sendRequest( any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), argumentCaptor.capture()) @@ -217,7 +226,7 @@ class AutoTopicCreationManagerTest { @Test def testCreateStreamsInternalTopics(): Unit = { val topicConfig = new CreatableTopicConfigCollection() - topicConfig.add(new CreatableTopicConfig().setName("cleanup.policy").setValue("compact")); + topicConfig.add(new CreatableTopicConfig().setName("cleanup.policy").setValue("compact")) val topics = Map( "stream-topic-1" -> new CreatableTopic().setName("stream-topic-1").setNumPartitions(3).setReplicationFactor(2).setConfigs(topicConfig), @@ -230,9 +239,11 @@ class AutoTopicCreationManagerTest { brokerToController, groupCoordinator, transactionCoordinator, - Some(shareCoordinator)) + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) - autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext) + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) val argumentCaptor = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]) Mockito.verify(brokerToController).sendRequest( @@ -251,7 +262,7 @@ class AutoTopicCreationManagerTest { .build(ApiKeys.CREATE_TOPICS.latestVersion()) val forwardedRequestBuffer = capturedRequest.requestData().duplicate() - assertEquals(requestHeader, RequestHeader.parse(forwardedRequestBuffer)); + assertEquals(requestHeader, RequestHeader.parse(forwardedRequestBuffer)) assertEquals(requestBody.data(), CreateTopicsRequest.parse(new ByteBufferAccessor(forwardedRequestBuffer), ApiKeys.CREATE_TOPICS.latestVersion()).data()) } @@ -266,52 +277,17 @@ class AutoTopicCreationManagerTest { brokerToController, groupCoordinator, transactionCoordinator, - Some(shareCoordinator)) + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) - autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext) + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) Mockito.verify(brokerToController, never()).sendRequest( any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), any(classOf[ControllerRequestCompletionHandler])) } - @Test - def testCreateStreamsInternalTopicsWithDefaultConfig(): Unit = { - val topics = Map( - "stream-topic-1" -> new CreatableTopic().setName("stream-topic-1").setNumPartitions(-1).setReplicationFactor(-1) - ) - val requestContext = initializeRequestContextWithUserPrincipal() - - autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, - brokerToController, - groupCoordinator, - transactionCoordinator, - Some(shareCoordinator)) - - 
autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext); - - val argumentCaptor = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]) - Mockito.verify(brokerToController).sendRequest( - argumentCaptor.capture(), - any(classOf[ControllerRequestCompletionHandler])) - - val capturedRequest = argumentCaptor.getValue.asInstanceOf[EnvelopeRequest.Builder].build(ApiKeys.ENVELOPE.latestVersion()) - - val requestHeader = new RequestHeader(ApiKeys.CREATE_TOPICS, ApiKeys.CREATE_TOPICS.latestVersion(), "clientId", 0) - val topicsCollection = new CreateTopicsRequestData.CreatableTopicCollection - topicsCollection.add(getNewTopic("stream-topic-1", config.numPartitions, config.defaultReplicationFactor.toShort)) - val requestBody = new CreateTopicsRequest.Builder( - new CreateTopicsRequestData() - .setTopics(topicsCollection) - .setTimeoutMs(requestTimeout)) - .build(ApiKeys.CREATE_TOPICS.latestVersion()) - val forwardedRequestBuffer = capturedRequest.requestData().duplicate() - assertEquals(requestHeader, RequestHeader.parse(forwardedRequestBuffer)); - assertEquals(requestBody.data(), CreateTopicsRequest.parse(new ByteBufferAccessor(forwardedRequestBuffer), - ApiKeys.CREATE_TOPICS.latestVersion()).data()) - } - @Test def testCreateStreamsInternalTopicsPassesPrincipal(): Unit = { val topics = Map( @@ -324,9 +300,11 @@ class AutoTopicCreationManagerTest { brokerToController, groupCoordinator, transactionCoordinator, - Some(shareCoordinator)) + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) - autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext); + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) val argumentCaptor = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]) Mockito.verify(brokerToController).sendRequest( @@ -355,7 +333,9 @@ class AutoTopicCreationManagerTest { brokerToController, groupCoordinator, transactionCoordinator, - Some(shareCoordinator)) + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) val createTopicApiVersion = new ApiVersionsResponseData.ApiVersion() .setApiKey(ApiKeys.CREATE_TOPICS.id) @@ -376,7 +356,7 @@ class AutoTopicCreationManagerTest { isInternal: Boolean, metadataContext: Option[RequestContext] = None): Unit = { val topicResponses = autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, metadataContext) + Set(topicName), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, metadataContext) val expectedResponses = Seq(new MetadataResponseTopic() .setErrorCode(error.code()) @@ -392,4 +372,535 @@ class AutoTopicCreationManagerTest { .setNumPartitions(numPartitions) .setReplicationFactor(replicationFactor) } + + @Test + def testTopicCreationErrorCaching(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "test-topic-1" -> new CreatableTopic().setName("test-topic-1").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) + + val argumentCaptor = 
ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Simulate a CreateTopicsResponse with errors + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic-1") + .setErrorCode(Errors.TOPIC_ALREADY_EXISTS.code()) + .setErrorMessage("Topic 'test-topic-1' already exists.") + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val header = new RequestHeader(ApiKeys.CREATE_TOPICS, 0, "client", 1) + val clientResponse = new ClientResponse(header, null, null, + 0, 0, false, null, null, createTopicsResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify that the error was cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic-1"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + assertTrue(cachedErrors.contains("test-topic-1")) + assertEquals("Topic 'test-topic-1' already exists.", cachedErrors("test-topic-1")) + } + + @Test + def testGetTopicCreationErrorsWithMultipleTopics(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "success-topic" -> new CreatableTopic().setName("success-topic").setNumPartitions(1).setReplicationFactor(1), + "failed-topic" -> new CreatableTopic().setName("failed-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Simulate mixed response - one success, one failure + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + createTopicsResponseData.topics().add( + new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("success-topic") + .setErrorCode(Errors.NONE.code()) + ) + createTopicsResponseData.topics().add( + new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("failed-topic") + .setErrorCode(Errors.POLICY_VIOLATION.code()) + .setErrorMessage("Policy violation") + ) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val header = new RequestHeader(ApiKeys.CREATE_TOPICS, 0, "client", 1) + val clientResponse = new ClientResponse(header, null, null, + 0, 0, false, null, null, createTopicsResponse) + + argumentCaptor.getValue.onComplete(clientResponse) + + // Only the failed topic should be cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("success-topic", "failed-topic", "nonexistent-topic"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + 
assertTrue(cachedErrors.contains("failed-topic")) + assertEquals("Policy violation", cachedErrors("failed-topic")) + } + + @Test + def testErrorCacheTTL(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + + // First cache an error by simulating topic creation failure + val topics = Map( + "test-topic" -> new CreatableTopic().setName("test-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val shortTtlMs = 1000L // Use 1 second TTL for faster testing + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, shortTtlMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Simulate a CreateTopicsResponse with error + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic") + .setErrorCode(Errors.INVALID_REPLICATION_FACTOR.code()) + .setErrorMessage("Invalid replication factor") + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val header = new RequestHeader(ApiKeys.CREATE_TOPICS, 0, "client", 1) + val clientResponse = new ClientResponse(header, null, null, + 0, 0, false, null, null, createTopicsResponse) + + // Cache the error at T0 + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify error is cached and accessible within TTL + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + assertEquals("Invalid replication factor", cachedErrors("test-topic")) + + // Advance time beyond TTL + mockTime.sleep(shortTtlMs + 100) // T0 + 1.1 seconds + + // Verify error is now expired and proactively cleaned up + val expiredErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertTrue(expiredErrors.isEmpty, "Expired errors should be proactively cleaned up") + } + + @Test + def testEnvelopeResponseSuccessfulParsing(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "test-topic" -> new CreatableTopic().setName("test-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val timeoutMs = 5000L + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, timeoutMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create a successful CreateTopicsResponse + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new 
org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic") + .setErrorCode(Errors.NONE.code()) + .setNumPartitions(1) + .setReplicationFactor(1.toShort) + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val requestVersion = ApiKeys.CREATE_TOPICS.latestVersion() + val correlationId = requestContext.correlationId // Use the actual correlation ID from request context + val clientId = requestContext.clientId + + // Serialize the CreateTopicsResponse with header as it would appear in an envelope + val responseHeader = new ResponseHeader(correlationId, ApiKeys.CREATE_TOPICS.responseHeaderVersion(requestVersion)) + val serializedResponse = RequestUtils.serialize(responseHeader.data(), responseHeader.headerVersion(), + createTopicsResponse.data(), requestVersion) + + // Create an EnvelopeResponse containing the serialized CreateTopicsResponse + val envelopeResponse = new EnvelopeResponse(serializedResponse, Errors.NONE) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, clientId, correlationId) + val clientResponse = new ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify no errors were cached (successful response) + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertTrue(cachedErrors.isEmpty, "No errors should be cached for successful response") + } + + @Test + def testEnvelopeResponseWithEnvelopeError(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "test-topic" -> new CreatableTopic().setName("test-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val timeoutMs = 5000L + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, timeoutMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create an EnvelopeResponse with an envelope-level error + val envelopeResponse = new EnvelopeResponse(ByteBuffer.allocate(0), Errors.UNSUPPORTED_VERSION) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, requestContext.clientId, requestContext.correlationId) + val clientResponse = new ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify the envelope error was cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + assertTrue(cachedErrors("test-topic").contains("Envelope error: UNSUPPORTED_VERSION")) + } + + @Test + def testEnvelopeResponseParsingException(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics 
= Map( + "test-topic" -> new CreatableTopic().setName("test-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val timeoutMs = 5000L + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, timeoutMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create an EnvelopeResponse with malformed response data that will cause parsing to fail + val malformedData = ByteBuffer.wrap("invalid response data".getBytes()) + val envelopeResponse = new EnvelopeResponse(malformedData, Errors.NONE) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, requestContext.clientId, requestContext.correlationId) + val clientResponse = new ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify the parsing error was cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + assertTrue(cachedErrors("test-topic").contains("Response parsing error:")) + } + + @Test + def testEnvelopeResponseCorrelationIdMismatch(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "test-topic" -> new CreatableTopic().setName("test-topic").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val timeoutMs = 5000L + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, timeoutMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create a CreateTopicsResponse with a different correlation ID than the request + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic") + .setErrorCode(Errors.NONE.code()) + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val requestVersion = ApiKeys.CREATE_TOPICS.latestVersion() + val requestCorrelationId = 123 + val responseCorrelationId = 456 // Different correlation ID + val clientId = "test-client" + + // Serialize the CreateTopicsResponse with mismatched correlation ID + val responseHeader = new ResponseHeader(responseCorrelationId, ApiKeys.CREATE_TOPICS.responseHeaderVersion(requestVersion)) + val serializedResponse = RequestUtils.serialize(responseHeader.data(), responseHeader.headerVersion(), + createTopicsResponse.data(), requestVersion) + + // Create an EnvelopeResponse containing the serialized CreateTopicsResponse + val envelopeResponse = new EnvelopeResponse(serializedResponse, Errors.NONE) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, clientId, requestCorrelationId) + val clientResponse = new 
ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify the correlation ID mismatch error was cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(Set("test-topic"), mockTime.milliseconds()) + assertEquals(1, cachedErrors.size) + assertTrue(cachedErrors("test-topic").contains("Response parsing error:")) + } + + @Test + def testEnvelopeResponseWithTopicErrors(): Unit = { + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val topics = Map( + "test-topic-1" -> new CreatableTopic().setName("test-topic-1").setNumPartitions(1).setReplicationFactor(1), + "test-topic-2" -> new CreatableTopic().setName("test-topic-2").setNumPartitions(1).setReplicationFactor(1) + ) + val requestContext = initializeRequestContextWithUserPrincipal() + val timeoutMs = 5000L + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, timeoutMs) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create a CreateTopicsResponse with mixed success and error results + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + + // Successful topic + val successResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic-1") + .setErrorCode(Errors.NONE.code()) + .setNumPartitions(1) + .setReplicationFactor(1.toShort) + createTopicsResponseData.topics().add(successResult) + + // Failed topic + val errorResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic-2") + .setErrorCode(Errors.TOPIC_ALREADY_EXISTS.code()) + .setErrorMessage("Topic already exists") + createTopicsResponseData.topics().add(errorResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val requestVersion = ApiKeys.CREATE_TOPICS.latestVersion() + val correlationId = requestContext.correlationId // Use the actual correlation ID from request context + val clientId = requestContext.clientId + + // Serialize the CreateTopicsResponse with header + val responseHeader = new ResponseHeader(correlationId, ApiKeys.CREATE_TOPICS.responseHeaderVersion(requestVersion)) + val serializedResponse = RequestUtils.serialize(responseHeader.data(), responseHeader.headerVersion(), + createTopicsResponse.data(), requestVersion) + + // Create an EnvelopeResponse containing the serialized CreateTopicsResponse + val envelopeResponse = new EnvelopeResponse(serializedResponse, Errors.NONE) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, clientId, correlationId) + val clientResponse = new ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // Verify only the failed topic was cached + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors( + Set("test-topic-1", "test-topic-2"), mockTime.milliseconds()) + + assertEquals(1, cachedErrors.size, s"Expected only 1 error but found: 
$cachedErrors") + assertTrue(cachedErrors.contains("test-topic-2")) + assertEquals("Topic already exists", cachedErrors("test-topic-2")) + } + + @Test + def testSendCreateTopicRequestEnvelopeHandling(): Unit = { + // Test that the sendCreateTopicRequest method (which does not cache errors) handles envelope responses correctly + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = testCacheCapacity) + + val requestContext = initializeRequestContextWithUserPrincipal() + + // Call createTopics which uses sendCreateTopicRequest internally + autoTopicCreationManager.createTopics( + Set("test-topic"), ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, Some(requestContext)) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Create a CreateTopicsResponse with an error + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName("test-topic") + .setErrorCode(Errors.INVALID_TOPIC_EXCEPTION.code()) + .setErrorMessage("Invalid topic name") + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val requestVersion = ApiKeys.CREATE_TOPICS.latestVersion() + val correlationId = requestContext.correlationId // Use the actual correlation ID from request context + val clientId = requestContext.clientId + + // Serialize the CreateTopicsResponse with header + val responseHeader = new ResponseHeader(correlationId, ApiKeys.CREATE_TOPICS.responseHeaderVersion(requestVersion)) + val serializedResponse = RequestUtils.serialize(responseHeader.data(), responseHeader.headerVersion(), + createTopicsResponse.data(), requestVersion) + + // Create an EnvelopeResponse containing the serialized CreateTopicsResponse + val envelopeResponse = new EnvelopeResponse(serializedResponse, Errors.NONE) + val requestHeader = new RequestHeader(ApiKeys.ENVELOPE, 0, clientId, correlationId) + val clientResponse = new ClientResponse(requestHeader, null, null, + 0, 0, false, null, null, envelopeResponse) + + // Trigger the completion handler + argumentCaptor.getValue.onComplete(clientResponse) + + // For sendCreateTopicRequest, errors are not cached, but we can verify that the handler completed without throwing. + // The test passes if no exception is thrown during envelope processing + } + + @Test + def testErrorCacheExpirationBasedEviction(): Unit = { + // Create manager with small cache size for testing + autoTopicCreationManager = new DefaultAutoTopicCreationManager( + config, + brokerToController, + groupCoordinator, + transactionCoordinator, + shareCoordinator, + mockTime, + topicErrorCacheCapacity = 3) + + val requestContext = initializeRequestContextWithUserPrincipal() + + // Create 5 topics to exceed the cache size of 3 + val topicNames = (1 to 5).map(i => s"test-topic-$i") + + // Add errors for all 5 topics to the cache + topicNames.zipWithIndex.foreach { case (topicName, idx) => + val topics = Map( + topicName -> new CreatableTopic().setName(topicName).setNumPartitions(1).setReplicationFactor(1) + ) + + autoTopicCreationManager.createStreamsInternalTopics(topics, requestContext, 
config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs() * 2) + + val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) + Mockito.verify(brokerToController, Mockito.atLeastOnce()).sendRequest( + any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), + argumentCaptor.capture()) + + // Simulate error response for this topic + val createTopicsResponseData = new org.apache.kafka.common.message.CreateTopicsResponseData() + val topicResult = new org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult() + .setName(topicName) + .setErrorCode(Errors.TOPIC_ALREADY_EXISTS.code()) + .setErrorMessage(s"Topic '$topicName' already exists.") + createTopicsResponseData.topics().add(topicResult) + + val createTopicsResponse = new CreateTopicsResponse(createTopicsResponseData) + val header = new RequestHeader(ApiKeys.CREATE_TOPICS, 0, "client", 1) + val clientResponse = new ClientResponse(header, null, null, + 0, 0, false, null, null, createTopicsResponse) + + argumentCaptor.getValue.onComplete(clientResponse) + + // Advance time slightly between additions to ensure different timestamps + mockTime.sleep(10) + + } + + // With cache size of 3, topics 1 and 2 should have been evicted + val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(topicNames.toSet, mockTime.milliseconds()) + + // Only the last 3 topics should be in the cache (topics 3, 4, 5) + assertEquals(3, cachedErrors.size, "Cache should contain only the most recent 3 entries") + assertTrue(cachedErrors.contains("test-topic-3"), "test-topic-3 should be in cache") + assertTrue(cachedErrors.contains("test-topic-4"), "test-topic-4 should be in cache") + assertTrue(cachedErrors.contains("test-topic-5"), "test-topic-5 should be in cache") + assertTrue(!cachedErrors.contains("test-topic-1"), "test-topic-1 should have been evicted") + assertTrue(!cachedErrors.contains("test-topic-2"), "test-topic-2 should have been evicted") + } } diff --git a/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala index 7e2aa9ca65f16..e25733a6f0dc4 100644 --- a/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala @@ -31,7 +31,7 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.utils.MockTime import org.apache.kafka.network.Session import org.apache.kafka.network.metrics.RequestChannelMetrics -import org.apache.kafka.server.quota.ThrottleCallback +import org.apache.kafka.server.quota.{ClientQuotaManager, ThrottleCallback} import org.junit.jupiter.api.AfterEach import org.mockito.Mockito.mock @@ -81,6 +81,6 @@ class BaseClientQuotaManagerTest { protected def throttle(quotaManager: ClientQuotaManager, user: String, clientId: String, throttleTimeMs: Int, channelThrottlingCallback: ThrottleCallback): Unit = { val (_, request) = buildRequest(FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, 0, 1000, new util.HashMap[TopicPartition, PartitionData])) - quotaManager.throttle(request, channelThrottlingCallback, throttleTimeMs) + quotaManager.throttle(request.header.clientId(), request.session, channelThrottlingCallback, throttleTimeMs) } } diff --git a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala index 3a0ffe1b4779f..3ace4590aacd5 100644 --- 
a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala @@ -20,7 +20,7 @@ package kafka.server import kafka.api.IntegrationTestHarness import kafka.network.SocketServer import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.common.protocol.{ApiKeys, ByteBufferAccessor} import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, RequestHeader, ResponseHeader} import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.BrokerState @@ -96,7 +96,7 @@ abstract class BaseRequestTest extends IntegrationTestHarness { val responseBuffer = ByteBuffer.wrap(responseBytes) ResponseHeader.parse(responseBuffer, apiKey.responseHeaderVersion(version)) - AbstractResponse.parseResponse(apiKey, responseBuffer, version) match { + AbstractResponse.parseResponse(apiKey, new ByteBufferAccessor(responseBuffer), version) match { case response: T => response case response => throw new ClassCastException(s"Expected response with type ${classTag.runtimeClass}, but found ${response.getClass}") diff --git a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala deleted file mode 100644 index c7a4bd45f780c..0000000000000 --- a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import org.apache.kafka.common.test.api.{ClusterTest, Type} -import org.apache.kafka.clients.ClientResponse -import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic -import org.apache.kafka.common.message.{BrokerRegistrationRequestData, CreateTopicsRequestData} -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests._ -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.common.utils.Time -import org.apache.kafka.common.{Node, Uuid} -import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, Feature, MetadataVersion, MetadataVersionTestUtils, NodeToControllerChannelManager} -import org.junit.jupiter.api.Assertions.assertEquals - -import java.util -import java.util.Collections -import java.util.concurrent.{CompletableFuture, TimeUnit, TimeoutException} - -/** - * This test simulates a broker registering with the KRaft quorum under different configurations. 
- */ -class BrokerRegistrationRequestTest { - - def brokerToControllerChannelManager(clusterInstance: ClusterInstance): NodeToControllerChannelManager = { - new NodeToControllerChannelManagerImpl( - new ControllerNodeProvider() { - def node: Option[Node] = Some(new Node( - clusterInstance.anyControllerSocketServer().config.nodeId, - "127.0.0.1", - clusterInstance.anyControllerSocketServer().boundPort(clusterInstance.controllerListenerName().get()), - )) - - def listenerName: ListenerName = clusterInstance.controllerListenerName().get() - - val securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT - - val saslMechanism: String = "" - - override def getControllerInfo(): ControllerInformation = - ControllerInformation(node, listenerName, securityProtocol, saslMechanism) - }, - Time.SYSTEM, - new Metrics(), - clusterInstance.anyControllerSocketServer().config, - "heartbeat", - "test-heartbeat-", - 10000 - ) - } - - def sendAndReceive[T <: AbstractRequest, R <: AbstractResponse]( - channelManager: NodeToControllerChannelManager, - reqBuilder: AbstractRequest.Builder[T], - timeoutMs: Int - ): R = { - val responseFuture = new CompletableFuture[R]() - channelManager.sendRequest(reqBuilder, new ControllerRequestCompletionHandler() { - override def onTimeout(): Unit = responseFuture.completeExceptionally(new TimeoutException()) - - override def onComplete(response: ClientResponse): Unit = - responseFuture.complete(response.responseBody().asInstanceOf[R]) - }) - responseFuture.get(timeoutMs, TimeUnit.MILLISECONDS) - } - - def registerBroker( - channelManager: NodeToControllerChannelManager, - clusterId: String, - brokerId: Int, - zkEpoch: Option[Long], - featureLevelToSend: Option[(Short, Short)] - ): Errors = { - val features = new BrokerRegistrationRequestData.FeatureCollection() - featureLevelToSend.foreach { case (min, max) => - features.add(new BrokerRegistrationRequestData.Feature() - .setName(MetadataVersion.FEATURE_NAME) - .setMinSupportedVersion(min) - .setMaxSupportedVersion(max) - ) - } - Feature.PRODUCTION_FEATURES.stream().filter(_.featureName != MetadataVersion.FEATURE_NAME).forEach { - feature => - features.add(new BrokerRegistrationRequestData.Feature() - .setName(feature.featureName) - .setMinSupportedVersion(feature.minimumProduction()) - .setMaxSupportedVersion(feature.latestTesting())) - } - - val req = new BrokerRegistrationRequestData() - .setBrokerId(brokerId) - .setLogDirs(Collections.singletonList(Uuid.randomUuid())) - .setClusterId(clusterId) - .setIncarnationId(Uuid.randomUuid()) - .setIsMigratingZkBroker(zkEpoch.isDefined) - .setFeatures(features) - .setListeners(new BrokerRegistrationRequestData.ListenerCollection(util.Arrays.asList( - new BrokerRegistrationRequestData.Listener(). - setName("EXTERNAL"). - setHost("example.com"). - setPort(8082). 
- setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)) - .iterator())) - - val resp = sendAndReceive[BrokerRegistrationRequest, BrokerRegistrationResponse]( - channelManager, new BrokerRegistrationRequest.Builder(req), 30000) - Errors.forCode(resp.data().errorCode()) - } - - - def createTopics(channelManager: NodeToControllerChannelManager, - topicName: String): Errors = { - val createTopics = new CreateTopicsRequestData() - createTopics.setTopics(new CreateTopicsRequestData.CreatableTopicCollection()) - createTopics.topics().add(new CreatableTopic().setName(topicName).setNumPartitions(10).setReplicationFactor(1)) - createTopics.setTimeoutMs(500) - - val req = new CreateTopicsRequest.Builder(createTopics) - val resp = sendAndReceive[CreateTopicsRequest, CreateTopicsResponse](channelManager, req, 3000).data() - Errors.forCode(resp.topics().find(topicName).errorCode()) - } - - @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3) - def testRegisterZkWith33Controller(clusterInstance: ClusterInstance): Unit = { - // Verify that a controller running an old metadata.version cannot register a ZK broker - val clusterId = clusterInstance.clusterId() - val channelManager = brokerToControllerChannelManager(clusterInstance) - try { - channelManager.start() - // Invalid registration (isMigratingZkBroker, but MV does not support migrations) - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersionTestUtils.IBP_3_3_IV0_FEATURE_LEVEL, MetadataVersion.IBP_3_3_IV3.featureLevel)))) - - // No features (MV) sent with registration, controller can't verify - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), None)) - - // Given MV is too high for controller to support - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_4_IV0.featureLevel, MetadataVersion.IBP_3_4_IV0.featureLevel)))) - - // Controller supports this MV and isMigratingZkBroker is false, so this one works - assertEquals( - Errors.NONE, - registerBroker(channelManager, clusterId, 100, None, Some((MetadataVersion.IBP_3_3_IV3.featureLevel, MetadataVersion.IBP_3_4_IV0.featureLevel)))) - } finally { - channelManager.shutdown() - } - } -} diff --git a/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala index 6c268d3c3fbd9..c166eef801221 100644 --- a/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala @@ -16,17 +16,19 @@ */ package kafka.server -import kafka.server.ClientQuotaManager.BaseUserEntity - +import org.apache.kafka.common.Cluster import java.net.InetAddress +import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.server.config.ClientQuotaManagerConfig import org.apache.kafka.network.Session -import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaEntity, ClientQuotaManager, ClientQuotaType, QuotaType} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test +import java.util.{Collections, Map, HashMap, Optional} + class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { private val config = new 
ClientQuotaManagerConfig() @@ -38,12 +40,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Some(new Quota(2000, true)) + Optional.of(new Quota(2000, true)) ) clientQuotaManager.updateQuota( client2.configUser, client2.configClientEntity, - Some(new Quota(4000, true)) + Optional.of(new Quota(4000, true)) ) assertEquals(Long.MaxValue.toDouble, clientQuotaManager.quota(randomClient.user, randomClient.clientId).bound, 0.0, @@ -62,7 +64,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Some(new Quota(3000, true)) + Optional.of(new Quota(3000, true)) ) assertEquals(3000, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the newly overridden value (3000)") @@ -73,7 +75,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Some(new Quota(500, true)) + Optional.of(new Quota(500, true)) ) assertEquals(500, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the default value (500)") @@ -84,12 +86,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - None + Optional.empty ) clientQuotaManager.updateQuota( defaultConfigClient.configUser, defaultConfigClient.configClientEntity, - Some(new Quota(4000, true)) + Optional.of(new Quota(4000, true)) ) assertEquals(4000, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the newly overridden value (4000)") @@ -106,10 +108,10 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { */ @Test def testUserQuotaParsingWithDefaultClientIdQuota(): Unit = { - val client1 = UserClient("User1", "p1", Some(ClientQuotaManager.UserEntity("User1")), None) - val client2 = UserClient("User2", "p2", Some(ClientQuotaManager.UserEntity("User2")), None) - val randomClient = UserClient("RandomUser", "random-client-id", None, None) - val defaultConfigClient = UserClient("", "", Some(ClientQuotaManager.DefaultUserEntity), None) + val client1 = UserClient("User1", "p1", Optional.of(new ClientQuotaManager.UserEntity("User1")), Optional.empty) + val client2 = UserClient("User2", "p2", Optional.of(new ClientQuotaManager.UserEntity("User2")), Optional.empty) + val randomClient = UserClient("RandomUser", "random-client-id", Optional.empty, Optional.empty) + val defaultConfigClient = UserClient("", "", Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } @@ -119,7 +121,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val expectedMaxValueInQuotaWindow = if (expectedBound < Long.MaxValue) config.quotaWindowSizeSeconds * (config.numQuotaSamples - 1) * expectedBound.toDouble else Double.MaxValue - assertEquals(expectedMaxValueInQuotaWindow, quotaManager.getMaxValueInQuotaWindow(session, clientId), 0.01) + assertEquals(expectedMaxValueInQuotaWindow, quotaManager.maxValueInQuotaWindow(session, clientId), 0.01) val throttleTimeMs = maybeRecord(quotaManager, user, clientId, value * config.numQuotaSamples) if (expectThrottle) @@ -129,7 +131,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { } @Test - def 
testGetMaxValueInQuotaWindowWithNonDefaultQuotaWindow(): Unit = { + def testMaxValueInQuotaWindowWithNonDefaultQuotaWindow(): Unit = { val numFullQuotaWindows = 3 // 3 seconds window (vs. 10 seconds default) val nonDefaultConfig = new ClientQuotaManagerConfig(numFullQuotaWindows + 1) val clientQuotaManager = new ClientQuotaManager(nonDefaultConfig, metrics, QuotaType.FETCH, time, "") @@ -137,15 +139,15 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { // no quota set - assertEquals(Double.MaxValue, clientQuotaManager.getMaxValueInQuotaWindow(userSession, "client1"), 0.01) + assertEquals(Double.MaxValue, clientQuotaManager.maxValueInQuotaWindow(userSession, "client1"), 0.01) // Set default quota config clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - None, - Some(new Quota(10, true)) + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.empty, + Optional.of(new Quota(10, true)) ) - assertEquals(10 * numFullQuotaWindows, clientQuotaManager.getMaxValueInQuotaWindow(userSession, "client1"), 0.01) + assertEquals(10 * numFullQuotaWindows, clientQuotaManager.maxValueInQuotaWindow(userSession, "client1"), 0.01) } finally { clientQuotaManager.shutdown() } @@ -163,17 +165,17 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Set default quota config clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - None, - Some(new Quota(10, true)) + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.empty, + Optional.of(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove default quota config, back to no quotas clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - None, - None + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.empty, + Optional.empty ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -183,24 +185,24 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testSetAndRemoveUserQuota(): Unit = { - // quotaTypesEnabled will be QuotaTypes.NoQuotas initially + val clientQuotaManager = new ClientQuotaManager(new ClientQuotaManagerConfig(), metrics, QuotaType.PRODUCE, time, "") try { // Set quota config clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - None, - Some(new Quota(10, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.empty, + Optional.of(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove quota config, back to no quotas clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - None, - None + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.empty, + Optional.empty ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -217,17 +219,17 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { // Set quota config clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - Some(new Quota(10, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.of(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove quota config, back to no 
quotas clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - None + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.empty ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -242,54 +244,54 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - None, - Some(new Quota(1000, true)) + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.empty, + Optional.of(new Quota(1000, true)) ) clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(2000, true)) + Optional.empty, + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(2000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(3000, true)) + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(3000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - None, - Some(new Quota(4000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.empty, + Optional.of(new Quota(4000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - Some(new Quota(5000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.of(new Quota(5000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userB")), - None, - Some(new Quota(6000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userB")), + Optional.empty, + Optional.of(new Quota(6000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userB")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - Some(new Quota(7000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userB")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.of(new Quota(7000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userB")), - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(8000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userB")), + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(8000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userC")), - None, - Some(new Quota(10000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userC")), + Optional.empty, + Optional.of(new Quota(10000, true)) ) clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.ClientIdEntity("client1")), - Some(new Quota(9000, true)) + Optional.empty, + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.of(new Quota(9000, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 5000, 4500, expectThrottle = false) // quota takes precedence over @@ -307,9 +309,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Remove default quota config, revert to default clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - 
Some(ClientQuotaManager.DefaultClientIdEntity), - None + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.empty ) checkQuota(clientQuotaManager, "userD", "client1", 1000, 0, expectThrottle = false) // Metrics tags changed, restart counter checkQuota(clientQuotaManager, "userE", "client4", 1000, 1500, expectThrottle = true) @@ -318,9 +320,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Remove default quota config, revert to default clientQuotaManager.updateQuota( - Some(ClientQuotaManager.DefaultUserEntity), - None, - None + Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), + Optional.empty, + Optional.empty ) checkQuota(clientQuotaManager, "userF", "client4", 2000, 0, expectThrottle = false) // Default quota shared across client-id of all users checkQuota(clientQuotaManager, "userF", "client5", 2000, 0, expectThrottle = false) @@ -329,40 +331,40 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Update quotas clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - None, - Some(new Quota(8000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.empty, + Optional.of(new Quota(8000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - Some(new Quota(10000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.of(new Quota(10000, true)) ) checkQuota(clientQuotaManager, "userA", "client2", 8000, 0, expectThrottle = false) checkQuota(clientQuotaManager, "userA", "client2", 8000, 4500, expectThrottle = true) // Throttled due to sum of new and earlier values checkQuota(clientQuotaManager, "userA", "client1", 10000, 0, expectThrottle = false) checkQuota(clientQuotaManager, "userA", "client1", 10000, 6000, expectThrottle = true) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client1")), - None + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), + Optional.empty ) checkQuota(clientQuotaManager, "userA", "client6", 8000, 0, expectThrottle = true) // Throttled due to shared user quota clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client6")), - Some(new Quota(11000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client6")), + Optional.of(new Quota(11000, true)) ) checkQuota(clientQuotaManager, "userA", "client6", 11000, 8500, expectThrottle = false) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(12000, true)) + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(12000, true)) ) clientQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("userA")), - Some(ClientQuotaManager.ClientIdEntity("client6")), - None + Optional.of(new ClientQuotaManager.UserEntity("userA")), + Optional.of(new ClientQuotaManager.ClientIdEntity("client6")), + Optional.empty ) checkQuota(clientQuotaManager, "userA", "client6", 12000, 4000, expectThrottle = true) // Throttled due to sum of new and earlier 
values @@ -377,9 +379,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", "Produce", "")) try { clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(500, true)) + Optional.empty, + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(500, true)) ) // We have 10 seconds windows. Make sure that there is no quota violation @@ -401,12 +403,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { throttle(clientQuotaManager, "ANONYMOUS", "unknown", throttleTime, callback) assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - clientQuotaManager.throttledChannelReaper.doWork() + clientQuotaManager.processThrottledChannelReaperDoWork assertEquals(0, numCallbacks) time.sleep(throttleTime) // Callback can only be triggered after the delay time passes - clientQuotaManager.throttledChannelReaper.doWork() + clientQuotaManager.processThrottledChannelReaperDoWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) @@ -428,9 +430,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.PRODUCE, time, "") try { clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(500, true)) + Optional.empty, + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", "client1", 100) @@ -453,9 +455,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.PRODUCE, time, "") try { clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(500, true)) + Optional.empty, + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", "client1", 100) @@ -483,9 +485,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientId = "client@#$%" try { clientQuotaManager.updateQuota( - None, - Some(ClientQuotaManager.DefaultClientIdEntity), - Some(new Quota(500, true)) + Optional.empty, + Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), + Optional.of(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", clientId, 100) @@ -501,10 +503,126 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { } } + @Test + def testQuotaTypesEnabledUpdatesWithDefaultCallback(): Unit = { + val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.CONTROLLER_MUTATION, time, "") + try { + assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled()) + assertFalse(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(5, true))) + assertEquals(ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.of(new Quota(5, true))) + assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | 
ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client2")), Optional.of(new Quota(5, true))) + assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userB")), Optional.empty(), Optional.of(new Quota(5, true))) + assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(10, true))) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.empty()) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userB")), Optional.empty(), Optional.empty()) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client2")), Optional.empty()) + assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) + assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled) + assertFalse(clientQuotaManager.quotasEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) + assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled) + assertFalse(clientQuotaManager.quotasEnabled) + } 
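+      // every override set in this test has now been removed again; shutdown() below releases the manager's resources (e.g. its throttled-channel reaper)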
finally { + clientQuotaManager.shutdown() + } + } + + @Test + def testQuotaTypesEnabledUpdatesWithCustomCallback(): Unit = { + val customQuotaCallback = new ClientQuotaCallback { + val quotas = new HashMap[ClientQuotaEntity, Quota]() + override def configure(configs: Map[String, _]): Unit = {} + + override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): Map[String, String] = Collections.emptyMap() + + override def quotaLimit(quotaType: ClientQuotaType, metricTags: Map[String, String]): java.lang.Double = 1 + override def updateClusterMetadata(cluster: Cluster): Boolean = false + + override def updateQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity, newValue: Double): Unit = { + quotas.put(entity.asInstanceOf[ClientQuotaManager.KafkaQuotaEntity], new Quota(newValue.toLong, true)) + } + + override def removeQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity): Unit = { + quotas.remove(entity.asInstanceOf[ClientQuotaManager.KafkaQuotaEntity]) + } + + override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = false + + override def close(): Unit = {} + } + val clientQuotaManager = new ClientQuotaManager( + new ClientQuotaManagerConfig(), + metrics, + QuotaType.CONTROLLER_MUTATION, + time, + "", + Optional.of(Plugin.wrapInstance(customQuotaCallback, metrics, "")) + ) + + try { + assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should be true with custom callback") + + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) + assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.of(new Quota(12, true))) + assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) + assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) + assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") + + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) + clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.empty()) + clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) + assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled()) + assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") + } finally { + clientQuotaManager.shutdown() + } + } + private case class UserClient( user: String, clientId: String, - configUser: Option[BaseUserEntity] = None, - configClientEntity: Option[ClientQuotaManager.ClientIdEntity] = None + configUser: Optional[ClientQuotaEntity.ConfigEntity] = Optional.empty, + configClientEntity: Optional[ClientQuotaEntity.ConfigEntity] = Optional.empty ) } diff --git a/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala 
b/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala deleted file mode 100644 index 8c30f749427fc..0000000000000 --- a/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala +++ /dev/null @@ -1,592 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import java.net.InetAddress -import java.util -import java.util.concurrent.{ExecutionException, TimeUnit} -import org.apache.kafka.common.test.api.ClusterTest -import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.{ScramCredentialInfo, ScramMechanism, UserScramCredentialUpsertion} -import org.apache.kafka.common.errors.{InvalidRequestException, UnsupportedVersionException} -import org.apache.kafka.common.internals.KafkaFutureImpl -import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} -import org.apache.kafka.common.requests.{AlterClientQuotasRequest, AlterClientQuotasResponse, DescribeClientQuotasRequest, DescribeClientQuotasResponse} -import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.config.QuotaConfig -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Disabled - -import scala.jdk.CollectionConverters._ - -class ClientQuotasRequestTest(cluster: ClusterInstance) { - @ClusterTest - def testAlterClientQuotasRequest(): Unit = { - - val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user", ClientQuotaEntity.CLIENT_ID -> "client-id").asJava) - - // Expect an empty configuration. - verifyDescribeEntityQuotas(entity, Map.empty) - - // Add two configuration entries. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - - // Update an existing entry. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(15000.0) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 15000.0, - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - - // Remove an existing configuration entry. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> None - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - - // Remove a non-existent configuration entry. This should make no changes. 
- alterEntityQuotas(entity, Map( - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - - // Add back a deleted configuration entry. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(5000.0) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 5000.0, - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - - // Perform a mixed update. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> None, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.3) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 12.3 - )) - } - - @ClusterTest - def testAlterClientQuotasRequestValidateOnly(): Unit = { - val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) - - // Set up a configuration. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(23.45) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 - )) - - // Validate-only addition. - alterEntityQuotas(entity, Map( - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(50000.0) - ), validateOnly = true) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 - )) - - // Validate-only modification. - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0) - ), validateOnly = true) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 - )) - - // Validate-only removal. - alterEntityQuotas(entity, Map( - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None - ), validateOnly = true) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 - )) - - // Validate-only mixed update. 
- alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(50000.0), - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None - ), validateOnly = true) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 - )) - } - - @Disabled("TODO: KAFKA-17630 - Convert ClientQuotasRequestTest#testClientQuotasForScramUsers to kraft") - @ClusterTest - def testClientQuotasForScramUsers(): Unit = { - val userName = "user" - - val admin = cluster.admin() - try { - val results = admin.alterUserScramCredentials(util.Arrays.asList( - new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "password"))) - results.all.get - - val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> userName).asJava) - - verifyDescribeEntityQuotas(entity, Map.empty) - - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) - } finally { - admin.close() - } - } - - @ClusterTest - def testAlterIpQuotasRequest(): Unit = { - val knownHost = "1.2.3.4" - val unknownHost = "2.3.4.5" - val entity = toIpEntity(Some(knownHost)) - val defaultEntity = toIpEntity(Some(null)) - val entityFilter = ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.IP, knownHost) - val defaultEntityFilter = ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.IP) - val allIpEntityFilter = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP) - - def verifyIpQuotas(entityFilter: ClientQuotaFilterComponent, expectedMatches: Map[ClientQuotaEntity, Double]): Unit = { - TestUtils.tryUntilNoAssertionError() { - val result = describeClientQuotas(ClientQuotaFilter.containsOnly(List(entityFilter).asJava)) - assertEquals(expectedMatches.keySet, result.asScala.keySet) - result.asScala.foreach { case (entity, props) => - assertEquals(Set(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG), props.asScala.keySet) - assertEquals(expectedMatches(entity), props.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG)) - val entityName = entity.entries.get(ClientQuotaEntity.IP) - // ClientQuotaEntity with null name maps to default entity - val entityIp = if (entityName == null) - InetAddress.getByName(unknownHost) - else - InetAddress.getByName(entityName) - var currentServerQuota = 0 - currentServerQuota = cluster.brokerSocketServers().asScala.head.connectionQuotas.connectionRateForIp(entityIp) - assertTrue(Math.abs(expectedMatches(entity) - currentServerQuota) < 0.01, - s"Connection quota of $entity is not ${expectedMatches(entity)} but $currentServerQuota") - } - } - } - - // Expect an empty configuration. - verifyIpQuotas(allIpEntityFilter, Map.empty) - - // Add a configuration entry. 
- alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(100.0)), validateOnly = false) - verifyIpQuotas(entityFilter, Map(entity -> 100.0)) - - // update existing entry - alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(150.0)), validateOnly = false) - verifyIpQuotas(entityFilter, Map(entity -> 150.0)) - - // update default value - alterEntityQuotas(defaultEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(200.0)), validateOnly = false) - verifyIpQuotas(defaultEntityFilter, Map(defaultEntity -> 200.0)) - - // describe all IP quotas - verifyIpQuotas(allIpEntityFilter, Map(entity -> 150.0, defaultEntity -> 200.0)) - - // remove entry - alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> None), validateOnly = false) - verifyIpQuotas(entityFilter, Map.empty) - - // remove default value - alterEntityQuotas(defaultEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> None), validateOnly = false) - verifyIpQuotas(allIpEntityFilter, Map.empty) - } - - @ClusterTest - def testAlterClientQuotasInvalidRequests(): Unit = { - var entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "").asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) - - entity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> "").asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) - - entity = new ClientQuotaEntity(Map("" -> "name").asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) - - entity = new ClientQuotaEntity(Map.empty.asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.5)), validateOnly = true)) - - entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map("bad" -> Some(1.0)), validateOnly = true)) - - entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) - assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.5)), validateOnly = true)) - } - - private def expectInvalidRequestWithMessage(runnable: => Unit, expectedMessage: String): Unit = { - val exception = assertThrows(classOf[InvalidRequestException], () => runnable) - assertTrue(exception.getMessage.contains(expectedMessage), s"Expected message $exception to contain $expectedMessage") - } - - @ClusterTest - def testAlterClientQuotasInvalidEntityCombination(): Unit = { - val userAndIpEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user", ClientQuotaEntity.IP -> "1.2.3.4").asJava) - val clientAndIpEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> "client", ClientQuotaEntity.IP -> "1.2.3.4").asJava) - val expectedExceptionMessage = "Invalid quota entity combination" - expectInvalidRequestWithMessage(alterEntityQuotas(userAndIpEntity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), - validateOnly = true), expectedExceptionMessage) - expectInvalidRequestWithMessage(alterEntityQuotas(clientAndIpEntity, 
Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), - validateOnly = true), expectedExceptionMessage) - } - - @ClusterTest - def testAlterClientQuotasBadIp(): Unit = { - val invalidHostPatternEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.IP -> "not a valid host because it has spaces").asJava) - val unresolvableHostEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.IP -> "RFC2606.invalid").asJava) - val expectedExceptionMessage = "not a valid IP" - expectInvalidRequestWithMessage(alterEntityQuotas(invalidHostPatternEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(50.0)), - validateOnly = true), expectedExceptionMessage) - expectInvalidRequestWithMessage(alterEntityQuotas(unresolvableHostEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(50.0)), - validateOnly = true), expectedExceptionMessage) - } - - @ClusterTest - def testDescribeClientQuotasInvalidFilterCombination(): Unit = { - val ipFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP) - val userFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER) - val clientIdFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID) - val expectedExceptionMessage = "Invalid entity filter component combination" - expectInvalidRequestWithMessage(describeClientQuotas(ClientQuotaFilter.contains(List(ipFilterComponent, userFilterComponent).asJava)), - expectedExceptionMessage) - expectInvalidRequestWithMessage(describeClientQuotas(ClientQuotaFilter.contains(List(ipFilterComponent, clientIdFilterComponent).asJava)), - expectedExceptionMessage) - } - - // Entities to be matched against. - private val matchUserClientEntities = List( - (Some("user-1"), Some("client-id-1"), 50.50), - (Some("user-2"), Some("client-id-1"), 51.51), - (Some("user-3"), Some("client-id-2"), 52.52), - (Some(null), Some("client-id-1"), 53.53), - (Some("user-1"), Some(null), 54.54), - (Some("user-3"), Some(null), 55.55), - (Some("user-1"), None, 56.56), - (Some("user-2"), None, 57.57), - (Some("user-3"), None, 58.58), - (Some(null), None, 59.59), - (None, Some("client-id-2"), 60.60) - ).map { case (u, c, v) => (toClientEntity(u, c), v) } - - private val matchIpEntities = List( - (Some("1.2.3.4"), 10.0), - (Some("2.3.4.5"), 20.0) - ).map { case (ip, quota) => (toIpEntity(ip), quota)} - - private def setupDescribeClientQuotasMatchTest(): Unit = { - val userClientQuotas = matchUserClientEntities.map { case (e, v) => - e -> Map((QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Some(v))) - }.toMap - val ipQuotas = matchIpEntities.map { case (e, v) => - e -> Map((QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Some(v))) - }.toMap - val result = alterClientQuotas(userClientQuotas ++ ipQuotas, validateOnly = false) - (matchUserClientEntities ++ matchIpEntities).foreach(e => result(e._1).get(10, TimeUnit.SECONDS)) - } - - @ClusterTest - def testDescribeClientQuotasMatchExact(): Unit = { - setupDescribeClientQuotasMatchTest() - - def matchEntity(entity: ClientQuotaEntity) = { - val components = entity.entries.asScala.map { case (entityType, entityName) => - entityName match { - case null => ClientQuotaFilterComponent.ofDefaultEntity(entityType) - case name => ClientQuotaFilterComponent.ofEntity(entityType, name) - } - } - describeClientQuotas(ClientQuotaFilter.containsOnly(components.toList.asJava)) - } - - // Test exact matches. 
- matchUserClientEntities.foreach { case (e, v) => - TestUtils.tryUntilNoAssertionError() { - val result = matchEntity(e) - assertEquals(1, result.size) - assertTrue(result.get(e) != null) - val value = result.get(e).get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG) - assertNotNull(value) - assertEquals(value, v, 1e-6) - } - } - - // Entities not contained in `matchEntityList`. - val notMatchEntities = List( - (Some("user-1"), Some("client-id-2")), - (Some("user-3"), Some("client-id-1")), - (Some("user-2"), Some(null)), - (Some("user-4"), None), - (Some(null), Some("client-id-2")), - (None, Some("client-id-1")), - (None, Some("client-id-3")), - ).map { case (u, c) => - new ClientQuotaEntity((u.map((ClientQuotaEntity.USER, _)) ++ - c.map((ClientQuotaEntity.CLIENT_ID, _))).toMap.asJava) - } - - // Verify exact matches of the non-matches returns empty. - notMatchEntities.foreach { e => - val result = matchEntity(e) - assertEquals(0, result.size) - } - } - - @ClusterTest - def testDescribeClientQuotasMatchPartial(): Unit = { - setupDescribeClientQuotasMatchTest() - - def testMatchEntities(filter: ClientQuotaFilter, expectedMatchSize: Int, partition: ClientQuotaEntity => Boolean): Unit = { - TestUtils.tryUntilNoAssertionError() { - val result = describeClientQuotas(filter) - val (expectedMatches, _) = (matchUserClientEntities ++ matchIpEntities).partition(e => partition(e._1)) - assertEquals(expectedMatchSize, expectedMatches.size) // for test verification - assertEquals(expectedMatchSize, result.size, s"Failed to match $expectedMatchSize entities for $filter") - val expectedMatchesMap = expectedMatches.toMap - matchUserClientEntities.foreach { case (entity, expectedValue) => - if (expectedMatchesMap.contains(entity)) { - val config = result.get(entity) - assertNotNull(config) - val value = config.get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG) - assertNotNull(value) - assertEquals(expectedValue, value, 1e-6) - } else { - assertNull(result.get(entity)) - } - } - matchIpEntities.foreach { case (entity, expectedValue) => - if (expectedMatchesMap.contains(entity)) { - val config = result.get(entity) - assertNotNull(config) - val value = config.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG) - assertNotNull(value) - assertEquals(expectedValue, value, 1e-6) - } else { - assertNull(result.get(entity)) - } - } - } - } - - // Match open-ended existing user. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-1")).asJava), 3, - entity => entity.entries.get(ClientQuotaEntity.USER) == "user-1" - ) - - // Match open-ended non-existent user. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "unknown")).asJava), 0, - entity => false - ) - - // Match open-ended existing client ID. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-2")).asJava), 2, - entity => entity.entries.get(ClientQuotaEntity.CLIENT_ID) == "client-id-2" - ) - - // Match open-ended default user. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)).asJava), 2, - entity => entity.entries.containsKey(ClientQuotaEntity.USER) && entity.entries.get(ClientQuotaEntity.USER) == null - ) - - // Match close-ended existing user. 
- testMatchEntities( - ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-2")).asJava), 1, - entity => entity.entries.get(ClientQuotaEntity.USER) == "user-2" && !entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) - ) - - // Match close-ended existing client ID that has no matching entity. - testMatchEntities( - ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-1")).asJava), 0, - entity => false - ) - - // Match against all entities with the user type in a close-ended match. - testMatchEntities( - ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER)).asJava), 4, - entity => entity.entries.containsKey(ClientQuotaEntity.USER) && !entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) - ) - - // Match against all entities with the user type in an open-ended match. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER)).asJava), 10, - entity => entity.entries.containsKey(ClientQuotaEntity.USER) - ) - - // Match against all entities with the client ID type in a close-ended match. - testMatchEntities( - ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID)).asJava), 1, - entity => entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) && !entity.entries.containsKey(ClientQuotaEntity.USER) - ) - - // Match against all entities with the client ID type in an open-ended match. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID)).asJava), 7, - entity => entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) - ) - - // Match against all entities with IP type in an open-ended match. - testMatchEntities( - ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP)).asJava), 2, - entity => entity.entries.containsKey(ClientQuotaEntity.IP) - ) - - // Match open-ended empty filter list. This should match all entities. - testMatchEntities(ClientQuotaFilter.contains(List.empty.asJava), 13, entity => true) - - // Match close-ended empty filter list. This should match no entities. - testMatchEntities(ClientQuotaFilter.containsOnly(List.empty.asJava), 0, _ => false) - } - - @ClusterTest - def testClientQuotasUnsupportedEntityTypes(): Unit = { - val entity = new ClientQuotaEntity(Map("other" -> "name").asJava) - assertThrows(classOf[UnsupportedVersionException], () => verifyDescribeEntityQuotas(entity, Map.empty)) - } - - @ClusterTest - def testClientQuotasSanitized(): Unit = { - // An entity with name that must be sanitized when writing to Zookeeper. 
- val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user with spaces").asJava) - - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, - )) - } - - private def verifyDescribeEntityQuotas(entity: ClientQuotaEntity, quotas: Map[String, Double]): Unit = { - TestUtils.tryUntilNoAssertionError(waitTime = 5000L) { - val components = entity.entries.asScala.map { case (entityType, entityName) => - Option(entityName).map{ name => ClientQuotaFilterComponent.ofEntity(entityType, name)} - .getOrElse(ClientQuotaFilterComponent.ofDefaultEntity(entityType) - ) - } - val describe = describeClientQuotas(ClientQuotaFilter.containsOnly(components.toList.asJava)) - if (quotas.isEmpty) { - assertEquals(0, describe.size) - } else { - assertEquals(1, describe.size) - val configs = describe.get(entity) - assertNotNull(configs) - assertEquals(quotas.size, configs.size) - quotas.foreach { case (k, v) => - val value = configs.get(k) - assertNotNull(value) - assertEquals(v, value, 1e-6) - } - } - } - } - - private def toClientEntity(user: Option[String], clientId: Option[String]) = - new ClientQuotaEntity((user.map(ClientQuotaEntity.USER -> _) ++ clientId.map(ClientQuotaEntity.CLIENT_ID -> _)).toMap.asJava) - - private def toIpEntity(ip: Option[String]) = new ClientQuotaEntity(ip.map(ClientQuotaEntity.IP -> _).toMap.asJava) - - private def describeClientQuotas(filter: ClientQuotaFilter) = { - val result = new KafkaFutureImpl[java.util.Map[ClientQuotaEntity, java.util.Map[String, java.lang.Double]]] - sendDescribeClientQuotasRequest(filter).complete(result) - try result.get catch { - case e: ExecutionException => throw e.getCause - } - } - - private def sendDescribeClientQuotasRequest(filter: ClientQuotaFilter): DescribeClientQuotasResponse = { - val request = new DescribeClientQuotasRequest.Builder(filter).build() - IntegrationTestUtils.connectAndReceive[DescribeClientQuotasResponse](request, - destination = cluster.anyBrokerSocketServer(), - listenerName = cluster.clientListener()) - } - - private def alterEntityQuotas(entity: ClientQuotaEntity, alter: Map[String, Option[Double]], validateOnly: Boolean) = - try alterClientQuotas(Map(entity -> alter), validateOnly)(entity).get(10, TimeUnit.SECONDS) catch { - case e: ExecutionException => throw e.getCause - } - - private def alterClientQuotas(request: Map[ClientQuotaEntity, Map[String, Option[Double]]], validateOnly: Boolean) = { - val entries = request.map { case (entity, alter) => - val ops = alter.map { case (key, value) => - new ClientQuotaAlteration.Op(key, value.map(Double.box).orNull) - }.asJavaCollection - new ClientQuotaAlteration(entity, ops) - } - - val response = request.map(e => e._1 -> new KafkaFutureImpl[Void]).asJava - sendAlterClientQuotasRequest(entries, validateOnly).complete(response) - val result = response.asScala - assertEquals(request.size, result.size) - request.foreach(e => assertTrue(result.contains(e._1))) - result - } - - private def sendAlterClientQuotasRequest(entries: Iterable[ClientQuotaAlteration], validateOnly: Boolean): AlterClientQuotasResponse = { - val request = new AlterClientQuotasRequest.Builder(entries.asJavaCollection, validateOnly).build() - IntegrationTestUtils.connectAndReceive[AlterClientQuotasResponse](request, - destination = cluster.anyBrokerSocketServer(), - listenerName = cluster.clientListener()) - } - -} diff --git 
a/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala index 368280d235453..9b8e85c44e7ba 100644 --- a/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala @@ -18,7 +18,8 @@ package kafka.server import org.apache.kafka.common.metrics.Quota import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} +import org.apache.kafka.server.quota.ClientQuotaEntity import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -30,10 +31,13 @@ class ClientRequestQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testRequestPercentageQuotaViolation(): Unit = { val clientRequestQuotaManager = new ClientRequestQuotaManager(config, metrics, time, "", Optional.empty()) + val userEntity: ClientQuotaEntity.ConfigEntity = new ClientQuotaManager.UserEntity("ANONYMOUS") + val clientEntity: ClientQuotaEntity.ConfigEntity = new ClientQuotaManager.ClientIdEntity("test-client") + clientRequestQuotaManager.updateQuota( - Some(ClientQuotaManager.UserEntity("ANONYMOUS")), - Some(ClientQuotaManager.ClientIdEntity("test-client")), - Some(Quota.upperBound(1)) + Optional.of(userEntity), + Optional.of(clientEntity), + Optional.of(Quota.upperBound(1)) ) val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", QuotaType.REQUEST.toString, "")) def millisToPercent(millis: Double) = millis * 1000 * 1000 * ClientRequestQuotaManager.NANOS_TO_PERCENTAGE_PER_SECOND @@ -59,12 +63,12 @@ class ClientRequestQuotaManagerTest extends BaseClientQuotaManagerTest { throttle(clientRequestQuotaManager, "ANONYMOUS", "test-client", throttleTime, callback) assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - clientRequestQuotaManager.throttledChannelReaper.doWork() + clientRequestQuotaManager.processThrottledChannelReaperDoWork() assertEquals(0, numCallbacks) time.sleep(throttleTime) // Callback can only be triggered after the delay time passes - clientRequestQuotaManager.throttledChannelReaper.doWork() + clientRequestQuotaManager.processThrottledChannelReaperDoWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala index 33bd1c174defa..0f55feccb46a0 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala @@ -207,6 +207,20 @@ class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo ) assertEquals(expected, actual) + + val unknownGroupResponse = consumerGroupDescribe( + groupIds = List("grp-unknown"), + includeAuthorizedOperations = true, + version = version.toShort, + ) + assertEquals(Errors.GROUP_ID_NOT_FOUND.code, unknownGroupResponse.head.errorCode()) + + val emptyGroupResponse = consumerGroupDescribe( + groupIds = List(""), + includeAuthorizedOperations = true, + version = version.toShort, + ) + assertEquals(Errors.INVALID_GROUP_ID.code, emptyGroupResponse.head.errorCode()) } } finally { admin.close() diff --git 
a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala index baf13cde2f0cf..506d0007924bb 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala @@ -301,6 +301,48 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC } } + @ClusterTest + def testEmptyConsumerGroupId(): Unit = { + val admin = cluster.admin() + + // Creates the __consumer_offsets topic because it won't be created automatically + // in this test, which does not use the FindCoordinator API. + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to a nonexistent topic. + val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("") + .setMemberId(Uuid.randomUuid().toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava), + true + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REQUEST.code + }, msg = s"Did not receive the expected error. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(Errors.INVALID_REQUEST.code, consumerGroupHeartbeatResponse.data.errorCode) + assertEquals("GroupId can't be empty.", consumerGroupHeartbeatResponse.data.errorMessage) + } finally { + admin.close() + } + } + @ClusterTest def testConsumerGroupHeartbeatWithEmptySubscription(): Unit = { val admin = cluster.admin() diff --git a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala index 37c81ce20e508..1b1dec69eaf73 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala @@ -19,7 +19,7 @@ package kafka.server import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchResponseData, SyncGroupResponseData} +import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchRequestData, OffsetFetchResponseData, SyncGroupResponseData} import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance @@ -690,7 +690,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord val topicName = "foo" // Create the topic.
- createTopic( + val topicId = createTopic( topic = topicName, numPartitions = 3 ) @@ -702,6 +702,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = "member-id", memberEpoch = -1, topic = topicName, + topicId = topicId, partition = 0, offset = 1000L, expectedError = Errors.NONE, @@ -765,7 +766,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord createOffsetsTopic() // Create the topic. - createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -865,6 +866,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = memberId1, memberEpoch = 1, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + 10 * version + partitionId, expectedError = Errors.NONE, @@ -881,7 +883,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord .setGroupId(groupId) .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -895,14 +898,16 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).asJava) ).asJava), fetchOffsets( - groupId = groupId, - memberId = memberId1, - memberEpoch = 1, - partitions = List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1), - new TopicPartition("foo", 2) - ), + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(1) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 2).asJava) + ).asJava), requireStable = false, version = version.toShort ) @@ -1096,7 +1101,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord createOffsetsTopic() // Create the topic. 
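In the hunks above, fetchOffsets switches from a flat TopicPartition list to a full OffsetFetchRequestGroup, and the expected responses carry a topic id with an empty name from version 10 onwards while older versions still use names. A small sketch of the group payload these tests now build (group, member, topic and partition values are placeholders):

    import scala.jdk.CollectionConverters._
    import org.apache.kafka.common.Uuid
    import org.apache.kafka.common.message.OffsetFetchRequestData

    object OffsetFetchGroupSketch {
      // In the tests the id comes from createTopic; a random id stands in here.
      val topicId: Uuid = Uuid.randomUuid()

      val group: OffsetFetchRequestData.OffsetFetchRequestGroup =
        new OffsetFetchRequestData.OffsetFetchRequestGroup()
          .setGroupId("grp")
          .setMemberId("member-id")
          .setMemberEpoch(1)
          .setTopics(List(
            new OffsetFetchRequestData.OffsetFetchRequestTopics()
              .setName("foo")        // resolved by name on versions < 10
              .setTopicId(topicId)   // resolved by id on versions >= 10
              .setPartitionIndexes(List[Integer](0, 1, 2).asJava)
          ).asJava)
    }
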
- createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -1164,6 +1169,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = memberId1, memberEpoch = 1, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + 10 * version + partitionId, expectedError = Errors.NONE, @@ -1180,7 +1186,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord .setGroupId(groupId) .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -1194,14 +1201,16 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).asJava) ).asJava), fetchOffsets( - groupId = groupId, - memberId = memberId1, - memberEpoch = 1, - partitions = List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1), - new TopicPartition("foo", 2) - ), + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(1) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 2).asJava) + ).asJava), requireStable = false, version = version.toShort ) diff --git a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala index e302bd721e161..43c7d5aecf464 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala @@ -18,7 +18,6 @@ package kafka.server import kafka.network.RequestChannel -import kafka.raft.RaftManager import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache import org.apache.kafka.clients.admin.AlterConfigOp @@ -49,18 +48,22 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.test.MockController import org.apache.kafka.common.utils.MockTime import org.apache.kafka.common.{ElectionType, Uuid} +import org.apache.kafka.common.requests.RequestHeader import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.apache.kafka.controller.{Controller, ControllerRequestContext, ResultOrError} import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.network.metrics.RequestChannelMetrics -import org.apache.kafka.raft.QuorumConfig +import org.apache.kafka.network.Session +import org.apache.kafka.raft.{QuorumConfig, RaftManager} import org.apache.kafka.server.SimpleApiVersionManager import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult, Authorizer} import org.apache.kafka.server.common.{ApiMessageAndVersion, FinalizedFeatures, KRaftVersion, MetadataVersion, ProducerIdsBlock, RequestLocal} import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs} +import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager} import org.apache.kafka.server.util.FutureUtils import org.apache.kafka.storage.internals.log.CleanerConfig +import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import 
org.junit.jupiter.api.{AfterEach, Test} import org.junit.jupiter.params.ParameterizedTest @@ -113,14 +116,16 @@ class ControllerApisTest { private val clientRequestQuotaManager: ClientRequestQuotaManager = mock(classOf[ClientRequestQuotaManager]) private val neverThrottlingClientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) when(neverThrottlingClientControllerQuotaManager.newQuotaFor( - any(classOf[RequestChannel.Request]), + any(classOf[Session]), + any(classOf[RequestHeader]), any(classOf[Short]) )).thenReturn( MockControllerMutationQuota(Integer.MAX_VALUE) // never throttles ) private val alwaysThrottlingClientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) when(alwaysThrottlingClientControllerQuotaManager.newQuotaFor( - any(classOf[RequestChannel.Request]), + any(classOf[Session]), + any(classOf[RequestHeader]), any(classOf[Short]) )).thenReturn( MockControllerMutationQuota(0) // always throttles @@ -951,18 +956,18 @@ class ControllerApisTest { controllerApis = createControllerApis(None, controller, props) val request = new DeleteTopicsRequestData() request.topics().add(new DeleteTopicState().setName("foo").setTopicId(ZERO_UUID)) - assertThrows(classOf[TopicDeletionDisabledException], - () => controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, - ApiKeys.DELETE_TOPICS.latestVersion().toInt, - hasClusterAuth = false, - _ => Set("foo", "bar"), - _ => Set("foo", "bar"))) - assertThrows(classOf[InvalidRequestException], - () => controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, - 1, - hasClusterAuth = false, - _ => Set("foo", "bar"), - _ => Set("foo", "bar"))) + + TestUtils.assertFutureThrows(classOf[TopicDeletionDisabledException], controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, + ApiKeys.DELETE_TOPICS.latestVersion().toInt, + hasClusterAuth = false, + _ => Set("foo", "bar"), + _ => Set("foo", "bar"))) + + TestUtils.assertFutureThrows(classOf[InvalidRequestException], controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, + 1, + hasClusterAuth = false, + _ => Set("foo", "bar"), + _ => Set("foo", "bar"))) } @ParameterizedTest diff --git a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala index c3b8b425d4541..a40087a597376 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala @@ -25,12 +25,14 @@ import org.apache.kafka.common.metrics.QuotaViolationException import org.apache.kafka.common.metrics.stats.TokenBucket import org.apache.kafka.common.utils.MockTime import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager, PermissiveControllerMutationQuota, QuotaType, StrictControllerMutationQuota} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertFalse import org.junit.jupiter.api.Test +import java.util.Optional + class StrictControllerMutationQuotaTest { @Test def testControllerMutationQuotaViolation(): Unit = { @@ -118,7 +120,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { private val config = new ClientQuotaManagerConfig(10, 1) private def 
withQuotaManager(f: ControllerMutationQuotaManager => Unit): Unit = { - val quotaManager = new ControllerMutationQuotaManager(config, metrics, time,"", None) + val quotaManager = new ControllerMutationQuotaManager(config, metrics, time,"", Optional.empty()) try { f(quotaManager) } finally { @@ -146,9 +148,9 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { def testControllerMutationQuotaViolation(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Some(User).map(s => ClientQuotaManager.UserEntity(s)), - Some(ClientQuotaManager.ClientIdEntity(ClientId)), - Some(Quota.upperBound(10)) + Optional.of(new ClientQuotaManager.UserEntity(User)), + Optional.of(new ClientQuotaManager.ClientIdEntity(ClientId)), + Optional.of(Quota.upperBound(10)) ) val queueSizeMetric = metrics.metrics().get( metrics.metricName("queue-size", QuotaType.CONTROLLER_MUTATION.toString, "")) @@ -180,12 +182,12 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - quotaManager.throttledChannelReaper.doWork() + quotaManager.processThrottledChannelReaperDoWork() assertEquals(0, numCallbacks) // Callback can only be triggered after the delay time passes time.sleep(throttleTime) - quotaManager.throttledChannelReaper.doWork() + quotaManager.processThrottledChannelReaperDoWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) @@ -199,7 +201,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testNewStrictQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = { withQuotaManager { quotaManager => - assertEquals(UnboundedControllerMutationQuota, + assertEquals(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, quotaManager.newStrictQuotaFor(buildSession(User), ClientId)) } } @@ -208,9 +210,9 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { def testNewStrictQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Some(User).map(s => ClientQuotaManager.UserEntity(s)), - Some(ClientQuotaManager.ClientIdEntity(ClientId)), - Some(Quota.upperBound(10)) + Optional.of(new ClientQuotaManager.UserEntity(User)), + Optional.of(new ClientQuotaManager.ClientIdEntity(ClientId)), + Optional.of(Quota.upperBound(10)) ) val quota = quotaManager.newStrictQuotaFor(buildSession(User), ClientId) assertTrue(quota.isInstanceOf[StrictControllerMutationQuota]) @@ -221,7 +223,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testNewPermissiveQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = { withQuotaManager { quotaManager => - assertEquals(UnboundedControllerMutationQuota, + assertEquals(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId)) } } @@ -230,9 +232,9 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { def testNewPermissiveQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Some(User).map(s => ClientQuotaManager.UserEntity(s)), - Some(ClientQuotaManager.ClientIdEntity(ClientId)), - Some(Quota.upperBound(10)) + Optional.of(new ClientQuotaManager.UserEntity(User)), + Optional.of(new 
ClientQuotaManager.ClientIdEntity(ClientId)), + Optional.of(Quota.upperBound(10)) ) val quota = quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId) assertTrue(quota.isInstanceOf[PermissiveControllerMutationQuota]) diff --git a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala index f63434a256166..516b5edd082f3 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala @@ -16,7 +16,6 @@ package kafka.server import java.util.Properties import java.util.concurrent.ExecutionException import java.util.concurrent.TimeUnit -import kafka.server.ClientQuotaManager.DefaultTags import kafka.utils.TestUtils import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.internals.KafkaFutureImpl @@ -43,15 +42,13 @@ import org.apache.kafka.common.security.auth.AuthenticationContext import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} -import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.fail -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import scala.collection.Seq import scala.jdk.CollectionConverters._ @@ -126,9 +123,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { waitUserQuota(ThrottledPrincipal.getName, ControllerMutationRate) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testSetUnsetQuota(quorum: String): Unit = { + @Test + def testSetUnsetQuota(): Unit = { val rate = 1.5 val principal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "User") // Default Value @@ -143,9 +139,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { waitUserQuota(principal.getName, Long.MaxValue) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testQuotaMetric(quorum: String): Unit = { + @Test + def testQuotaMetric(): Unit = { asPrincipal(ThrottledPrincipal) { // Metric is lazily created assertTrue(quotaMetric(principal.getName).isEmpty) @@ -166,9 +161,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testStrictCreateTopicsRequest(quorum: String): Unit = { + @Test + def testStrictCreateTopicsRequest(): Unit = { asPrincipal(ThrottledPrincipal) { // Create two topics worth of 30 partitions each. As we use a strict quota, we // expect one to be created and one to be rejected. @@ -190,9 +184,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testPermissiveCreateTopicsRequest(quorum: String): Unit = { + @Test + def testPermissiveCreateTopicsRequest(): Unit = { asPrincipal(ThrottledPrincipal) { // Create two topics worth of 30 partitions each. 
As we use a permissive quota, we // expect both topics to be created. @@ -204,9 +197,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnboundedCreateTopicsRequest(quorum: String): Unit = { + @Test + def testUnboundedCreateTopicsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { // Create two topics worth of 30 partitions each. As we use an user without quota, we // expect both topics to be created. The throttle time should be equal to 0. @@ -216,9 +208,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testStrictDeleteTopicsRequest(quorum: String): Unit = { + @Test + def testStrictDeleteTopicsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) } @@ -244,9 +235,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testPermissiveDeleteTopicsRequest(quorum: String): Unit = { + @Test + def testPermissiveDeleteTopicsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) } @@ -262,9 +252,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnboundedDeleteTopicsRequest(quorum: String): Unit = { + @Test + def testUnboundedDeleteTopicsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) @@ -276,9 +265,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testStrictCreatePartitionsRequest(quorum: String): Unit = { + @Test + def testStrictCreatePartitionsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) } @@ -304,9 +292,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testPermissiveCreatePartitionsRequest(quorum: String): Unit = { + @Test + def testPermissiveCreatePartitionsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) } @@ -322,9 +309,8 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnboundedCreatePartitionsRequest(quorum: String): Unit = { + @Test + def testUnboundedCreatePartitionsRequest(): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) @@ -402,7 +388,7 @@ class ControllerMutationQuotaTest extends BaseRequestTest { "tokens", QuotaType.CONTROLLER_MUTATION.toString, "Tracking remaining tokens in the token bucket per user/client-id", - Map(DefaultTags.User -> user, DefaultTags.ClientId -> "").asJava) + java.util.Map.of(ClientQuotaManager.USER_TAG, user, ClientQuotaManager.CLIENT_ID_TAG, "")) Option(metrics.metric(metricName)) } diff --git a/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala index 68f775fb3e758..46ea20758e2df 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala @@ -76,7 +76,7 @@ class ControllerRegistrationManagerTest { "controller-registration-manager-test-", createSupportedFeatures(MetadataVersion.IBP_3_7_IV0), RecordTestUtils.createTestControllerRegistration(1, false).incarnationId(), - ListenerInfo.create(context.config.controllerListeners.map(_.toPublic).asJava), + ListenerInfo.create(context.config.controllerListeners.asJava), new ExponentialBackoff(1, 2, 100, 0.02)) } diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala index ca61193425a1f..df2ef30ee45d0 100644 --- a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala @@ -24,16 +24,14 @@ import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCol import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.CreateTopicsRequest import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import scala.jdk.CollectionConverters._ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testValidCreateTopicsRequests(quorum: String): Unit = { + @Test + def testValidCreateTopicsRequests(): Unit = { // Generated assignments validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic1")))) validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic2", replicationFactor = 3)))) @@ -61,9 +59,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { topicReq("topic14", replicationFactor = -1, numPartitions = 2)))) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testErrorCreateTopicsRequests(quorum: String): Unit = { + @Test + def testErrorCreateTopicsRequests(): Unit = { val existingTopic = "existing-topic" createTopic(existingTopic) // Basic @@ -99,9 +96,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { validateTopicExists("partial-none") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testInvalidCreateTopicsRequests(quorum: String): Unit = { + @Test + def testInvalidCreateTopicsRequests(): Unit = { // Partitions/ReplicationFactor and ReplicaAssignment validateErrorCreateTopicsRequests(topicsReq(Seq( topicReq("bad-args-topic", numPartitions = 10, replicationFactor = 3, @@ -114,9 +110,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { Map("bad-args-topic" -> error(Errors.INVALID_REQUEST)), checkErrorMessage = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateTopicsRequestVersions(quorum: String): Unit = { + @Test + def testCreateTopicsRequestVersions(): Unit = { for (version <- ApiKeys.CREATE_TOPICS.oldestVersion to ApiKeys.CREATE_TOPICS.latestVersion) { val topic = s"topic_$version" val data = new CreateTopicsRequestData() @@ -153,9 +148,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateClusterMetadataTopic(quorum: String): Unit = { + @Test + def testCreateClusterMetadataTopic(): Unit = { validateErrorCreateTopicsRequests( topicsReq(Seq(topicReq(Topic.CLUSTER_METADATA_TOPIC_NAME))), 
Map(Topic.CLUSTER_METADATA_TOPIC_NAME -> diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala deleted file mode 100644 index 96ebfd66683b6..0000000000000 --- a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import java.util -import java.util.Properties -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.errors.PolicyViolationException -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.server.config.ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG -import org.apache.kafka.server.policy.CreateTopicPolicy -import org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata -import org.junit.jupiter.api.TestInfo -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -import scala.jdk.CollectionConverters._ - -class CreateTopicsRequestWithPolicyTest extends AbstractCreateTopicsRequestTest { - import CreateTopicsRequestWithPolicyTest._ - - override def brokerPropertyOverrides(properties: Properties): Unit = { - super.brokerPropertyOverrides(properties) - properties.put(CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, classOf[Policy].getName) - } - - override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { - val properties = new Properties() - properties.put(CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, classOf[Policy].getName) - Seq(properties) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testValidCreateTopicsRequests(quorum: String): Unit = { - validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic1", - numPartitions = 5)))) - - validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic2", - numPartitions = 5, replicationFactor = 3)), - validateOnly = true)) - - validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic3", - numPartitions = 11, replicationFactor = 2, - config = Map(TopicConfig.RETENTION_MS_CONFIG -> 4999.toString))), - validateOnly = true)) - - validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic4", - assignment = Map(0 -> List(1, 0), 1 -> List(0, 1)))))) - } - - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testErrorCreateTopicsRequests(quorum: String): Unit = { - val existingTopic = "existing-topic" - createTopic(existingTopic, 5) - - // Policy violations - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic1", - numPartitions = 4, replicationFactor = 1))), - Map("policy-topic1" -> error(Errors.POLICY_VIOLATION, 
Some("Topics should have at least 5 partitions, received 4")))) - - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic2", - numPartitions = 4, replicationFactor = 3)), validateOnly = true), - Map("policy-topic2" -> error(Errors.POLICY_VIOLATION, Some("Topics should have at least 5 partitions, received 4")))) - - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic3", - numPartitions = 11, replicationFactor = 2, - config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), - Map("policy-topic3" -> error(Errors.POLICY_VIOLATION, - Some("RetentionMs should be less than 5000ms if replicationFactor > 5")))) - - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic4", - numPartitions = 11, replicationFactor = 3, - config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), - Map("policy-topic4" -> error(Errors.POLICY_VIOLATION, - Some("RetentionMs should be less than 5000ms if replicationFactor > 5")))) - - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic5", - assignment = Map(0 -> List(1), 1 -> List(0)), - config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), - Map("policy-topic5" -> error(Errors.POLICY_VIOLATION, - Some("Topic partitions should have at least 2 partitions, received 1 for partition 0")))) - - // Check that basic errors still work - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq(existingTopic, - numPartitions = 5, replicationFactor = 1))), - Map(existingTopic -> error(Errors.TOPIC_ALREADY_EXISTS, - Some("Topic 'existing-topic' already exists.")))) - - var errorMsg = "Unable to replicate the partition 4 time(s): The target replication factor of 4 cannot be reached because only 3 broker(s) are registered." - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication", - numPartitions = 10, replicationFactor = brokerCount + 1)), validateOnly = true), - Map("error-replication" -> error(Errors.INVALID_REPLICATION_FACTOR, - Some(errorMsg)))) - - errorMsg = "Replication factor must be larger than 0, or -1 to use the default value." - validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication2", - numPartitions = 10, replicationFactor = -2)), validateOnly = true), - Map("error-replication2" -> error(Errors.INVALID_REPLICATION_FACTOR, - Some(errorMsg)))) - - errorMsg = "Number of partitions was set to an invalid non-positive value." 
- validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-partitions", - numPartitions = -2, replicationFactor = 1)), validateOnly = true), - Map("error-partitions" -> error(Errors.INVALID_PARTITIONS, - Some(errorMsg)))) - } - -} - -object CreateTopicsRequestWithPolicyTest { - - class Policy extends CreateTopicPolicy { - - var configs: Map[String, _] = _ - var closed = false - - def configure(configs: util.Map[String, _]): Unit = { - this.configs = configs.asScala.toMap - } - - def validate(requestMetadata: RequestMetadata): Unit = { - if (Topic.isInternal(requestMetadata.topic())) { - // Do not verify internal topics - return - } - require(!closed, "Policy should not be closed") - require(configs.nonEmpty, "configure should have been called with non empty configs") - - import requestMetadata._ - if (numPartitions != null || replicationFactor != null) { - require(numPartitions != null, s"numPartitions should not be null, but it is $numPartitions") - require(replicationFactor != null, s"replicationFactor should not be null, but it is $replicationFactor") - require(replicasAssignments == null, s"replicaAssignments should be null, but it is $replicasAssignments") - - if (numPartitions < 5) - throw new PolicyViolationException(s"Topics should have at least 5 partitions, received $numPartitions") - - if (numPartitions > 10) { - if (requestMetadata.configs.asScala.get(TopicConfig.RETENTION_MS_CONFIG).fold(true)(_.toInt > 5000)) - throw new PolicyViolationException("RetentionMs should be less than 5000ms if replicationFactor > 5") - } else - require(requestMetadata.configs.isEmpty, s"Topic configs should be empty, but it is ${requestMetadata.configs}") - - } else { - require(numPartitions == null, s"numPartitions should be null, but it is $numPartitions") - require(replicationFactor == null, s"replicationFactor should be null, but it is $replicationFactor") - require(replicasAssignments != null, s"replicaAssignments should not be null, but it is $replicasAssignments") - - replicasAssignments.asScala.toSeq.sortBy { case (tp, _) => tp }.foreach { case (partitionId, assignment) => - if (assignment.size < 2) - throw new PolicyViolationException("Topic partitions should have at least 2 partitions, received " + - s"${assignment.size} for partition $partitionId") - } - } - - } - - def close(): Unit = closed = true - - } -} diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala index b4c8d922dd9b7..8e20f98aaafd3 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala @@ -21,10 +21,8 @@ import kafka.security.JaasTestUtils import java.util import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.common.errors.UnsupportedByAuthenticationException -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions.assertThrows -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import scala.concurrent.ExecutionException import scala.jdk.javaapi.OptionConverters @@ -48,9 +46,8 @@ class DelegationTokenRequestsOnPlainTextTest extends BaseRequestTest { config } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDelegationTokenRequests(quorum: 
String): Unit = { + @Test + def testDelegationTokenRequests(): Unit = { adminClient = Admin.create(createAdminConfig) val createResult = adminClient.createDelegationToken() diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala index 2c211eb042a11..38040d0a120a0 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala @@ -25,9 +25,7 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.SecurityUtils import org.apache.kafka.server.config.DelegationTokenManagerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.util import scala.concurrent.ExecutionException @@ -65,9 +63,8 @@ class DelegationTokenRequestsTest extends IntegrationTestHarness with SaslSetup config } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDelegationTokenRequests(quorum: String): Unit = { + @Test + def testDelegationTokenRequests(): Unit = { adminClient = Admin.create(createAdminConfig) // create token1 with renewer1 diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala index c380816f769fe..bb55cf33ffd59 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala @@ -22,9 +22,7 @@ import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.common.errors.DelegationTokenDisabledException import org.apache.kafka.common.security.auth.SecurityProtocol import org.junit.jupiter.api.Assertions.assertThrows -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.util import scala.concurrent.ExecutionException @@ -55,9 +53,8 @@ class DelegationTokenRequestsWithDisableTokenFeatureTest extends BaseRequestTest config } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDelegationTokenRequests(quorum: String): Unit = { + @Test + def testDelegationTokenRequests(): Unit = { adminClient = Admin.create(createAdminConfig) val createResult = adminClient.createDelegationToken() diff --git a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala index 88733a8657614..d945c02b6f5f5 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala @@ -48,7 +48,7 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator createOffsetsTopic() // Create the topic. 
- createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -78,8 +78,8 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator ) deleteGroups( - groupIds = List("grp-non-empty", "grp"), - expectedErrors = List(Errors.NON_EMPTY_GROUP, Errors.NONE), + groupIds = List("grp-non-empty", "grp", ""), + expectedErrors = List(Errors.NON_EMPTY_GROUP, Errors.NONE, Errors.GROUP_ID_NOT_FOUND), version = version.toShort ) @@ -89,6 +89,7 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.GROUP_ID_NOT_FOUND, diff --git a/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala index 587fc4e5e626a..de2eb967d9718 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala @@ -25,8 +25,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{DeleteRecordsRequest, DeleteRecordsResponse} import org.apache.kafka.common.serialization.StringSerializer import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import java.util.Collections import java.util.concurrent.TimeUnit @@ -36,9 +35,8 @@ class DeleteRecordsRequestTest extends BaseRequestTest { private val TIMEOUT_MS = 1000 private val MESSAGES_PRODUCED_PER_PARTITION = 10 - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteRecordsHappyCase(quorum: String): Unit = { + @Test + def testDeleteRecordsHappyCase(): Unit = { val (topicPartition: TopicPartition, leaderId: Int) = createTopicAndSendRecords // Create the DeleteRecord request requesting deletion of offset which is not present @@ -61,9 +59,8 @@ class DeleteRecordsRequestTest extends BaseRequestTest { validateLogStartOffsetForTopic(topicPartition, offsetToDelete) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testErrorWhenDeletingRecordsWithInvalidOffset(quorum: String): Unit = { + @Test + def testErrorWhenDeletingRecordsWithInvalidOffset(): Unit = { val (topicPartition: TopicPartition, leaderId: Int) = createTopicAndSendRecords // Create the DeleteRecord request requesting deletion of offset which is not present @@ -86,9 +83,8 @@ class DeleteRecordsRequestTest extends BaseRequestTest { validateLogStartOffsetForTopic(topicPartition, 0) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testErrorWhenDeletingRecordsWithInvalidTopic(quorum: String): Unit = { + @Test + def testErrorWhenDeletingRecordsWithInvalidTopic(): Unit = { val invalidTopicPartition = new TopicPartition("invalid-topic", 0) // Create the DeleteRecord request requesting deletion of offset which is not present val offsetToDelete = 1 diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala index ca088dd168032..d51aba04c8dac 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala @@ -28,17 +28,15 @@ import org.apache.kafka.common.requests.DeleteTopicsResponse import org.apache.kafka.common.requests.MetadataRequest 
import org.apache.kafka.common.requests.MetadataResponse import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import scala.collection.Seq import scala.jdk.CollectionConverters._ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTopicDeletionClusterHasOfflinePartitions(quorum: String): Unit = { + @Test + def testTopicDeletionClusterHasOfflinePartitions(): Unit = { // Create two topics with one partition/replica. Make one of them offline. val offlineTopic = "topic-1" val onlineTopic = "topic-2" @@ -70,9 +68,8 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { "The topics are found in the Broker's cache") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testValidDeleteTopicRequests(quorum: String): Unit = { + @Test + def testValidDeleteTopicRequests(): Unit = { val timeout = 10000 // Single topic createTopic("topic-1") @@ -138,9 +135,8 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { connectAndReceive[DeleteTopicsResponse](request, destination = socketServer) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteTopicsVersions(quorum: String): Unit = { + @Test + def testDeleteTopicsVersions(): Unit = { val timeout = 10000 for (version <- ApiKeys.DELETE_TOPICS.oldestVersion to ApiKeys.DELETE_TOPICS.latestVersion) { info(s"Creating and deleting tests for version $version") diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala index 4232030634cb8..d8d654082e9ad 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala @@ -25,9 +25,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{DeleteTopicsRequest, DeleteTopicsResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.TestInfo -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{Test, TestInfo} class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { @@ -48,9 +46,8 @@ class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { props.map(KafkaConfig.fromProps) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDeleteRecordsRequest(quorum: String): Unit = { + @Test + def testDeleteRecordsRequest(): Unit = { val topic = "topic-1" val request = new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() diff --git a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala index 6e43f904c11c7..1d3048cec6ae8 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala @@ -27,9 +27,7 @@ import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.config.{ServerConfigs, ReplicationConfigs} import org.junit.jupiter.api.Assertions.{assertEquals, 
assertTrue} -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import java.lang.{Byte => JByte} import java.util.Properties @@ -48,15 +46,13 @@ class DescribeClusterRequestTest extends BaseRequestTest { doSetup(testInfo, createOffsetsTopic = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeClusterRequestIncludingClusterAuthorizedOperations(quorum: String): Unit = { + @Test + def testDescribeClusterRequestIncludingClusterAuthorizedOperations(): Unit = { testDescribeClusterRequest(true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeClusterRequestExcludingClusterAuthorizedOperations(quorum: String): Unit = { + @Test + def testDescribeClusterRequestExcludingClusterAuthorizedOperations(): Unit = { testDescribeClusterRequest(false) } diff --git a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala index d16ea1fd9e56c..67db0449ffe72 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala @@ -93,10 +93,15 @@ class DescribeGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinat .setGroupId("grp-unknown") .setGroupState(ClassicGroupState.DEAD.toString) // Return DEAD group when the group does not exist. .setErrorCode(if (version >= 6) Errors.GROUP_ID_NOT_FOUND.code() else Errors.NONE.code()) - .setErrorMessage(if (version >= 6) "Group grp-unknown not found." else null) + .setErrorMessage(if (version >= 6) "Group grp-unknown not found." else null), + new DescribedGroup() + .setGroupId("") + .setGroupState(ClassicGroupState.DEAD.toString) // Return DEAD group when the group does not exist. + .setErrorCode(if (version >= 6) Errors.GROUP_ID_NOT_FOUND.code() else Errors.NONE.code()) + .setErrorMessage(if (version >= 6) "Group not found." 
else null) ), describeGroups( - groupIds = List("grp-1", "grp-2", "grp-unknown"), + groupIds = List("grp-1", "grp-2", "grp-unknown", ""), version = version.toShort ) ) diff --git a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala index d22b53c0cb67c..ab2ea99782d11 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala @@ -25,8 +25,7 @@ import org.apache.kafka.common.message.DescribeLogDirsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import scala.jdk.CollectionConverters._ @@ -39,11 +38,10 @@ class DescribeLogDirsRequestTest extends BaseRequestTest { val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeLogDirsRequest(quorum: String): Unit = { - val onlineDir = new File(brokers.head.config.logDirs.head).getAbsolutePath - val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath + @Test + def testDescribeLogDirsRequest(): Unit = { + val onlineDir = new File(brokers.head.config.logDirs.get(0)).getAbsolutePath + val offlineDir = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath brokers.head.replicaManager.handleLogDirFailure(offlineDir) createTopic(topic, partitionNum, 1) TestUtils.generateAndProduceMessages(brokers, topic, 10) diff --git a/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala index 2aa8f5a9e2ca0..bc0e768c6f48f 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala @@ -19,12 +19,12 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.DescribeQuorumRequest.singletonRequest -import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, DescribeQuorumRequest, DescribeQuorumResponse} +import org.apache.kafka.common.requests.{DescribeQuorumRequest, DescribeQuorumResponse} import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions._ import scala.jdk.CollectionConverters._ -import scala.reflect.ClassTag @ClusterTestDefaults(types = Array(Type.KRAFT)) class DescribeQuorumRequestTest(cluster: ClusterInstance) { @@ -35,7 +35,7 @@ class DescribeQuorumRequestTest(cluster: ClusterInstance) { val request = new DescribeQuorumRequest.Builder( singletonRequest(KafkaRaftServer.MetadataPartition) ).build(version.toShort) - val response = connectAndReceive[DescribeQuorumResponse](request) + val response = IntegrationTestUtils.connectAndReceive[DescribeQuorumResponse](request, cluster.brokerBoundPorts().get(0)) assertEquals(Errors.NONE, Errors.forCode(response.data.errorCode)) assertEquals("", response.data.errorMessage) @@ -81,21 +81,8 @@ class DescribeQuorumRequestTest(cluster: ClusterInstance) { val nodes = response.data.nodes().asScala assertEquals(cluster.controllerIds().asScala, 
nodes.map(_.nodeId()).toSet) val node = nodes.find(_.nodeId() == cluster.controllers().keySet().asScala.head) - assertEquals(cluster.controllerListenerName().get().value(), node.get.listeners().asScala.head.name()) + assertEquals(cluster.controllerListenerName().value(), node.get.listeners().asScala.head.name()) } } } - - private def connectAndReceive[T <: AbstractResponse]( - request: AbstractRequest - )( - implicit classTag: ClassTag[T] - ): T = { - IntegrationTestUtils.connectAndReceive( - request, - cluster.brokerSocketServers().asScala.head, - cluster.clientListener() - ) - } - } diff --git a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala index 7a4a885d9edd7..418753e4a6e18 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala @@ -24,8 +24,7 @@ import org.apache.kafka.common.requests.{DescribeUserScramCredentialsRequest, De import org.apache.kafka.metadata.authorizer.StandardAuthorizer import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import java.util.Properties @@ -39,9 +38,8 @@ class DescribeUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTe properties.put(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, classOf[DescribeCredentialsTest.TestPrincipalBuilderReturningUnauthorized].getName) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeNotAuthorized(quorum: String): Unit = { + @Test + def testDescribeNotAuthorized(): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData()).build() val response = sendDescribeUserScramCredentialsRequest(request) diff --git a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala index bc8bd5a13ac29..850cca028e50e 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala @@ -28,8 +28,6 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ @@ -48,9 +46,8 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { super.setUp(testInfo) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeNothing(quorum: String): Unit = { + @Test + def testDescribeNothing(): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData()).build() val response = sendDescribeUserScramCredentialsRequest(request) @@ -60,9 +57,8 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(0, response.data.results.size, "Expected no credentials when describing everything and there are no credentials") 
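
The same mechanical migration recurs across these request tests: because "kraft" was the only value ever supplied, the @ParameterizedTest/@ValueSource pair adds nothing and is collapsed to a plain @Test, and the unused quorum parameter is dropped from the method signature. A minimal sketch of the pattern, using a hypothetical testSomething for illustration:

    // Before: parameterized over a single hard-coded "kraft" value
    @ParameterizedTest
    @ValueSource(strings = Array("kraft"))
    def testSomething(quorum: String): Unit = { /* ... */ }

    // After: a plain JUnit 5 test; the quorum argument no longer exists
    @Test
    def testSomething(): Unit = { /* ... */ }
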
} - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeWithNull(quorum: String): Unit = { + @Test + def testDescribeWithNull(): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData().setUsers(null)).build() val response = sendDescribeUserScramCredentialsRequest(request) @@ -82,9 +78,8 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(Errors.NONE.code, error, "Did not expect controller error when routed to non-controller") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDescribeSameUserTwice(quorum: String): Unit = { + @Test + def testDescribeSameUserTwice(): Unit = { val user = "user1" val userName = new UserName().setName(user) val request = new DescribeUserScramCredentialsRequest.Builder( @@ -98,9 +93,8 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(s"Cannot describe SCRAM credentials for the same user twice in a single request: $user", result.errorMessage) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnknownUser(quorum: String): Unit = { + @Test + def testUnknownUser(): Unit = { val unknownUser = "unknownUser" val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData().setUsers(List(new UserName().setName(unknownUser)).asJava)).build() diff --git a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala index 17ad2200dcc3c..141b5138c0753 100755 --- a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala @@ -31,7 +31,7 @@ import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.metrics.{JmxReporter, Metrics} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.DynamicThreadPool import org.apache.kafka.server.authorizer._ @@ -175,6 +175,7 @@ class DynamicBrokerConfigTest { assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_READER_THREADS, config.remoteLogManagerConfig.remoteLogReaderThreads()) + assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize()) val serverMock = mock(classOf[KafkaBroker]) val remoteLogManager = mock(classOf[RemoteLogManager]) @@ -203,6 +204,13 @@ class DynamicBrokerConfigTest { config.dynamicConfig.updateDefaultConfig(props) assertEquals(6, config.remoteLogManagerConfig.remoteLogReaderThreads()) verify(remoteLogManager).resizeReaderThreadPool(6) + + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "3") + config.dynamicConfig.validate(props, perBrokerConfig = false) + config.dynamicConfig.updateDefaultConfig(props) + assertEquals(3, config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize()) + 
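// Beyond updating the config value itself, the dynamic change is expected to resize the
// follower thread pool on the running RemoteLogManager, which the verify below checks.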
verify(remoteLogManager).resizeFollowerThreadPool(3) + props.clear() verifyNoMoreInteractions(remoteLogManager) } @@ -241,6 +249,33 @@ class DynamicBrokerConfigTest { val err3 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = true)) assertTrue(err3.getMessage.contains("Value must be at least 1")) verifyNoMoreInteractions(remoteLogManager) + + val props4 = new Properties() + props4.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "10") + val err4 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props4, perBrokerConfig = false)) + assertTrue(err4.getMessage.contains("value should not be greater than double the current value")) + verifyNoMoreInteractions(remoteLogManager) + } + + @Test + def testDynamicRemoteLogManagerFollowerThreadPoolSizeConfig(): Unit = { + val origProps = TestUtils.createBrokerConfig(0, port = 9092) + origProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP, "10") + val config = KafkaConfig(origProps) + + val serverMock = mock(classOf[KafkaBroker]) + val remoteLogManager = mock(classOf[RemoteLogManager]) + when(serverMock.config).thenReturn(config) + when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) + + config.dynamicConfig.initialize(None) + config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) + + val props = new Properties() + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "2") + val err = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = false)) + assertTrue(err.getMessage.contains("value should be at least half the current value")) + verifyNoMoreInteractions(remoteLogManager) } @Test @@ -670,16 +705,6 @@ class DynamicBrokerConfigTest { assertTrue(m.currentReporters.isEmpty) } - @Test - def testNonInternalValuesDoesNotExposeInternalConfigs(): Unit = { - val props = TestUtils.createBrokerConfig(0, port = 8181) - props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG, "1024") - val config = new KafkaConfig(props) - assertFalse(config.nonInternalValues.containsKey(MetadataLogConfig.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG)) - config.updateCurrentConfig(new KafkaConfig(props)) - assertFalse(config.nonInternalValues.containsKey(MetadataLogConfig.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG)) - } - @Test def testDynamicLogLocalRetentionMsConfig(): Unit = { val props = TestUtils.createBrokerConfig(0, port = 8181) @@ -1036,7 +1061,7 @@ class DynamicBrokerConfigTest { props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "SASL_PLAINTEXT://localhost:8181") ctx.config.dynamicConfig.updateDefaultConfig(props) ctx.config.effectiveAdvertisedBrokerListeners.foreach(e => - assertEquals(SecurityProtocol.PLAINTEXT.name, e.listenerName.value) + assertEquals(SecurityProtocol.PLAINTEXT.name, e.listener) ) assertFalse(ctx.currentDefaultLogConfig.get().originals().containsKey(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG)) } diff --git a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala index 28c8d694f9d51..519a7d951a381 100644 --- a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala @@ -40,8 +40,6 @@ import org.apache.kafka.storage.internals.log.{LogConfig, UnifiedLog} import org.apache.kafka.test.TestUtils.assertFutureThrows import 
org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{Test, Timeout} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.any import org.mockito.Mockito._ @@ -61,9 +59,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { List(KafkaConfig.fromProps(cfg)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConfigChange(quorum: String): Unit = { + @Test + def testConfigChange(): Unit = { val oldVal: java.lang.Long = 100000L val newVal: java.lang.Long = 200000L val tp = new TopicPartition("test", 0) @@ -95,21 +92,20 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicTopicConfigChange(quorum: String): Unit = { + @Test + def testDynamicTopicConfigChange(): Unit = { val tp = new TopicPartition("test", 0) - val oldSegmentSize = 1000 + val oldSegmentSize = 2 * 1024 * 1024 val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, oldSegmentSize.toString) createTopic(tp.topic, 1, 1, logProps) TestUtils.retry(10000) { val logOpt = this.brokers.head.logManager.getLog(tp) assertTrue(logOpt.isDefined) - assertEquals(oldSegmentSize, logOpt.get.config.segmentSize) + assertEquals(oldSegmentSize, logOpt.get.config.segmentSize()) } - val newSegmentSize = 2000 + val newSegmentSize = 2 * 1024 * 1024 val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) @@ -121,7 +117,7 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } val log = brokers.head.logManager.getLog(tp).get TestUtils.retry(10000) { - assertEquals(newSegmentSize, log.config.segmentSize) + assertEquals(newSegmentSize, log.config.segmentSize()) } (1 to 50).foreach(i => TestUtils.produceMessage(brokers, tp.topic, i.toString)) @@ -180,59 +176,52 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClientIdQuotaConfigChange(quorum: String): Unit = { + @Test + def testClientIdQuotaConfigChange(): Unit = { val m = new util.HashMap[String, String] m.put(CLIENT_ID, "testClient") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUserQuotaConfigChange(quorum: String): Unit = { + @Test + def testUserQuotaConfigChange(): Unit = { val m = new util.HashMap[String, String] m.put(USER, "ANONYMOUS") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUserClientIdQuotaChange(quorum: String): Unit = { + @Test + def testUserClientIdQuotaChange(): Unit = { val m = new util.HashMap[String, String] m.put(USER, "ANONYMOUS") m.put(CLIENT_ID, "testClient") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultClientIdQuotaConfigChange(quorum: String): Unit = { + @Test + def testDefaultClientIdQuotaConfigChange(): Unit = { val m = new util.HashMap[String, String] m.put(CLIENT_ID, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultUserQuotaConfigChange(quorum: String): Unit = { + @Test 
+ def testDefaultUserQuotaConfigChange(): Unit = { val m = new util.HashMap[String, String] m.put(USER, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultUserClientIdQuotaConfigChange(quorum: String): Unit = { + @Test + def testDefaultUserClientIdQuotaConfigChange(): Unit = { val m = new util.HashMap[String, String] m.put(USER, null) m.put(CLIENT_ID, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIpQuotaInitialization(quorum: String): Unit = { + @Test + def testIpQuotaInitialization(): Unit = { val broker = brokers.head val admin = createAdminClient() try { @@ -252,9 +241,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIpQuotaConfigChange(quorum: String): Unit = { + @Test + def testIpQuotaConfigChange(): Unit = { val admin = createAdminClient() try { val alterations = util.Arrays.asList( @@ -296,9 +284,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { private def tempTopic() : String = "testTopic" + random.nextInt(1000000) - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConfigChangeOnNonExistingTopicWithAdminClient(quorum: String): Unit = { + @Test + def testConfigChangeOnNonExistingTopicWithAdminClient(): Unit = { val topic = tempTopic() val admin = createAdminClient() try { @@ -314,9 +301,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterDefaultTopicConfig(quorum: String): Unit = { + @Test + def testIncrementalAlterDefaultTopicConfig(): Unit = { val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.TOPIC, "") @@ -346,9 +332,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testBrokerIdConfigChangeAndDelete(quorum: String): Unit = { + @Test + def testBrokerIdConfigChangeAndDelete(): Unit = { val newValue: Long = 100000L val brokerId: String = this.brokers.head.config.brokerId.toString setBrokerConfigs(brokerId, newValue) @@ -370,9 +355,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultBrokerIdConfigChangeAndDelete(quorum: String): Unit = { + @Test + def testDefaultBrokerIdConfigChangeAndDelete(): Unit = { val newValue: Long = 100000L val brokerId: String = "" setBrokerConfigs(brokerId, newValue) @@ -393,9 +377,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDefaultAndBrokerIdConfigChange(quorum: String): Unit = { + @Test + def testDefaultAndBrokerIdConfigChange(): Unit = { val newValue: Long = 100000L val brokerId: String = this.brokers.head.config.brokerId.toString setBrokerConfigs(brokerId, newValue) @@ -411,9 +394,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDynamicGroupConfigChange(quorum: String): Unit = { + @Test + def testDynamicGroupConfigChange(): Unit = { val newSessionTimeoutMs = 50000 val consumerGroupId = "group-foo" val admin = createAdminClient() @@ -438,9 +420,8 @@ class 
DynamicConfigChangeTest extends KafkaServerTestHarness { assertEquals(newSessionTimeoutMs, groupConfig.consumerSessionTimeoutMs()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft+kip848")) - def testDynamicShareGroupConfigChange(quorum: String): Unit = { + @Test + def testDynamicShareGroupConfigChange(): Unit = { val newRecordLockDurationMs = 50000 val shareGroupId = "group-foo" val admin = createAdminClient() @@ -465,9 +446,8 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { assertEquals(newRecordLockDurationMs, groupConfig.shareRecordLockDurationMs) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIncrementalAlterDefaultGroupConfig(quorum: String): Unit = { + @Test + def testIncrementalAlterDefaultGroupConfig(): Unit = { val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.GROUP, "") diff --git a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala index cf7c6bb4d3c97..ed1f94c319349 100755 --- a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala @@ -28,16 +28,15 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.message.ProduceRequestData import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.types.Type -import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.protocol.{ApiKeys, ByteBufferAccessor, Errors} import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.{ProduceResponse, ResponseHeader} import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.ByteUtils -import org.apache.kafka.common.{TopicPartition, requests} +import org.apache.kafka.common.{TopicPartition, Uuid, requests} import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import scala.jdk.CollectionConverters._ @@ -118,9 +117,8 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceRequestWithNullClientId(quorum: String): Unit = { + @Test + def testProduceRequestWithNullClientId(): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val correlationId = -1 @@ -129,10 +127,12 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { val version = ApiKeys.PRODUCE.latestVersion: Short val (serializedBytes, responseHeaderVersion) = { val headerBytes = requestHeaderBytes(ApiKeys.PRODUCE.id, version, "", correlationId) + val topicId = getTopicIds().getOrElse(topicPartition.topic(), Uuid.ZERO_UUID) val request = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()).setPartitionData(Collections.singletonList( + .setTopicId(topicId) + .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(topicPartition.partition()) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("message".getBytes)))))) @@ -152,7 +152,7 @@ class EdgeCaseRequestTest extends 
KafkaServerTestHarness { val responseBuffer = ByteBuffer.wrap(response) val responseHeader = ResponseHeader.parse(responseBuffer, responseHeaderVersion) - val produceResponse = ProduceResponse.parse(responseBuffer, version) + val produceResponse = ProduceResponse.parse(new ByteBufferAccessor(responseBuffer), version) assertEquals(0, responseBuffer.remaining, "The response should parse completely") assertEquals(correlationId, responseHeader.correlationId, "The correlationId should match request") @@ -164,27 +164,23 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode), "There should be no error") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHeaderOnlyRequest(quorum: String): Unit = { + @Test + def testHeaderOnlyRequest(): Unit = { verifyDisconnect(requestHeaderBytes(ApiKeys.PRODUCE.id, 1)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testInvalidApiKeyRequest(quorum: String): Unit = { + @Test + def testInvalidApiKeyRequest(): Unit = { verifyDisconnect(requestHeaderBytes(-1, 0)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testInvalidApiVersionRequest(quorum: String): Unit = { + @Test + def testInvalidApiVersionRequest(): Unit = { verifyDisconnect(requestHeaderBytes(ApiKeys.PRODUCE.id, -1)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMalformedHeaderRequest(quorum: String): Unit = { + @Test + def testMalformedHeaderRequest(): Unit = { val serializedBytes = { // Only send apiKey and apiVersion val buffer = ByteBuffer.allocate( diff --git a/core/src/test/scala/unit/kafka/server/ExpiringErrorCacheTest.scala b/core/src/test/scala/unit/kafka/server/ExpiringErrorCacheTest.scala new file mode 100644 index 0000000000000..be02f95374cb3 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/ExpiringErrorCacheTest.scala @@ -0,0 +1,400 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka.server + +import org.apache.kafka.server.util.MockTime +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{BeforeEach, Test} +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global +import scala.util.Random +import java.util.concurrent.{CountDownLatch, TimeUnit} + +class ExpiringErrorCacheTest { + + private var mockTime: MockTime = _ + private var cache: ExpiringErrorCache = _ + + @BeforeEach + def setUp(): Unit = { + mockTime = new MockTime() + } + + // Basic Functionality Tests + + @Test + def testPutAndGet(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 2000L) + + val errors = cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()) + assertEquals(2, errors.size) + assertEquals("error1", errors("topic1")) + assertEquals("error2", errors("topic2")) + } + + @Test + def testGetNonExistentTopic(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + + val errors = cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()) + assertEquals(1, errors.size) + assertEquals("error1", errors("topic1")) + assertFalse(errors.contains("topic2")) + } + + @Test + def testUpdateExistingEntry(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + assertEquals("error1", cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds())("topic1")) + + // Update with new error + cache.put("topic1", "error2", 2000L) + assertEquals("error2", cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds())("topic1")) + } + + @Test + def testGetMultipleTopics(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 1000L) + cache.put("topic3", "error3", 1000L) + + val errors = cache.getErrorsForTopics(Set("topic1", "topic3", "topic4"), mockTime.milliseconds()) + assertEquals(2, errors.size) + assertEquals("error1", errors("topic1")) + assertEquals("error3", errors("topic3")) + assertFalse(errors.contains("topic2")) + assertFalse(errors.contains("topic4")) + } + + // Expiration Tests + + @Test + def testExpiredEntryNotReturned(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + + // Entry should be available before expiration + assertEquals(1, cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds()).size) + + // Advance time past expiration + mockTime.sleep(1001L) + + // Entry should not be returned after expiration + assertTrue(cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds()).isEmpty) + } + + @Test + def testExpiredEntriesCleanedOnPut(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + // Add entries with different TTLs + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 2000L) + + // Advance time to expire topic1 but not topic2 + mockTime.sleep(1500L) + + // Add a new entry - this should trigger cleanup + cache.put("topic3", "error3", 1000L) + + // Verify only non-expired entries remain + val errors = cache.getErrorsForTopics(Set("topic1", "topic2", "topic3"), mockTime.milliseconds()) + assertEquals(2, errors.size) + assertFalse(errors.contains("topic1")) + assertEquals("error2", errors("topic2")) + assertEquals("error3", errors("topic3")) + } + + @Test + def testMixedExpiredAndValidEntries(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + 
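// Stagger the TTLs so that only the first entry is past its deadline after the 600 ms sleep below.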
cache.put("topic1", "error1", 500L) + cache.put("topic2", "error2", 1000L) + cache.put("topic3", "error3", 1500L) + + // Advance time to expire only topic1 + mockTime.sleep(600L) + + val errors = cache.getErrorsForTopics(Set("topic1", "topic2", "topic3"), mockTime.milliseconds()) + assertEquals(2, errors.size) + assertFalse(errors.contains("topic1")) + assertTrue(errors.contains("topic2")) + assertTrue(errors.contains("topic3")) + } + + // Capacity Enforcement Tests + + @Test + def testCapacityEnforcement(): Unit = { + cache = new ExpiringErrorCache(3, mockTime) + + // Add 5 entries, exceeding capacity of 3 + for (i <- 1 to 5) { + cache.put(s"topic$i", s"error$i", 1000L) + // Small time advance between entries to ensure different insertion order + mockTime.sleep(10L) + } + + val errors = cache.getErrorsForTopics((1 to 5).map(i => s"topic$i").toSet, mockTime.milliseconds()) + assertEquals(3, errors.size) + + // The cache evicts by earliest expiration time + // Since all have same TTL, earliest inserted (topic1, topic2) should be evicted + assertFalse(errors.contains("topic1")) + assertFalse(errors.contains("topic2")) + assertTrue(errors.contains("topic3")) + assertTrue(errors.contains("topic4")) + assertTrue(errors.contains("topic5")) + } + + @Test + def testEvictionOrder(): Unit = { + cache = new ExpiringErrorCache(3, mockTime) + + // Add entries with different TTLs + cache.put("topic1", "error1", 3000L) // Expires at 3000 + mockTime.sleep(100L) + cache.put("topic2", "error2", 1000L) // Expires at 1100 + mockTime.sleep(100L) + cache.put("topic3", "error3", 2000L) // Expires at 2200 + mockTime.sleep(100L) + cache.put("topic4", "error4", 500L) // Expires at 800 + + // With capacity 3, topic4 (earliest expiration) should be evicted + val errors = cache.getErrorsForTopics(Set("topic1", "topic2", "topic3", "topic4"), mockTime.milliseconds()) + assertEquals(3, errors.size) + assertTrue(errors.contains("topic1")) + assertTrue(errors.contains("topic2")) + assertTrue(errors.contains("topic3")) + assertFalse(errors.contains("topic4")) + } + + @Test + def testCapacityWithDifferentTTLs(): Unit = { + cache = new ExpiringErrorCache(2, mockTime) + + cache.put("topic1", "error1", 5000L) // Long TTL + cache.put("topic2", "error2", 100L) // Short TTL + cache.put("topic3", "error3", 3000L) // Medium TTL + + // topic2 has earliest expiration, so it should be evicted + val errors = cache.getErrorsForTopics(Set("topic1", "topic2", "topic3"), mockTime.milliseconds()) + assertEquals(2, errors.size) + assertTrue(errors.contains("topic1")) + assertFalse(errors.contains("topic2")) + assertTrue(errors.contains("topic3")) + } + + // Update and Stale Entry Tests + + @Test + def testUpdateDoesNotLeaveStaleEntries(): Unit = { + cache = new ExpiringErrorCache(3, mockTime) + + // Fill cache to capacity + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 1000L) + cache.put("topic3", "error3", 1000L) + + // Update topic2 with longer TTL + cache.put("topic2", "error2_updated", 5000L) + + // Add new entry to trigger eviction + cache.put("topic4", "error4", 1000L) + + // Should evict topic1 or topic3 (earliest expiration), not the updated topic2 + val errors = cache.getErrorsForTopics(Set("topic1", "topic2", "topic3", "topic4"), mockTime.milliseconds()) + assertEquals(3, errors.size) + assertTrue(errors.contains("topic2")) + assertEquals("error2_updated", errors("topic2")) + } + + @Test + def testStaleEntriesInQueueHandledCorrectly(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + // Add and 
update same topic multiple times + cache.put("topic1", "error1", 1000L) + cache.put("topic1", "error2", 2000L) + cache.put("topic1", "error3", 3000L) + + // Only latest value should be returned + val errors = cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds()) + assertEquals(1, errors.size) + assertEquals("error3", errors("topic1")) + + // Advance time to expire first two entries + mockTime.sleep(2500L) + + // Force cleanup by adding new entry + cache.put("topic2", "error_new", 1000L) + + // topic1 should still be available with latest value + val errorsAfterCleanup = cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds()) + assertEquals(1, errorsAfterCleanup.size) + assertEquals("error3", errorsAfterCleanup("topic1")) + } + + // Edge Cases + + @Test + def testEmptyCache(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + val errors = cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()) + assertTrue(errors.isEmpty) + } + + @Test + def testSingleEntryCache(): Unit = { + cache = new ExpiringErrorCache(1, mockTime) + + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 1000L) + + // Only most recent should remain + val errors = cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()) + assertEquals(1, errors.size) + assertFalse(errors.contains("topic1")) + assertTrue(errors.contains("topic2")) + } + + @Test + def testZeroTTL(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 0L) + + // Entry expires immediately + assertTrue(cache.getErrorsForTopics(Set("topic1"), mockTime.milliseconds()).isEmpty) + } + + @Test + def testClearOperation(): Unit = { + cache = new ExpiringErrorCache(10, mockTime) + + cache.put("topic1", "error1", 1000L) + cache.put("topic2", "error2", 1000L) + + assertEquals(2, cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()).size) + + cache.clear() + + assertTrue(cache.getErrorsForTopics(Set("topic1", "topic2"), mockTime.milliseconds()).isEmpty) + } + + // Concurrent Access Tests + + @Test + def testConcurrentPutOperations(): Unit = { + cache = new ExpiringErrorCache(100, mockTime) + val numThreads = 10 + val numTopicsPerThread = 20 + val latch = new CountDownLatch(numThreads) + + (1 to numThreads).foreach { threadId => + Future { + try { + for (i <- 1 to numTopicsPerThread) { + cache.put(s"topic_${threadId}_$i", s"error_${threadId}_$i", 1000L) + } + } finally { + latch.countDown() + } + } + } + + assertTrue(latch.await(5, TimeUnit.SECONDS)) + + // Verify all entries were added + val allTopics = (1 to numThreads).flatMap { threadId => + (1 to numTopicsPerThread).map(i => s"topic_${threadId}_$i") + }.toSet + + val errors = cache.getErrorsForTopics(allTopics, mockTime.milliseconds()) + assertEquals(100, errors.size) // Limited by cache capacity + } + + @Test + def testConcurrentPutAndGet(): Unit = { + cache = new ExpiringErrorCache(100, mockTime) + val numOperations = 1000 + val random = new Random() + val topics = (1 to 50).map(i => s"topic$i").toArray + + val futures = (1 to numOperations).map { _ => + Future { + if (random.nextBoolean()) { + // Put operation + val topic = topics(random.nextInt(topics.length)) + cache.put(topic, s"error_${random.nextInt()}", 1000L) + } else { + // Get operation + val topicsToGet = Set(topics(random.nextInt(topics.length))) + cache.getErrorsForTopics(topicsToGet, mockTime.milliseconds()) + } + } + } + + // Wait for all operations to complete + Future.sequence(futures).map(_ => ()) + } + + 
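
Taken together, the tests in this new file pin down the behaviour expected of ExpiringErrorCache: each entry carries a per-topic error message and its own TTL, lookups filter entries against the caller-supplied timestamp, overflowing the capacity evicts the entry with the earliest expiration, updates replace earlier values, and clear() empties the cache. A short usage sketch of that contract as exercised above (the constructor and method signatures are the ones these tests call; topic names and error messages are illustrative only):

    import org.apache.kafka.server.util.MockTime

    val time = new MockTime()
    val cache = new ExpiringErrorCache(2, time)          // capacity of two entries
    cache.put("orders", "authorization failed", 1000L)   // expires 1000 ms from now
    cache.put("clicks", "unknown topic id", 5000L)
    cache.put("logs", "policy violation", 3000L)         // over capacity: "orders" (earliest expiration) is evicted
    time.sleep(3500L)                                     // "logs" has now expired as well
    // Only "clicks" is returned; evicted and expired entries are filtered out.
    val errors = cache.getErrorsForTopics(Set("orders", "clicks", "logs"), time.milliseconds())
    assert(errors.size == 1 && errors("clicks") == "unknown topic id")
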
@Test + def testConcurrentUpdates(): Unit = { + cache = new ExpiringErrorCache(50, mockTime) + val numThreads = 10 + val numUpdatesPerThread = 100 + val sharedTopics = (1 to 10).map(i => s"shared_topic$i").toArray + val latch = new CountDownLatch(numThreads) + + (1 to numThreads).foreach { threadId => + Future { + try { + val random = new Random() + for (i <- 1 to numUpdatesPerThread) { + val topic = sharedTopics(random.nextInt(sharedTopics.length)) + cache.put(topic, s"error_thread${threadId}_update$i", 1000L) + } + } finally { + latch.countDown() + } + } + } + + assertTrue(latch.await(5, TimeUnit.SECONDS)) + + // Verify all shared topics have some value + val errors = cache.getErrorsForTopics(sharedTopics.toSet, mockTime.milliseconds()) + sharedTopics.foreach { topic => + assertTrue(errors.contains(topic), s"Topic $topic should have a value") + assertTrue(errors(topic).startsWith("error_thread"), s"Value should be from one of the threads") + } + } +} \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala index a446bc9036098..63215defd8f7e 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala @@ -25,9 +25,7 @@ import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.util.{Optional, Properties} import scala.jdk.CollectionConverters._ @@ -104,9 +102,8 @@ class FetchRequestMaxBytesTest extends BaseRequestTest { * Note that when a single batch is larger than FetchMaxBytes, it will be * returned in full even if this is larger than FetchMaxBytes. See KIP-74. 
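* In other words, the fetch-level byte limit is a soft limit: rather than returning an
* empty response when the first batch exceeds it, the broker returns that batch in full
* so the consumer can always make progress.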
*/ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsumeMultipleRecords(quorum: String): Unit = { + @Test + def testConsumeMultipleRecords(): Unit = { createTopics() expectNextRecords(IndexedSeq(messages(0), messages(1)), 0) diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala index f96b2ceca3159..5f5c17f50e693 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala @@ -26,8 +26,7 @@ import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.{IsolationLevel, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.server.record.BrokerCompressionType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import java.util import java.util.Optional @@ -41,9 +40,8 @@ import scala.util.Random */ class FetchRequestTest extends BaseFetchRequestTest { - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testBrokerRespectsPartitionsOrderAndSizeLimits(quorum: String): Unit = { + @Test + def testBrokerRespectsPartitionsOrderAndSizeLimits(): Unit = { initProducer() val messagesPerPartition = 9 @@ -144,9 +142,8 @@ class FetchRequestTest extends BaseFetchRequestTest { evaluateResponse4(fetchResponse4V12, 12) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchRequestV4WithReadCommitted(quorum: String): Unit = { + @Test + def testFetchRequestV4WithReadCommitted(): Unit = { initProducer() val maxPartitionBytes = 200 val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head @@ -163,9 +160,8 @@ class FetchRequestTest extends BaseFetchRequestTest { assertTrue(records(partitionData).map(_.sizeInBytes).sum > 0) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchRequestToNonReplica(quorum: String): Unit = { + @Test + def testFetchRequestToNonReplica(): Unit = { val topic = "topic" val partition = 0 val topicPartition = new TopicPartition(topic, partition) @@ -194,15 +190,13 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, oldPartitionData.errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testLastFetchedEpochValidation(quorum: String): Unit = { + @Test + def testLastFetchedEpochValidation(): Unit = { checkLastFetchedEpochValidation(ApiKeys.FETCH.latestVersion()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testLastFetchedEpochValidationV12(quorum: String): Unit = { + @Test + def testLastFetchedEpochValidationV12(): Unit = { checkLastFetchedEpochValidation(12) } @@ -249,15 +243,13 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(firstEpochEndOffset, divergingEpoch.endOffset) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCurrentEpochValidation(quorum: String): Unit = { + @Test + def testCurrentEpochValidation(): Unit = { checkCurrentEpochValidation(ApiKeys.FETCH.latestVersion()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCurrentEpochValidationV12(quorum: String): Unit = { + @Test + def testCurrentEpochValidationV12(): Unit = { checkCurrentEpochValidation(12) } @@ -299,15 +291,13 @@ class FetchRequestTest extends BaseFetchRequestTest { 
assertResponseErrorForEpoch(Errors.FENCED_LEADER_EPOCH, followerId, Optional.of(secondLeaderEpoch - 1)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testEpochValidationWithinFetchSession(quorum: String): Unit = { + @Test + def testEpochValidationWithinFetchSession(): Unit = { checkEpochValidationWithinFetchSession(ApiKeys.FETCH.latestVersion()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testEpochValidationWithinFetchSessionV12(quorum: String): Unit = { + @Test + def testEpochValidationWithinFetchSessionV12(): Unit = { checkEpochValidationWithinFetchSession(12) } @@ -367,9 +357,8 @@ class FetchRequestTest extends BaseFetchRequestTest { * those partitions are returned in all incremental fetch requests. * This tests using FetchRequests that don't use topic IDs */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCreateIncrementalFetchWithPartitionsInErrorV12(quorum: String): Unit = { + @Test + def testCreateIncrementalFetchWithPartitionsInErrorV12(): Unit = { def createConsumerFetchRequest(topicPartitions: Seq[TopicPartition], metadata: JFetchMetadata, toForget: Seq[TopicIdPartition]): FetchRequest = @@ -430,9 +419,8 @@ class FetchRequestTest extends BaseFetchRequestTest { /** * Test that when a Fetch Request receives an unknown topic ID, it returns a top level error. */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchWithPartitionsWithIdError(quorum: String): Unit = { + @Test + def testFetchWithPartitionsWithIdError(): Unit = { def createConsumerFetchRequest(fetchData: util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData], metadata: JFetchMetadata, toForget: Seq[TopicIdPartition]): FetchRequest = { @@ -475,9 +463,8 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_ID.code, responseData1.get(bar0).errorCode) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testZStdCompressedTopic(quorum: String): Unit = { + @Test + def testZStdCompressedTopic(): Unit = { // ZSTD compressed topic val topicConfig = Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> BrokerCompressionType.ZSTD.name) val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1, configs = topicConfig).head @@ -523,9 +510,8 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(3, records(data2).size) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testZStdCompressedRecords(quorum: String): Unit = { + @Test + def testZStdCompressedRecords(): Unit = { // Producer compressed topic val topicConfig = Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> BrokerCompressionType.PRODUCER.name) val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1, configs = topicConfig).head diff --git a/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala b/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala deleted file mode 100644 index 2c10decb3ed5c..0000000000000 --- a/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package unit.kafka.server - -import kafka.server.ForwardingManagerMetrics -import org.apache.kafka.common.MetricName -import org.apache.kafka.common.metrics.Metrics -import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.Test - -import java.util.Collections -import scala.jdk.CollectionConverters._ - -final class ForwardingManagerMetricsTest { - @Test - def testMetricsNames(): Unit = { - val metrics = new Metrics() - val expectedGroup = "ForwardingManager" - - val expectedMetrics = Set( - new MetricName("QueueTimeMs.p99", expectedGroup, "", Collections.emptyMap()), - new MetricName("QueueTimeMs.p999", expectedGroup, "", Collections.emptyMap()), - new MetricName("QueueLength", expectedGroup, "", Collections.emptyMap()), - new MetricName("RemoteTimeMs.p99", expectedGroup, "", Collections.emptyMap()), - new MetricName("RemoteTimeMs.p999", expectedGroup, "", Collections.emptyMap()) - ) - - var metricsMap = metrics.metrics().asScala.filter { case (name, _) => name.group == expectedGroup } - assertEquals(0, metricsMap.size) - - ForwardingManagerMetrics(metrics, 1000) - metricsMap = metrics.metrics().asScala.filter { case (name, _) => name.group == expectedGroup } - assertEquals(metricsMap.size, expectedMetrics.size) - metricsMap.foreach { case (name, _) => - assertTrue(expectedMetrics.contains(name)) - } - } - - @Test - def testQueueTimeMs(): Unit = { - val metrics = new Metrics() - - val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) - val queueTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP99Name) - val queueTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) - assertEquals(Double.NaN, queueTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(Double.NaN, queueTimeMsP999.metricValue.asInstanceOf[Double]) - for(i <- 0 to 999) { - forwardingManagerMetrics.queueTimeMsHist.record(i) - } - assertEquals(990.0, queueTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(999.0, queueTimeMsP999.metricValue.asInstanceOf[Double]) - } - - @Test - def testQueueLength(): Unit = { - val metrics = new Metrics() - - val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) - val queueLength = metrics.metrics().get(forwardingManagerMetrics.queueLengthName) - assertEquals(0, queueLength.metricValue.asInstanceOf[Int]) - forwardingManagerMetrics.queueLength.getAndIncrement() - assertEquals(1, queueLength.metricValue.asInstanceOf[Int]) - } - - @Test - def testRemoteTimeMs(): Unit = { - val metrics = new Metrics() - - val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) - val remoteTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.remoteTimeMsHist.latencyP99Name) - val remoteTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.remoteTimeMsHist.latencyP999Name) - assertEquals(Double.NaN, remoteTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(Double.NaN, remoteTimeMsP999.metricValue.asInstanceOf[Double]) - for (i <- 0 to 999) { - forwardingManagerMetrics.remoteTimeMsHist.record(i) - } - 
assertEquals(990.0, remoteTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(999.0, remoteTimeMsP999.metricValue.asInstanceOf[Double]) - } - - @Test - def testTimeoutMs(): Unit = { - val metrics = new Metrics() - val timeoutMs = 500 - val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, timeoutMs) - val queueTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP99Name) - val queueTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) - assertEquals(Double.NaN, queueTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(Double.NaN, queueTimeMsP999.metricValue.asInstanceOf[Double]) - for(i <- 0 to 99) { - forwardingManagerMetrics.queueTimeMsHist.record(i) - } - forwardingManagerMetrics.queueTimeMsHist.record(1000) - - assertEquals(99, queueTimeMsP99.metricValue.asInstanceOf[Double]) - assertEquals(timeoutMs * 0.999, queueTimeMsP999.metricValue.asInstanceOf[Double]) - } -} diff --git a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala index d2d8d3e0382c3..16e4b2bcb66f4 100644 --- a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala @@ -53,7 +53,7 @@ class ForwardingManagerTest { private val forwardingManager = new ForwardingManagerImpl(brokerToController, metrics) private val principalBuilder = new DefaultKafkaPrincipalBuilder(null, null) private val queueTimeMsP999 = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) - private val queueLength = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueLengthName) + private val queueLength = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueLengthName()) private val remoteTimeMsP999 = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.remoteTimeMsHist.latencyP999Name) private def controllerApiVersions: NodeApiVersions = { diff --git a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala index dce9261f519b5..0b96a8355fccb 100644 --- a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala @@ -19,25 +19,31 @@ package kafka.server import kafka.network.SocketServer import kafka.utils.TestUtils import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata} -import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.{TopicCollection, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.message.DeleteGroupsResponseData.{DeletableGroupResult, DeletableGroupResultCollection} import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment -import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData, ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, EndTxnRequestData, 
HeartbeatRequestData, HeartbeatResponseData, InitProducerIdRequestData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData} +import org.apache.kafka.common.message.WriteTxnMarkersRequestData.{WritableTxnMarker, WritableTxnMarkerTopic} +import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData, ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, EndTxnRequestData, HeartbeatRequestData, HeartbeatResponseData, InitProducerIdRequestData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchRequestData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, StreamsGroupDescribeRequestData, StreamsGroupDescribeResponseData, StreamsGroupHeartbeatRequestData, StreamsGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData, WriteTxnMarkersRequestData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, AddOffsetsToTxnRequest, AddOffsetsToTxnResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, EndTxnRequest, EndTxnResponse, HeartbeatRequest, HeartbeatResponse, InitProducerIdRequest, InitProducerIdResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse, TxnOffsetCommitRequest, TxnOffsetCommitResponse} +import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, AddOffsetsToTxnRequest, AddOffsetsToTxnResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, EndTxnRequest, EndTxnResponse, HeartbeatRequest, HeartbeatResponse, InitProducerIdRequest, InitProducerIdResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, StreamsGroupDescribeRequest, 
StreamsGroupDescribeResponse, StreamsGroupHeartbeatRequest, StreamsGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse, TxnOffsetCommitRequest, TxnOffsetCommitResponse, WriteTxnMarkersRequest, WriteTxnMarkersResponse} import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.utils.ProducerIdAndEpoch import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT +import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions.{assertEquals, fail} +import java.net.Socket +import java.util import java.util.{Comparator, Properties} import java.util.stream.Collectors import scala.collection.Seq +import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ -import scala.reflect.ClassTag + class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { private def brokers(): Seq[KafkaBroker] = cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).asScala.toSeq @@ -46,6 +52,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected var producer: KafkaProducer[String, String] = _ + protected var openSockets: ListBuffer[Socket] = ListBuffer[Socket]() + protected def createOffsetsTopic(): Unit = { val admin = cluster.admin() try { @@ -75,7 +83,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected def createTopic( topic: String, numPartitions: Int - ): Unit = { + ): Uuid = { val admin = cluster.admin() try { TestUtils.createTopicWithAdmin( @@ -85,6 +93,26 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { topic = topic, numPartitions = numPartitions ) + admin + .describeTopics(TopicCollection.ofTopicNames(List(topic).asJava)) + .allTopicNames() + .get() + .get(topic) + .topicId() + } finally { + admin.close() + } + } + + protected def deleteTopic( + topic: String + ): Unit = { + val admin = cluster.admin() + try { + admin + .deleteTopics(TopicCollection.ofTopicNames(List(topic).asJava)) + .all() + .get() } finally { admin.close() } @@ -107,7 +135,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { replicationFactor = replicationFactor, topicConfig = topicConfig ) - partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds(topic), new TopicPartition(topic, partition)) -> leader } + partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds.get(topic), new TopicPartition(topic, partition)) -> leader } } finally { admin.close() } @@ -117,8 +145,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { cluster.brokers.values.stream.allMatch(b => b.config.unstableApiVersionsEnabled) } - protected def getTopicIds: Map[String, Uuid] = { - cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap + protected def getTopicIds: util.Map[String, Uuid] = { + cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get() } protected def getBrokers: Seq[KafkaBroker] = { @@ -134,6 +162,14 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { keySerializer = new StringSerializer, valueSerializer = new StringSerializer) } + protected def closeSockets(): Unit = { + while (openSockets.nonEmpty) { + val socket = openSockets.head + socket.close() + openSockets.remove(0) + } + } + protected def closeProducer(): Unit = { if(producer != null) 
producer.close() @@ -166,18 +202,24 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { memberId: String, memberEpoch: Int, topic: String, + topicId: Uuid, partition: Int, offset: Long, expectedError: Errors, version: Short = ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled) ): Unit = { - val request = new OffsetCommitRequest.Builder( + if (version >= 10 && topicId == Uuid.ZERO_UUID) { + throw new IllegalArgumentException(s"Cannot call OffsetCommit API version $version without a topic id") + } + + val request = OffsetCommitRequest.Builder.forTopicIdsOrNames( new OffsetCommitRequestData() .setGroupId(groupId) .setMemberId(memberId) .setGenerationIdOrMemberEpoch(memberEpoch) .setTopics(List( new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(topicId) .setName(topic) .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() @@ -191,7 +233,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { val expectedResponse = new OffsetCommitResponseData() .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName(topic) + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(if (version < 10) topic else "") .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(partition) @@ -311,21 +354,71 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { assertEquals(expectedError, connectAndReceive[EndTxnResponse](request).error) } + protected def writeTxnMarkers( + producerId: Long, + producerEpoch: Short, + committed: Boolean, + expectedError: Errors = Errors.NONE, + version: Short = ApiKeys.WRITE_TXN_MARKERS.latestVersion(isUnstableApiEnabled) + ): Unit = { + val request = new WriteTxnMarkersRequest.Builder( + new WriteTxnMarkersRequestData() + .setMarkers(List( + new WritableTxnMarker() + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setTransactionResult(committed) + .setTopics(List( + new WritableTxnMarkerTopic() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setPartitionIndexes(List[Integer](0).asJava) + ).asJava) + .setCoordinatorEpoch(0) + ).asJava) + ).build(version) + + assertEquals( + expectedError.code, + connectAndReceive[WriteTxnMarkersResponse](request).data.markers.get(0).topics.get(0).partitions.get(0).errorCode + ) + } + protected def fetchOffsets( - groupId: String, - memberId: String, - memberEpoch: Int, - partitions: List[TopicPartition], + groups: List[OffsetFetchRequestData.OffsetFetchRequestGroup], + requireStable: Boolean, + version: Short + ): List[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + if (version < 8) { + fail(s"OffsetFetch API version $version cannot fetch multiple groups.") + } + + val request = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(groups.asJava), + false, + true + ).build(version) + + val response = connectAndReceive[OffsetFetchResponse](request) + + // Sort topics and partitions within the response as their order is not guaranteed. 
+ response.data.groups.asScala.foreach(sortTopicPartitions) + + response.data.groups.asScala.toList + } + + protected def fetchOffsets( + group: OffsetFetchRequestData.OffsetFetchRequestGroup, requireStable: Boolean, version: Short ): OffsetFetchResponseData.OffsetFetchResponseGroup = { - val request = new OffsetFetchRequest.Builder( - groupId, - memberId, - memberEpoch, - requireStable, - if (partitions == null) null else partitions.asJava, - false + val request = OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(List(group).asJava), + false, + true ).build(version) val response = connectAndReceive[OffsetFetchResponse](request) @@ -334,11 +427,11 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { // same format to the caller. val groupResponse = if (version >= 8) { assertEquals(1, response.data.groups.size) - assertEquals(groupId, response.data.groups.get(0).groupId) + assertEquals(group.groupId, response.data.groups.get(0).groupId) response.data.groups.asScala.head } else { new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(groupId) + .setGroupId(group.groupId) .setErrorCode(response.data.errorCode) .setTopics(response.data.topics.asScala.map { topic => new OffsetFetchResponseData.OffsetFetchResponseTopics() @@ -360,27 +453,25 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { groupResponse } - protected def fetchOffsets( - groups: Map[String, List[TopicPartition]], - requireStable: Boolean, - version: Short - ): List[OffsetFetchResponseData.OffsetFetchResponseGroup] = { - if (version < 8) { - fail(s"OffsetFetch API version $version cannot fetch multiple groups.") - } - - val request = new OffsetFetchRequest.Builder( - groups.map { case (k, v) => (k, v.asJava) }.asJava, - requireStable, - false - ).build(version) - - val response = connectAndReceive[OffsetFetchResponse](request) - - // Sort topics and partitions within the response as their order is not guaranteed. 
- response.data.groups.asScala.foreach(sortTopicPartitions) - - response.data.groups.asScala.toList + protected def fetchOffset( + groupId: String, + topic: String, + partition: Int + ): Long = { + val groupIdRecord = fetchOffsets( + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(topic) + .setPartitionIndexes(List[Integer](partition).asJava) + ).asJava), + requireStable = true, + version = 9 + ) + val topicRecord = groupIdRecord.topics.asScala.find(_.name == topic).head + val partitionRecord = topicRecord.partitions.asScala.find(_.partitionIndex == partition).head + partitionRecord.committedOffset } protected def deleteOffset( @@ -670,14 +761,28 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { val shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( new ShareGroupDescribeRequestData() .setGroupIds(groupIds.asJava) - .setIncludeAuthorizedOperations(includeAuthorizedOperations), - true + .setIncludeAuthorizedOperations(includeAuthorizedOperations) ).build(version) val shareGroupDescribeResponse = connectAndReceive[ShareGroupDescribeResponse](shareGroupDescribeRequest) shareGroupDescribeResponse.data.groups.asScala.toList } + protected def streamsGroupDescribe( + groupIds: List[String], + includeAuthorizedOperations: Boolean = false, + version: Short = ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled) + ): List[StreamsGroupDescribeResponseData.DescribedGroup] = { + val streamsGroupDescribeRequest = new StreamsGroupDescribeRequest.Builder( + new StreamsGroupDescribeRequestData() + .setGroupIds(groupIds.asJava) + .setIncludeAuthorizedOperations(includeAuthorizedOperations) + ).build(version) + + val streamsGroupDescribeResponse = connectAndReceive[StreamsGroupDescribeResponse](streamsGroupDescribeRequest) + streamsGroupDescribeResponse.data.groups.asScala.toList + } + protected def heartbeat( groupId: String, generationId: Int, @@ -751,8 +856,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { .setMemberId(memberId) .setMemberEpoch(memberEpoch) .setRackId(rackId) - .setSubscribedTopicNames(subscribedTopicNames.asJava), - true + .setSubscribedTopicNames(subscribedTopicNames.asJava) ).build() // Send the request until receiving a successful response. 
There is a delay @@ -766,6 +870,41 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data } + protected def streamsGroupHeartbeat( + groupId: String, + memberId: String = "", + memberEpoch: Int = 0, + rebalanceTimeoutMs: Int = -1, + activeTasks: List[StreamsGroupHeartbeatRequestData.TaskIds] = null, + standbyTasks: List[StreamsGroupHeartbeatRequestData.TaskIds] = null, + warmupTasks: List[StreamsGroupHeartbeatRequestData.TaskIds] = null, + topology: StreamsGroupHeartbeatRequestData.Topology = null, + expectedError: Errors = Errors.NONE, + version: Short = ApiKeys.STREAMS_GROUP_HEARTBEAT.latestVersion(isUnstableApiEnabled) + ): StreamsGroupHeartbeatResponseData = { + val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequest.Builder( + new StreamsGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setRebalanceTimeoutMs(rebalanceTimeoutMs) + .setActiveTasks(activeTasks.asJava) + .setStandbyTasks(standbyTasks.asJava) + .setWarmupTasks(warmupTasks.asJava) + .setTopology(topology) + ).build(version) + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var streamsGroupHeartbeatResponse: StreamsGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + streamsGroupHeartbeatResponse = connectAndReceive[StreamsGroupHeartbeatResponse](streamsGroupHeartbeatRequest) + streamsGroupHeartbeatResponse.data.errorCode == expectedError.code + }, msg = s"Could not heartbeat successfully. Last response $streamsGroupHeartbeatResponse.") + + streamsGroupHeartbeatResponse.data + } + protected def leaveGroupWithNewProtocol( groupId: String, memberId: String @@ -864,25 +1003,31 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { assertEquals(expectedResponseData.results.asScala.toSet, deleteGroupsResponse.data.results.asScala.toSet) } + protected def connectAny(): Socket = { + val socket: Socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) + openSockets += socket + socket + } + + protected def connect(destination: Int): Socket = { + val socket = IntegrationTestUtils.connect(brokerSocketServer(destination).boundPort(cluster.clientListener())) + openSockets += socket + socket + } + protected def connectAndReceive[T <: AbstractResponse]( request: AbstractRequest - )(implicit classTag: ClassTag[T]): T = { - IntegrationTestUtils.connectAndReceive[T]( - request, - cluster.anyBrokerSocketServer(), - cluster.clientListener() - ) + ): T = { + IntegrationTestUtils.connectAndReceive[T](request, cluster.brokerBoundPorts().get(0)) } protected def connectAndReceive[T <: AbstractResponse]( request: AbstractRequest, destination: Int - )(implicit classTag: ClassTag[T]): T = { - IntegrationTestUtils.connectAndReceive[T]( - request, - brokerSocketServer(destination), - cluster.clientListener() - ) + ): T = { + val socketServer = brokerSocketServer(destination) + val listenerName = cluster.clientListener() + IntegrationTestUtils.connectAndReceive[T](request, socketServer.boundPort(listenerName)) } private def brokerSocketServer(brokerId: Int): SocketServer = { diff --git a/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala index 0bc22194dc4f8..3026cdecb2754 100644 --- a/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala 
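For reference, the offset-commit helper added to GroupCoordinatorBaseRequestTest above gates on the request version: from OFFSET_COMMIT v10 the call is rejected when no topic id is supplied and the request is built through OffsetCommitRequest.Builder.forTopicIdsOrNames, while the expected response carries the topic id (with an empty name) at v10+ and the topic name (with Uuid.ZERO_UUID) below v10. The sketch below mirrors that version gate using only the request-data setters visible in this patch; the object and method names (OffsetCommitSketch, buildOffsetCommitData) and the literal group/member values are illustrative, not part of the change.

    import org.apache.kafka.common.Uuid
    import org.apache.kafka.common.message.OffsetCommitRequestData
    import scala.jdk.CollectionConverters._

    object OffsetCommitSketch {
      // Illustrative only: mirrors the version gate of the updated helper.
      def buildOffsetCommitData(version: Short, topicId: Uuid, topic: String,
                                partition: Int, offset: Long): OffsetCommitRequestData = {
        if (version >= 10 && topicId == Uuid.ZERO_UUID)
          throw new IllegalArgumentException(s"OffsetCommit v$version needs a topic id")
        new OffsetCommitRequestData()
          .setGroupId("group")
          .setMemberId("member")
          .setGenerationIdOrMemberEpoch(1)
          .setTopics(List(
            new OffsetCommitRequestData.OffsetCommitRequestTopic()
              .setTopicId(topicId)   // only meaningful at v10+
              .setName(topic)        // used to address the topic below v10
              .setPartitions(List(
                new OffsetCommitRequestData.OffsetCommitRequestPartition()
                  .setPartitionIndex(partition)
                  .setCommittedOffset(offset)).asJava)).asJava)
      }
    }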
@@ -179,6 +179,15 @@ class HeartbeatRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas expectedError = Errors.UNKNOWN_MEMBER_ID, version = version.toShort ) + + // Heartbeat with empty group id. + heartbeat( + groupId = "", + memberId = leaderMemberId, + generationId = -1, + expectedError = Errors.INVALID_GROUP_ID, + version = version.toShort + ) } } } diff --git a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala index 378c38e530fdd..9ea25f76b4673 100755 --- a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala +++ b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala @@ -34,6 +34,8 @@ import org.apache.kafka.storage.internals.log.{CleanerConfig, LogDirFailureChann import java.util.Optional +import scala.jdk.CollectionConverters._ + class HighwatermarkPersistenceTest { val configs = TestUtils.createBrokerConfigs(2).map(KafkaConfig.fromProps) @@ -41,7 +43,7 @@ class HighwatermarkPersistenceTest { val configRepository = new MockConfigRepository() val logManagers = configs map { config => TestUtils.createLogManager( - logDirs = config.logDirs.map(new File(_)), + logDirs = config.logDirs.asScala.map(new File(_)), cleanerConfig = new CleanerConfig(true)) } @@ -195,7 +197,7 @@ class HighwatermarkPersistenceTest { } private def hwmFor(replicaManager: ReplicaManager, topic: String, partition: Int): Long = { - replicaManager.highWatermarkCheckpoints(new File(replicaManager.config.logDirs.head).getAbsolutePath).read().getOrDefault( + replicaManager.highWatermarkCheckpoints(new File(replicaManager.config.logDirs.get(0)).getAbsolutePath).read().getOrDefault( new TopicPartition(topic, partition), 0L) } } diff --git a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala index 5ebf32accace6..5836f3618c181 100644 --- a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala +++ b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala @@ -100,11 +100,11 @@ class IsrExpirationTest { // let the follower catch up to the Leader logEndOffset - 1 for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 1), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = leaderLogEndOffset, - brokerEpoch = 1L) + new LogOffsetMetadata(leaderLogEndOffset - 1), + 0L, + time.milliseconds, + leaderLogEndOffset, + 1L) var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -150,11 +150,11 @@ class IsrExpirationTest { // Make the remote replica not read to the end of log. It should be not be out of sync for at least 100 ms for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 2), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = leaderLogEndOffset, - brokerEpoch = 1L) + new LogOffsetMetadata(leaderLogEndOffset - 2), + 0L, + time.milliseconds, + leaderLogEndOffset, + 1L) // Simulate 2 fetch requests spanning more than 100 ms which do not read to the end of the log. // The replicas will no longer be in ISR. 
We do 2 fetches because we want to simulate the case where the replica is lagging but is not stuck @@ -165,11 +165,11 @@ class IsrExpirationTest { partition0.remoteReplicas.foreach { r => r.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 1), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = leaderLogEndOffset, - brokerEpoch = 1L) + new LogOffsetMetadata(leaderLogEndOffset - 1), + 0L, + time.milliseconds, + leaderLogEndOffset, + 1L) } partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -183,11 +183,11 @@ class IsrExpirationTest { // Now actually make a fetch to the end of the log. The replicas should be back in ISR partition0.remoteReplicas.foreach { r => r.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = leaderLogEndOffset, - brokerEpoch = 1L) + new LogOffsetMetadata(leaderLogEndOffset), + 0L, + time.milliseconds, + leaderLogEndOffset, + 1L) } partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -208,11 +208,11 @@ class IsrExpirationTest { // let the follower catch up to the Leader logEndOffset for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = leaderLogEndOffset, - brokerEpoch = 1L) + new LogOffsetMetadata(leaderLogEndOffset), + 0L, + time.milliseconds, + leaderLogEndOffset, + 1L) var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -245,11 +245,11 @@ class IsrExpirationTest { // set lastCaughtUpTime to current time for (replica <- partition.remoteReplicas) replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(0L), - followerStartOffset = 0L, - followerFetchTimeMs= time.milliseconds, - leaderEndOffset = 0L, - brokerEpoch = 1L) + new LogOffsetMetadata(0L), + 0L, + time.milliseconds, + 0L, + 1L) // set the leader and its hw and the hw update time partition.leaderReplicaIdOpt = Some(leaderId) diff --git a/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala index b6c21af8abfe2..0f2ab3669c90d 100644 --- a/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala @@ -139,6 +139,17 @@ class JoinGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas ) ) + // Join with an empty group id. + verifyJoinGroupResponseDataEquals( + new JoinGroupResponseData() + .setErrorCode(Errors.INVALID_GROUP_ID.code) + .setProtocolName(if (version >= 7) null else ""), + sendJoinRequest( + groupId = "", + version = version.toShort + ) + ) + // Join with an inconsistent protocolType. 
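Likewise, the OffsetFetch helpers reworked earlier in this patch now always send an OffsetFetchRequestData group built via OffsetFetchRequest.Builder.forTopicIdsOrNames (the old group-to-partitions map overload is removed), and the new fetchOffset(groupId, topic, partition) convenience pins version 9 and returns the committed offset for a single partition. The sketch below shows the per-group payload those helpers accept, using only setters that appear in the patch; the object and method names (OffsetFetchSketch, groupRequest) are illustrative.

    import org.apache.kafka.common.message.OffsetFetchRequestData
    import scala.jdk.CollectionConverters._

    object OffsetFetchSketch {
      // Illustrative only: the per-group payload passed to the reworked fetchOffsets helpers.
      def groupRequest(groupId: String, topic: String, partition: Int): OffsetFetchRequestData.OffsetFetchRequestGroup =
        new OffsetFetchRequestData.OffsetFetchRequestGroup()
          .setGroupId(groupId)
          .setTopics(List(
            new OffsetFetchRequestData.OffsetFetchRequestTopics()
              .setName(topic)
              .setPartitionIndexes(List[Integer](partition).asJava)
          ).asJava)
    }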
verifyJoinGroupResponseDataEquals( new JoinGroupResponseData() diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index c644043168438..bdd62291407a5 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -23,7 +23,7 @@ import kafka.network.RequestChannel import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache import kafka.server.share.SharePartitionManager -import kafka.utils.{CoreUtils, Logging, LoggingController, TestUtils} +import kafka.utils.{CoreUtils, Logging, TestUtils} import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} import org.apache.kafka.common._ @@ -39,18 +39,18 @@ import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartiti import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResult import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterConfigsResource => LAlterConfigsResource, AlterConfigsResourceCollection => LAlterConfigsResourceCollection, AlterableConfig => LAlterableConfig, AlterableConfigCollection => LAlterableConfigCollection} import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => LAlterConfigsResourceResponse} +import org.apache.kafka.common.message.AlterShareGroupOffsetsRequestData.{AlterShareGroupOffsetsRequestPartition, AlterShareGroupOffsetsRequestTopic, AlterShareGroupOffsetsRequestTopicCollection} import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.{DescribedGroup, TopicPartitions} import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic -import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData.{DeleteShareGroupOffsetsResponsePartition, DeleteShareGroupOffsetsResponseTopic} +import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData.{DescribeShareGroupOffsetsRequestGroup, DescribeShareGroupOffsetsRequestTopic} import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData.{DescribeShareGroupOffsetsResponseGroup, DescribeShareGroupOffsetsResponsePartition, DescribeShareGroupOffsetsResponseTopic} import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.{AlterConfigsResource => IAlterConfigsResource, AlterConfigsResourceCollection => IAlterConfigsResourceCollection, AlterableConfig => IAlterableConfig, AlterableConfigCollection => IAlterableConfigCollection} import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.{AlterConfigsResourceResponse => IAlterConfigsResourceResponse} import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity -import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import 
org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic @@ -83,16 +83,18 @@ import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorTes import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} import org.apache.kafka.metadata.{ConfigRepository, MetadataCache, MockConfigRepository} +import org.apache.kafka.network.Session import org.apache.kafka.network.metrics.{RequestChannelMetrics, RequestMetrics} import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.{ClientMetricsManager, SimpleApiVersionManager} import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer} -import org.apache.kafka.server.common.{FeatureVersion, FinalizedFeatures, GroupVersion, KRaftVersion, MetadataVersion, RequestLocal, TransactionVersion} +import org.apache.kafka.server.common.{FeatureVersion, FinalizedFeatures, GroupVersion, KRaftVersion, MetadataVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.logger.LoggingController import org.apache.kafka.server.metrics.ClientMetricsTestUtils import org.apache.kafka.server.share.{CachedSharePartition, ErroneousAndValidPartitionData, SharePartitionKey} -import org.apache.kafka.server.quota.ThrottleCallback +import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager, ThrottleCallback} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch import org.apache.kafka.server.share.context.{FinalContext, ShareSessionContext} import org.apache.kafka.server.share.session.{ShareSession, ShareSessionKey} @@ -114,10 +116,9 @@ import java.nio.ByteBuffer import java.nio.charset.StandardCharsets import java.time.Duration import java.util -import java.util.Arrays.asList import java.util.concurrent.{CompletableFuture, TimeUnit} import java.util.function.Consumer -import java.util.{Collections, Comparator, Optional, OptionalInt, OptionalLong, Properties} +import java.util.{Comparator, Optional, OptionalInt, OptionalLong, Properties} import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ @@ -167,7 +168,8 @@ class KafkaApisTest extends Logging { authorizer: Option[Authorizer] = None, configRepository: ConfigRepository = new MockConfigRepository(), overrideProperties: Map[String, String] = Map.empty, - featureVersions: Seq[FeatureVersion] = Seq.empty + featureVersions: Seq[FeatureVersion] = Seq.empty, + autoTopicCreationManager: Option[AutoTopicCreationManager] = None ): KafkaApis = { val properties = TestUtils.createBrokerConfig(brokerId) @@ -182,7 +184,7 @@ class KafkaApisTest extends Logging { val apiVersionManager = new SimpleApiVersionManager( ListenerType.BROKER, true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) setupFeatures(featureVersions) @@ -192,8 +194,8 @@ class KafkaApisTest extends Logging { replicaManager = replicaManager, groupCoordinator = groupCoordinator, txnCoordinator = txnCoordinator, - shareCoordinator = Some(shareCoordinator), - autoTopicCreationManager = autoTopicCreationManager, + shareCoordinator = shareCoordinator, + 
autoTopicCreationManager = autoTopicCreationManager.getOrElse(this.autoTopicCreationManager), brokerId = brokerId, config = config, configRepository = configRepository, @@ -225,6 +227,28 @@ class KafkaApisTest extends Logging { } } + def initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups: Boolean = true): MetadataCache = { + val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) + val delta = new MetadataDelta(MetadataImage.EMPTY) + delta.replay(new FeatureLevelRecord() + .setName(MetadataVersion.FEATURE_NAME) + .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) + ) + if (enableShareGroups) { + delta.replay(new FeatureLevelRecord() + .setName(ShareVersion.FEATURE_NAME) + .setFeatureLevel(ShareVersion.SV_1.featureLevel()) + ) + } else { + delta.replay(new FeatureLevelRecord() + .setName(ShareVersion.FEATURE_NAME) + .setFeatureLevel(ShareVersion.SV_0.featureLevel()) + ) + } + cache.setImage(delta.apply(MetadataProvenance.EMPTY)) + cache + } + @Test def testDescribeConfigsWithAuthorizer(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -235,14 +259,14 @@ class KafkaApisTest extends Logging { val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = Seq( + val expectedActions = util.List.of( new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL), 1, true, true) ) // Verify that authorize is only called once - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) val topicConfigs = new Properties() @@ -256,16 +280,16 @@ class KafkaApisTest extends Logging { val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(resourceName) - .setResourceType(ConfigResource.Type.TOPIC.id)).asJava)) + .setResourceType(ConfigResource.Type.TOPIC.id)))) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) kafkaApis = createKafkaApis(authorizer = Some(authorizer), configRepository = configRepository) kafkaApis.handleDescribeConfigsRequest(request) - verify(authorizer).authorize(any(), ArgumentMatchers.eq(expectedActions.asJava)) + verify(authorizer).authorize(any(), ArgumentMatchers.eq(expectedActions)) val response = verifyNoThrottling[DescribeConfigsResponse](request) val results = response.data.results assertEquals(1, results.size) @@ -319,13 +343,13 @@ class KafkaApisTest extends Logging { val consumerGroupId = "consumer_group_1" val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = Seq( + val expectedActions = util.List.of( new Action(operation, new ResourcePattern(resourceType, consumerGroupId, PatternType.LITERAL), 1, true, true) ) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + 
when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) val cgConfigs = new Properties() @@ -344,9 +368,9 @@ class KafkaApisTest extends Logging { val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(consumerGroupId) - .setResourceType(ConfigResource.Type.GROUP.id)).asJava)) + .setResourceType(ConfigResource.Type.GROUP.id)))) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) @@ -379,10 +403,10 @@ class KafkaApisTest extends Logging { props.forEach((x, y) => configEntries.add(new AlterConfigsRequest.ConfigEntry(x.asInstanceOf[String], y.asInstanceOf[String]))) - val configs = Map(authorizedResource -> new AlterConfigsRequest.Config(configEntries)) + val configs = util.Map.of(authorizedResource, new AlterConfigsRequest.Config(configEntries)) val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, clientId, 0) - val apiRequest = new AlterConfigsRequest.Builder(configs.asJava, false).build(requestHeader.apiVersion) + val apiRequest = new AlterConfigsRequest.Builder(configs, false).build(requestHeader.apiVersion) val request = buildRequest(apiRequest) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) @@ -435,13 +459,13 @@ class KafkaApisTest extends Logging { val subscriptionName = "client_metric_subscription_1" val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = Seq( + val expectedActions = util.List.of( new Action(operation, new ResourcePattern(resourceType, Resource.CLUSTER_NAME, PatternType.LITERAL), 1, true, true) ) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) val resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) @@ -453,9 +477,9 @@ class KafkaApisTest extends Logging { val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(subscriptionName) - .setResourceType(ConfigResource.Type.CLIENT_METRICS.id)).asJava)) + .setResourceType(ConfigResource.Type.CLIENT_METRICS.id)))) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) @@ -546,8 +570,8 @@ class KafkaApisTest extends Logging { new ResourcePattern(resourceType, resourceName, PatternType.LITERAL), 1, logIfAllowed, logIfDenied) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(Seq(expectedAuthorizedAction).asJava))) - .thenReturn(Seq(result).asJava) + when(authorizer.authorize(any[RequestContext], 
ArgumentMatchers.eq(util.List.of(expectedAuthorizedAction)))) + .thenReturn(util.List.of(result)) } @Test @@ -628,7 +652,8 @@ class KafkaApisTest extends Logging { val expectedThrottleTimeMs = math.max(controllerThrottleTimeMs, requestThrottleTimeMs) verify(clientRequestQuotaManager).throttle( - ArgumentMatchers.eq(request), + ArgumentMatchers.eq(request.header.clientId()), + ArgumentMatchers.eq(request.session), any[ThrottleCallback](), ArgumentMatchers.eq(expectedThrottleTimeMs) ) @@ -736,7 +761,7 @@ class KafkaApisTest extends Logging { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(coordinatorType.id()) - .setCoordinatorKeys(asList(key))) + .setCoordinatorKeys(util.List.of(key))) } else { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() @@ -747,7 +772,7 @@ class KafkaApisTest extends Logging { when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - val capturedRequest = verifyTopicCreation(topicName, true, true, request) + val capturedRequest = verifyTopicCreation(topicName, enableAutoTopicCreation = true, isInternal = true, request) kafkaApis = createKafkaApis(authorizer = Some(authorizer), overrideProperties = topicConfigOverride) kafkaApis.handleFindCoordinatorRequest(request) @@ -771,7 +796,7 @@ class KafkaApisTest extends Logging { def testFindCoordinatorWithInvalidSharePartitionKey(): Unit = { val request = new FindCoordinatorRequestData() .setKeyType(CoordinatorType.SHARE.id) - .setCoordinatorKeys(asList("")) + .setCoordinatorKeys(util.List.of("")) val requestChannelRequest = buildRequest(new FindCoordinatorRequest.Builder(request).build()) @@ -779,7 +804,7 @@ class KafkaApisTest extends Logging { kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val expectedResponse = new FindCoordinatorResponseData() - .setCoordinators(asList( + .setCoordinators(util.List.of( new FindCoordinatorResponseData.Coordinator() .setKey("") .setErrorCode(Errors.INVALID_REQUEST.code) @@ -798,7 +823,7 @@ class KafkaApisTest extends Logging { val request = new FindCoordinatorRequestData() .setKeyType(CoordinatorType.SHARE.id) - .setCoordinatorKeys(asList(key.asCoordinatorKey)) + .setCoordinatorKeys(util.List.of(key.asCoordinatorKey)) val requestChannelRequest = buildRequest(new FindCoordinatorRequest.Builder(request).build()) @@ -808,7 +833,7 @@ class KafkaApisTest extends Logging { when(shareCoordinator.partitionFor(ArgumentMatchers.eq(key))).thenReturn(10) val expectedResponse = new FindCoordinatorResponseData() - .setCoordinators(asList( + .setCoordinators(util.List.of( new FindCoordinatorResponseData.Coordinator() .setKey(key.asCoordinatorKey) .setNodeId(0) @@ -900,7 +925,7 @@ class KafkaApisTest extends Logging { } val metadataRequest = new MetadataRequest.Builder( - List(topicName).asJava, enableAutoTopicCreation + util.List.of(topicName), enableAutoTopicCreation ).build(requestHeader.apiVersion) val request = buildRequest(metadataRequest) @@ -912,11 +937,11 @@ class KafkaApisTest extends Logging { kafkaApis.handleTopicMetadataRequest(request) val response = verifyNoThrottling[MetadataResponse](request) - val expectedMetadataResponse = util.Collections.singletonList(new TopicMetadata( + val expectedMetadataResponse = util.List.of(new TopicMetadata( expectedError, topicName, isInternal, - util.Collections.emptyList() + util.List.of() )) assertEquals(expectedMetadataResponse, response.topicMetadata()) @@ -933,12 +958,15 @@ class KafkaApisTest extends Logging { request: 
RequestChannel.Request): ArgumentCaptor[Option[RequestContext]] = { val capturedRequest: ArgumentCaptor[Option[RequestContext]] = ArgumentCaptor.forClass(classOf[Option[RequestContext]]) if (enableAutoTopicCreation) { - when(clientControllerQuotaManager.newPermissiveQuotaFor(ArgumentMatchers.eq(request))) - .thenReturn(UnboundedControllerMutationQuota) + + when(clientControllerQuotaManager.newPermissiveQuotaFor( + ArgumentMatchers.eq(request.session), + ArgumentMatchers.eq(request.header.clientId()) + )).thenReturn(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA) when(autoTopicCreationManager.createTopics( ArgumentMatchers.eq(Set(topicName)), - ArgumentMatchers.eq(UnboundedControllerMutationQuota), + ArgumentMatchers.eq(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA), capturedRequest.capture())).thenReturn( Seq(new MetadataResponseTopic() .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) @@ -969,7 +997,7 @@ class KafkaApisTest extends Logging { val invalidVersions = Set(10, 11) invalidVersions.foreach( version => topics.foreach(topic => { - val metadataRequestData = new MetadataRequestData().setTopics(Collections.singletonList(topic)) + val metadataRequestData = new MetadataRequestData().setTopics(util.List.of(topic)) val request = buildRequest(new MetadataRequest(metadataRequestData, version.toShort)) val kafkaApis = createKafkaApis() try { @@ -992,27 +1020,43 @@ class KafkaApisTest extends Logging { ) } - @Test - def testHandleOffsetCommitRequest(): Unit = { - addTopicToMetadataCache("foo", numPartitions = 1) + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + def testHandleOffsetCommitRequest(version: Short): Unit = { + val topicName = "foo" + val topicId = Uuid.randomUuid() + addTopicToMetadataCache(topicName, topicId = topicId, numPartitions = 1) val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setName("foo") - .setPartitions(List( + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(if (version < 10) topicName else "") + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10)).asJava)).asJava) + .setCommittedOffset(10))))) - val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) + val expectedOffsetCommitRequest = new OffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setTopics(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(topicName) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(10))))) + + val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build(version)) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( requestChannelRequest.context, - offsetCommitRequest, + expectedOffsetCommitRequest, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis() @@ -1023,40 +1067,57 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. 
val offsetCommitResponse = new OffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName("foo") - .setPartitions(List( + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(if (version < 10) topicName else "") + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) future.complete(offsetCommitResponse) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(offsetCommitResponse, response.data) } - @Test - def testHandleOffsetCommitRequestFutureFailed(): Unit = { - addTopicToMetadataCache("foo", numPartitions = 1) + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + def testHandleOffsetCommitRequestFutureFailed(version: Short): Unit = { + val topicName = "foo" + val topicId = Uuid.randomUuid() + addTopicToMetadataCache(topicName, topicId = topicId, numPartitions = 1) val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setName("foo") - .setPartitions(List( + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(if (version < 10) topicName else "") + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10)).asJava)).asJava) + .setCommittedOffset(10))))) - val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) + val expectedOffsetCommitRequest = new OffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setTopics(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(topicName) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(10))))) + + val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build(version)) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( requestChannelRequest.context, - offsetCommitRequest, + expectedOffsetCommitRequest, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) @@ -1067,19 +1128,175 @@ class KafkaApisTest extends Logging { ) val expectedOffsetCommitResponse = new OffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName("foo") - .setPartitions(List( + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName(if (version < 10) topicName else "") + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NOT_COORDINATOR.code)).asJava)).asJava) + .setErrorCode(Errors.NOT_COORDINATOR.code))))) future.completeExceptionally(Errors.NOT_COORDINATOR.exception) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(expectedOffsetCommitResponse, response.data) } + @Test + def testHandleOffsetCommitRequestTopicsAndPartitionsValidationWithTopicIds(): Unit = { + val fooId = Uuid.randomUuid() + val barId = Uuid.randomUuid() + val zarId = Uuid.randomUuid() + val 
fooName = "foo" + val barName = "bar" + addTopicToMetadataCache(fooName, topicId = fooId, numPartitions = 2) + addTopicToMetadataCache(barName, topicId = barId, numPartitions = 2) + + val offsetCommitRequest = new OffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setTopics(util.List.of( + // foo exists but only has 2 partitions. + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(fooId) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(10), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedOffset(20), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(2) + .setCommittedOffset(30))), + // bar exists. + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(barId) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(40), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedOffset(50))), + // zar does not exist. + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(zarId) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(60), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedOffset(70))))) + + val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build()) + + // This is the request expected by the group coordinator. + val expectedOffsetCommitRequest = new OffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setTopics(util.List.of( + // foo exists but only has 2 partitions. + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(fooId) + .setName(fooName) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(10), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedOffset(20))), + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setTopicId(barId) + .setName(barName) + .setPartitions(util.List.of( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(40), + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(1) + .setCommittedOffset(50))))) + + val future = new CompletableFuture[OffsetCommitResponseData]() + when(groupCoordinator.commitOffsets( + requestChannelRequest.context, + expectedOffsetCommitRequest, + RequestLocal.noCaching.bufferSupplier + )).thenReturn(future) + kafkaApis = createKafkaApis() + kafkaApis.handle( + requestChannelRequest, + RequestLocal.noCaching + ) + + // This is the response returned by the group coordinator. 
+ val offsetCommitResponse = new OffsetCommitResponseData() + .setTopics(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setTopicId(fooId) + .setName(fooName) + .setPartitions(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code))), + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setTopicId(barId) + .setName(barName) + .setPartitions(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code))))) + + val expectedOffsetCommitResponse = new OffsetCommitResponseData() + .setTopics(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setTopicId(fooId) + .setPartitions(util.List.of( + // foo-2 is first because partitions failing the validation + // are put in the response first. + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(2) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code))), + // zar is before bar because topics failing the validation are + // put in the response first. + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setTopicId(zarId) + .setPartitions(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code))), + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setTopicId(barId) + .setPartitions(util.List.of( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code), + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code))))) + + future.complete(offsetCommitResponse) + val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) + assertEquals(expectedOffsetCommitResponse, response.data) + } + @Test def testHandleOffsetCommitRequestTopicsAndPartitionsValidation(): Unit = { addTopicToMetadataCache("foo", numPartitions = 2) @@ -1088,11 +1305,11 @@ class KafkaApisTest extends Logging { val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( // foo exists but only has 2 partitions. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), @@ -1101,54 +1318,54 @@ class KafkaApisTest extends Logging { .setCommittedOffset(20), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(2) - .setCommittedOffset(30)).asJava), + .setCommittedOffset(30))), // bar exists. 
new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50)).asJava), + .setCommittedOffset(50))), // zar does not exist. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("zar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(60), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(70)).asJava)).asJava) + .setCommittedOffset(70))))) - val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) + val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicNames(offsetCommitRequest).build()) // This is the request expected by the group coordinator. val expectedOffsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( // foo exists but only has 2 partitions. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(20)).asJava), + .setCommittedOffset(20))), new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50)).asJava)).asJava) + .setCommittedOffset(50))))) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( @@ -1164,31 +1381,31 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val offsetCommitResponse = new OffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava), + .setErrorCode(Errors.NONE.code))), new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) val expectedOffsetCommitResponse = new OffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( // foo-2 is first because partitions failing the validation // are put in the response first. 
new OffsetCommitResponseData.OffsetCommitResponsePartition() @@ -1199,75 +1416,33 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava), + .setErrorCode(Errors.NONE.code))), // zar is before bar because topics failing the validation are // put in the response first. new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("zar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)).asJava), + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code))), new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) future.complete(offsetCommitResponse) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(expectedOffsetCommitResponse, response.data) } - @Test - def testOffsetCommitWithInvalidPartition(): Unit = { - val topic = "topic" - addTopicToMetadataCache(topic, numPartitions = 1) - - def checkInvalidPartition(invalidPartitionId: Int): Unit = { - reset(replicaManager, clientRequestQuotaManager, requestChannel) - - val offsetCommitRequest = new OffsetCommitRequest.Builder( - new OffsetCommitRequestData() - .setGroupId("groupId") - .setTopics(Collections.singletonList( - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setName(topic) - .setPartitions(Collections.singletonList( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(invalidPartitionId) - .setCommittedOffset(15) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setCommittedMetadata("")) - ) - ))).build() - - val request = buildRequest(offsetCommitRequest) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - val kafkaApis = createKafkaApis() - try { - kafkaApis.handleOffsetCommitRequest(request, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[OffsetCommitResponse](request) - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, - Errors.forCode(response.data.topics().get(0).partitions().get(0).errorCode)) - } finally { - kafkaApis.close() - } - } - - checkInvalidPartition(-1) - checkInvalidPartition(1) // topic has only one partition - } - @Test def testTxnOffsetCommitWithInvalidPartition(): Unit = { val topic = "topic" @@ -1283,7 +1458,7 @@ class KafkaApisTest extends Logging { "groupId", 15L, 0.toShort, - Map(invalidTopicPartition -> partitionOffsetCommitData).asJava, + util.Map.of(invalidTopicPartition, partitionOffsetCommitData), true ).build() val request = buildRequest(offsetCommitRequest) @@ -1316,13 +1491,13 @@ class KafkaApisTest extends Logging { .setProducerEpoch(30) .setGroupInstanceId("instance-id") .setTransactionalId("transactional-id") - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") 
- .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10)).asJava)).asJava) + .setCommittedOffset(10))))) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1341,13 +1516,13 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val txnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) future.complete(txnOffsetCommitResponse) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1361,13 +1536,13 @@ class KafkaApisTest extends Logging { val txnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10)).asJava)).asJava) + .setCommittedOffset(10))))) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1385,13 +1560,13 @@ class KafkaApisTest extends Logging { ) val expectedTxnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NOT_COORDINATOR.code)).asJava)).asJava) + .setErrorCode(Errors.NOT_COORDINATOR.code))))) future.completeExceptionally(Errors.NOT_COORDINATOR.exception) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1406,11 +1581,11 @@ class KafkaApisTest extends Logging { val txnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( // foo exists but only has 2 partitions. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), @@ -1419,27 +1594,27 @@ class KafkaApisTest extends Logging { .setCommittedOffset(20), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(2) - .setCommittedOffset(30)).asJava), + .setCommittedOffset(30))), // bar exists. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50)).asJava), + .setCommittedOffset(50))), // zar does not exist. 
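+              // Since zar is not in the metadata cache, its partitions are expected to be answered
+              // with UNKNOWN_TOPIC_OR_PARTITION locally and never forwarded to the group coordinator.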
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("zar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(60), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(70)).asJava)).asJava) + .setCommittedOffset(70))))) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1447,26 +1622,26 @@ class KafkaApisTest extends Logging { val expectedTxnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(List( + .setTopics(util.List.of( // foo exists but only has 2 partitions. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(20)).asJava), + .setCommittedOffset(20))), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50)).asJava)).asJava) + .setCommittedOffset(50))))) val future = new CompletableFuture[TxnOffsetCommitResponseData]() when(txnCoordinator.partitionFor(expectedTxnOffsetCommitRequest.transactionalId)).thenReturn(0) @@ -1483,31 +1658,31 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val txnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava), + .setErrorCode(Errors.NONE.code))), new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) val expectedTxnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( // foo-2 is first because partitions failing the validation // are put in the response first. 
new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() @@ -1518,27 +1693,27 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava), + .setErrorCode(Errors.NONE.code))), // zar is before bar because topics failing the validation are // put in the response first. new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("zar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)).asJava), + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code))), new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code)).asJava)).asJava) + .setErrorCode(Errors.NONE.code))))) future.complete(txnOffsetCommitResponse) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1565,7 +1740,7 @@ class KafkaApisTest extends Logging { groupId, producerId, epoch, - Map(topicPartition -> partitionOffsetCommitData).asJava, + util.Map.of(topicPartition, partitionOffsetCommitData), version >= TxnOffsetCommitRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 ).build(version) val request = buildRequest(offsetCommitRequest) @@ -1580,15 +1755,15 @@ class KafkaApisTest extends Logging { )).thenReturn(future) future.complete(new TxnOffsetCommitResponseData() - .setTopics(List( + .setTopics(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName(topicPartition.topic) - .setPartitions(List( + .setPartitions(util.List.of( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(topicPartition.partition) .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code) - ).asJava) - ).asJava)) + )) + ))) kafkaApis = createKafkaApis() kafkaApis.handleTxnOffsetCommitRequest(request, requestLocal) @@ -1716,7 +1891,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(Set(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition))), + ArgumentMatchers.eq(util.Set.of(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition))), responseCallback.capture(), ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) @@ -1766,7 +1941,7 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - Collections.singletonList(topicPartition) + util.List.of(topicPartition) ).build(version.toShort) val request = buildRequest(addPartitionsToTxnRequest) @@ -1775,7 +1950,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(Set(topicPartition)), + ArgumentMatchers.eq(util.Set.of(topicPartition)), responseCallback.capture(), ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) @@ -1792,9 +1967,9 @@ class 
KafkaApisTest extends Logging { val response = capturedResponse.getValue if (version < 2) { - assertEquals(Collections.singletonMap(topicPartition, Errors.INVALID_PRODUCER_EPOCH), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) + assertEquals(util.Map.of(topicPartition, Errors.INVALID_PRODUCER_EPOCH), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) } else { - assertEquals(Collections.singletonMap(topicPartition, Errors.PRODUCER_FENCED), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) + assertEquals(util.Map.of(topicPartition, Errors.PRODUCER_FENCED), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) } } finally { kafkaApis.close() @@ -1824,23 +1999,23 @@ class KafkaApisTest extends Logging { // Allow WRITE but deny TWO_PHASE_COMMIT when(authorizer.authorize( any(), - ArgumentMatchers.eq(Collections.singletonList(new Action( + ArgumentMatchers.eq(util.List.of(new Action( AclOperation.WRITE, new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), 1, true, true))) - )).thenReturn(Collections.singletonList(AuthorizationResult.ALLOWED)) + )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) when(authorizer.authorize( any(), - ArgumentMatchers.eq(Collections.singletonList(new Action( + ArgumentMatchers.eq(util.List.of(new Action( AclOperation.TWO_PHASE_COMMIT, new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), 1, true, true))) - )).thenReturn(Collections.singletonList(AuthorizationResult.DENIED)) + )).thenReturn(util.List.of(AuthorizationResult.DENIED)) val capturedResponse = ArgumentCaptor.forClass(classOf[InitProducerIdResponse]) @@ -1877,23 +2052,23 @@ class KafkaApisTest extends Logging { // Both permissions are allowed when(authorizer.authorize( any(), - ArgumentMatchers.eq(Collections.singletonList(new Action( + ArgumentMatchers.eq(util.List.of(new Action( AclOperation.WRITE, new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), 1, true, true))) - )).thenReturn(Collections.singletonList(AuthorizationResult.ALLOWED)) + )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) when(authorizer.authorize( any(), - ArgumentMatchers.eq(Collections.singletonList(new Action( + ArgumentMatchers.eq(util.List.of(new Action( AclOperation.TWO_PHASE_COMMIT, new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), 1, true, true))) - )).thenReturn(Collections.singletonList(AuthorizationResult.ALLOWED)) + )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) val responseCallback = ArgumentCaptor.forClass(classOf[InitProducerIdResult => Unit]) @@ -1950,15 +2125,15 @@ class KafkaApisTest extends Logging { val addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - List(new AddPartitionsToTxnTransaction() + util.List.of(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId1) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(false) .setTopics(new AddPartitionsToTxnTopicCollection( - Collections.singletonList(new AddPartitionsToTxnTopic() + util.List.of(new AddPartitionsToTxnTopic() .setName(tp0.topic) - .setPartitions(Collections.singletonList(tp0.partition)) + .setPartitions(util.List.of(tp0.partition)) ).iterator()) ), new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId2) @@ -1966,12 +2141,12 @@ class KafkaApisTest extends Logging { .setProducerEpoch(epoch) 
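+          // The second transaction is verify-only: its partitions are only checked, and the mocked
+          // verification below answers with PRODUCER_FENCED for tp1.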
.setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - Collections.singletonList(new AddPartitionsToTxnTopic() + util.List.of(new AddPartitionsToTxnTopic() .setName(tp1.topic) - .setPartitions(Collections.singletonList(tp1.partition)) + .setPartitions(util.List.of(tp1.partition)) ).iterator()) ) - ).asJava.iterator() + ).iterator() ) ).build(4.toShort) val request = buildRequest(addPartitionsToTxnRequest) @@ -1981,7 +2156,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId1), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(Set(tp0)), + ArgumentMatchers.eq(util.Set.of(tp0)), responseCallback.capture(), any[TransactionVersion], ArgumentMatchers.eq(requestLocal) @@ -1991,18 +2166,18 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId2), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(Set(tp1)), + ArgumentMatchers.eq(util.Set.of(tp1)), verifyPartitionsCallback.capture(), - )).thenAnswer(_ => verifyPartitionsCallback.getValue.apply(AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, Map(tp1 -> Errors.PRODUCER_FENCED).asJava))) + )).thenAnswer(_ => verifyPartitionsCallback.getValue.apply(AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, util.Map.of(tp1, Errors.PRODUCER_FENCED)))) kafkaApis = createKafkaApis() kafkaApis.handleAddPartitionsToTxnRequest(request, requestLocal) val response = verifyNoThrottling[AddPartitionsToTxnResponse](request) - val expectedErrors = Map( - transactionalId1 -> Collections.singletonMap(tp0, Errors.NONE), - transactionalId2 -> Collections.singletonMap(tp1, Errors.PRODUCER_FENCED) - ).asJava + val expectedErrors = util.Map.of( + transactionalId1, util.Map.of(tp0, Errors.NONE), + transactionalId2, util.Map.of(tp1, Errors.PRODUCER_FENCED) + ) assertEquals(expectedErrors, response.errors()) } @@ -2010,7 +2185,7 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.ADD_PARTITIONS_TO_TXN) def testHandleAddPartitionsToTxnAuthorizationFailedAndMetrics(version: Short): Unit = { - val requestMetrics = new RequestChannelMetrics(Collections.singleton(ApiKeys.ADD_PARTITIONS_TO_TXN)) + val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.ADD_PARTITIONS_TO_TXN)) try { val topic = "topic" @@ -2026,27 +2201,27 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - Collections.singletonList(tp)).build(version) + util.List.of(tp)).build(version) else AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - List(new AddPartitionsToTxnTransaction() + util.List.of(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - Collections.singletonList(new AddPartitionsToTxnTopic() + util.List.of(new AddPartitionsToTxnTopic() .setName(tp.topic) - .setPartitions(Collections.singletonList(tp.partition)) + .setPartitions(util.List.of(tp.partition)) ).iterator())) - ).asJava.iterator())).build(version) + ).iterator())).build(version) val requestChannelRequest = buildRequest(addPartitionsToTxnRequest, requestMetrics = requestMetrics) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = 
createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handle( requestChannelRequest, @@ -2087,21 +2262,21 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - List(tp0, tp1).asJava).build(version) + util.List.of(tp0, tp1)).build(version) else AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - List(new AddPartitionsToTxnTransaction() + util.List.of(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - Collections.singletonList(new AddPartitionsToTxnTopic() + util.List.of(new AddPartitionsToTxnTopic() .setName(tp0.topic) - .setPartitions(List[Integer](tp0.partition, tp1.partition()).asJava) + .setPartitions(util.List.of[Integer](tp0.partition, tp1.partition())) ).iterator())) - ).asJava.iterator())).build(version) + ).iterator())).build(version) val requestChannelRequest = buildRequest(addPartitionsToTxnRequest) kafkaApis = createKafkaApis() @@ -2185,88 +2360,429 @@ class KafkaApisTest extends Logging { } } - @Test - def shouldReplaceProducerFencedWithInvalidProducerEpochInProduceResponse(): Unit = { - val topic = "topic" - addTopicToMetadataCache(topic, numPartitions = 2) + @Test + def shouldReplaceProducerFencedWithInvalidProducerEpochInProduceResponse(): Unit = { + val topic = "topic" + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") + val tp = new TopicIdPartition(topicId, 0, "topic") + addTopicToMetadataCache(topic, numPartitions = 2, topicId = topicId) + + for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { + + reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) + + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + + val produceData = new ProduceRequestData.TopicProduceData() + .setPartitionData(util.List.of( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) + + if (version >= 13 ) { + produceData.setTopicId(topicId) + } else { + produceData.setName(tp.topic) + } + + val produceRequest = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection( + util.List.of(produceData) + .iterator)) + .setAcks(1.toShort) + .setTimeoutMs(5000)) + .build(version.toShort) + val request = buildRequest(produceRequest) + + when(replicaManager.handleProduceAppend(anyLong, + anyShort, + ArgumentMatchers.eq(false), + any(), + any(), + responseCallback.capture(), + any(), + any(), + any() + )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.INVALID_PRODUCER_EPOCH)))) + + when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), + any[Long])).thenReturn(0) + when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + val kafkaApis = createKafkaApis() + try { + kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) + + val response = verifyNoThrottling[ProduceResponse](request) + + assertEquals(1, response.data.responses.size) + val topicProduceResponse = response.data.responses.asScala.head + assertEquals(1, topicProduceResponse.partitionResponses.size) + val 
partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head + assertEquals(Errors.INVALID_PRODUCER_EPOCH, Errors.forCode(partitionProduceResponse.errorCode)) + } finally { + kafkaApis.close() + } + } + } + + @Test + def testHandleShareFetchRequestQuotaTagsVerification(): Unit = { + val topicName = "foo" + val topicId = Uuid.randomUuid() + val partitionIndex = 0 + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + addTopicToMetadataCache(topicName, 1, topicId = topicId) + val memberId: Uuid = Uuid.randomUuid() + val groupId = "group" + + // Create test principal and client address to verify quota tags + val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") + val testClientAddress = InetAddress.getByName("192.168.1.100") + val testClientId = "test-client-id" + + // Mock share partition manager responses + val records = memoryRecords(10, 0) + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + new ShareFetchResponseData.PartitionData() + .setErrorCode(Errors.NONE.code) + .setAcknowledgeErrorCode(Errors.NONE.code) + .setRecords(records) + .setAcquiredRecords(new util.ArrayList(util.List.of( + new ShareFetchResponseData.AcquiredRecords() + .setFirstOffset(0) + .setLastOffset(9) + .setDeliveryCount(1) + )))))) + + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( + new TopicIdPartition(topicId, partitionIndex, topicName))) + ) + + // Create argument captors to verify session information passed to quota managers + val sessionCaptorFetch = ArgumentCaptor.forClass(classOf[Session]) + val clientIdCaptor = ArgumentCaptor.forClass(classOf[String]) + val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) + + // Mock quota manager responses and capture arguments + when(quotas.fetch.maybeRecordAndGetThrottleTimeMs( + sessionCaptorFetch.capture(), clientIdCaptor.capture(), anyDouble, anyLong)).thenReturn(0) + when(quotas.request.maybeRecordAndGetThrottleTimeMs( + requestCaptor.capture(), anyLong)).thenReturn(0) + + // Create ShareFetch request + val shareFetchRequestData = new ShareFetchRequestData() + .setGroupId(groupId) + .setMemberId(memberId.toString) + .setShareSessionEpoch(0) + .setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic() + .setTopicId(topicId) + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(partitionIndex) + ).iterator)) + ).iterator)) + + val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) + + // Create request with custom principal and client address to test quota tags + val requestHeader = new RequestHeader(shareFetchRequest.apiKey, shareFetchRequest.version, testClientId, 0) + val request = buildRequest(shareFetchRequest, testPrincipal, testClientAddress, + ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) + + // Test that the request itself contains the proper tags and information + assertEquals(testClientId, request.header.clientId) + 
assertEquals(testPrincipal, request.context.principal) + assertEquals(testClientAddress, request.context.clientAddress) + assertEquals(ApiKeys.SHARE_FETCH, request.header.apiKey) + assertEquals("1", request.context.connectionId) + + kafkaApis = createKafkaApis() + kafkaApis.handleShareFetchRequest(request) + val response = verifyNoThrottling[ShareFetchResponse](request) + + // Verify response is successful + val responseData = response.data() + assertEquals(Errors.NONE.code, responseData.errorCode) + + // Verify that quota methods were called and captured session information + verify(quotas.fetch, times(1)).maybeRecordAndGetThrottleTimeMs( + any[Session](), anyString, anyDouble, anyLong) + verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( + any[RequestChannel.Request](), anyLong) + + // Verify the Session data passed to fetch quota manager is exactly what was defined in the test + val capturedSession = sessionCaptorFetch.getValue + assertNotNull(capturedSession) + assertNotNull(capturedSession.principal) + assertEquals(KafkaPrincipal.USER_TYPE, capturedSession.principal.getPrincipalType) + assertEquals("test-user", capturedSession.principal.getName) + assertEquals(testClientAddress, capturedSession.clientAddress) + assertEquals("test-user", capturedSession.sanitizedUser) + + // Verify client ID passed to fetch quota manager matches what was defined + val capturedClientId = clientIdCaptor.getValue + assertEquals(testClientId, capturedClientId) + + // Verify the Request data passed to request quota manager is exactly what was defined + val capturedRequest = requestCaptor.getValue + assertNotNull(capturedRequest) + assertEquals(testClientId, capturedRequest.header.clientId) + assertEquals(testPrincipal, capturedRequest.context.principal) + assertEquals(testClientAddress, capturedRequest.context.clientAddress) + assertEquals(ApiKeys.SHARE_FETCH, capturedRequest.header.apiKey) + } + + @Test + def testHandleShareAcknowledgeRequestQuotaTagsVerification(): Unit = { + val topicName = "foo" + val topicId = Uuid.randomUuid() + val partitionIndex = 0 + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + addTopicToMetadataCache(topicName, 1, topicId = topicId) + val memberId: Uuid = Uuid.randomUuid() + val groupId = "group" + + // Create test principal and client address to verify quota tags + val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") + val testClientAddress = InetAddress.getByName("192.168.1.100") + val testClientId = "test-client-id" + + // Mock share partition manager acknowledge response + when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + new ShareAcknowledgeResponseData.PartitionData() + .setPartitionIndex(partitionIndex) + .setErrorCode(Errors.NONE.code)))) + + // Create argument captors to verify session information passed to quota managers + val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) + + // Mock quota manager responses and capture arguments + // For ShareAcknowledge, we only verify Request quota (not fetch quota) + when(quotas.request.maybeRecordAndGetThrottleTimeMs( + requestCaptor.capture(), anyLong)).thenReturn(0) - for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { + // Create ShareAcknowledge request + val shareAcknowledgeRequestData = new 
ShareAcknowledgeRequestData() + .setGroupId(groupId) + .setMemberId(memberId.toString) + .setShareSessionEpoch(1) + .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection( + util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(topicId) + .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection( + util.List.of(new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitionIndex(partitionIndex) + .setAcknowledgementBatches(util.List.of( + new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator)) + ).iterator)) - reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) + val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) + + // Create request with custom principal and client address to test quota tags + val requestHeader = new RequestHeader(shareAcknowledgeRequest.apiKey, shareAcknowledgeRequest.version, testClientId, 0) + val request = buildRequest(shareAcknowledgeRequest, testPrincipal, testClientAddress, + ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) + + // Test that the request itself contains the proper tags and information + assertEquals(testClientId, request.header.clientId) + assertEquals(testPrincipal, request.context.principal) + assertEquals(testClientAddress, request.context.clientAddress) + assertEquals(ApiKeys.SHARE_ACKNOWLEDGE, request.header.apiKey) + assertEquals("1", request.context.connectionId) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + kafkaApis = createKafkaApis() + kafkaApis.handleShareAcknowledgeRequest(request) + val response = verifyNoThrottling[ShareAcknowledgeResponse](request) + + // Verify response is successful + val responseData = response.data() + assertEquals(Errors.NONE.code, responseData.errorCode) + + // Verify that request quota method was called + verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( + any[RequestChannel.Request](), anyLong) + + // Verify that fetch quota method was NOT called (ShareAcknowledge only uses request quota) + verify(quotas.fetch, times(0)).maybeRecordAndGetThrottleTimeMs( + any[Session](), anyString, anyDouble, anyLong) + + // Verify the Request data passed to request quota manager is exactly what was defined + val capturedRequest = requestCaptor.getValue + assertNotNull(capturedRequest) + assertEquals(testClientId, capturedRequest.header.clientId) + assertEquals(testPrincipal, capturedRequest.context.principal) + assertEquals(testClientAddress, capturedRequest.context.clientAddress) + assertEquals(ApiKeys.SHARE_ACKNOWLEDGE, capturedRequest.header.apiKey) + } + + @Test + def testHandleShareFetchWithAcknowledgementQuotaTagsVerification(): Unit = { + val topicName = "foo" + val topicId = Uuid.randomUuid() + val partitionIndex = 0 + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + addTopicToMetadataCache(topicName, 1, topicId = topicId) + val memberId: Uuid = Uuid.randomUuid() + val groupId = "group" + + // Create test principal and client address to verify quota tags + val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") + val testClientAddress = 
InetAddress.getByName("192.168.1.100") + val testClientId = "test-client-id" + + // Mock share partition manager responses for both fetch and acknowledge + val records = memoryRecords(10, 0) + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + new ShareFetchResponseData.PartitionData() + .setErrorCode(Errors.NONE.code) + .setAcknowledgeErrorCode(Errors.NONE.code) + .setRecords(records) + .setAcquiredRecords(new util.ArrayList(util.List.of( + new ShareFetchResponseData.AcquiredRecords() + .setFirstOffset(0) + .setLastOffset(9) + .setDeliveryCount(1) + )))))) - val tp = new TopicPartition("topic", 0) + when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + new ShareAcknowledgeResponseData.PartitionData() + .setPartitionIndex(partitionIndex) + .setErrorCode(Errors.NONE.code)))) - val produceRequest = ProduceRequest.builder(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) - .iterator)) - .setAcks(1.toShort) - .setTimeoutMs(5000)) - .build(version.toShort) - val request = buildRequest(produceRequest) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 1), util.List.of( + new TopicIdPartition(topicId, partitionIndex, topicName))) + ) - when(replicaManager.handleProduceAppend(anyLong, - anyShort, - ArgumentMatchers.eq(false), - any(), - any(), - responseCallback.capture(), - any(), - any(), - any() - )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.INVALID_PRODUCER_EPOCH)))) + // Create argument captors to verify session information passed to quota managers + val sessionCaptorFetch = ArgumentCaptor.forClass(classOf[Session]) + val clientIdCaptor = ArgumentCaptor.forClass(classOf[String]) + val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) + + // Mock quota manager responses and capture arguments + when(quotas.fetch.maybeRecordAndGetThrottleTimeMs( + sessionCaptorFetch.capture(), clientIdCaptor.capture(), anyDouble, anyLong)).thenReturn(0) + when(quotas.request.maybeRecordAndGetThrottleTimeMs( + requestCaptor.capture(), anyLong)).thenReturn(0) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) - val kafkaApis = createKafkaApis() - try { - kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) + // Create ShareFetch request with acknowledgement data + val shareFetchRequestData = new ShareFetchRequestData() + .setGroupId(groupId) + .setMemberId(memberId.toString) + .setShareSessionEpoch(1) + .setMaxWaitMs(100) + 
.setMinBytes(1) + .setMaxBytes(1000000) + .setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic() + .setTopicId(topicId) + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(partitionIndex) + .setAcknowledgementBatches(util.List.of( + new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator)) + ).iterator)) - val response = verifyNoThrottling[ProduceResponse](request) + val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) + + // Create request with custom principal and client address to test quota tags + val requestHeader = new RequestHeader(shareFetchRequest.apiKey, shareFetchRequest.version, testClientId, 0) + val request = buildRequest(shareFetchRequest, testPrincipal, testClientAddress, + ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) + + // Test that the request itself contains the proper tags and information + assertEquals(testClientId, request.header.clientId) + assertEquals(testPrincipal, request.context.principal) + assertEquals(testClientAddress, request.context.clientAddress) + assertEquals(ApiKeys.SHARE_FETCH, request.header.apiKey) + assertEquals("1", request.context.connectionId) - assertEquals(1, response.data.responses.size) - val topicProduceResponse = response.data.responses.asScala.head - assertEquals(1, topicProduceResponse.partitionResponses.size) - val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - assertEquals(Errors.INVALID_PRODUCER_EPOCH, Errors.forCode(partitionProduceResponse.errorCode)) - } finally { - kafkaApis.close() - } - } + kafkaApis = createKafkaApis() + kafkaApis.handleShareFetchRequest(request) + val response = verifyNoThrottling[ShareFetchResponse](request) + + // Verify response is successful + val responseData = response.data() + assertEquals(Errors.NONE.code, responseData.errorCode) + + // Verify that quota methods were called exactly once each (not twice despite having acknowledgements) + verify(quotas.fetch, times(1)).maybeRecordAndGetThrottleTimeMs( + any[Session](), anyString, anyDouble, anyLong) + verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( + any[RequestChannel.Request](), anyLong) + + // Verify the Session data passed to fetch quota manager is exactly what was defined in the test + val capturedSession = sessionCaptorFetch.getValue + assertNotNull(capturedSession) + assertNotNull(capturedSession.principal) + assertEquals(KafkaPrincipal.USER_TYPE, capturedSession.principal.getPrincipalType) + assertEquals("test-user", capturedSession.principal.getName) + assertEquals(testClientAddress, capturedSession.clientAddress) + assertEquals("test-user", capturedSession.sanitizedUser) + + // Verify client ID passed to fetch quota manager matches what was defined + val capturedClientId = clientIdCaptor.getValue + assertEquals(testClientId, capturedClientId) + + // Verify the Request data passed to request quota manager is exactly what was defined + val capturedRequest = requestCaptor.getValue + assertNotNull(capturedRequest) + assertEquals(testClientId, capturedRequest.header.clientId) + assertEquals(testPrincipal, capturedRequest.context.principal) + assertEquals(testClientAddress, 
capturedRequest.context.clientAddress) + assertEquals(ApiKeys.SHARE_FETCH, capturedRequest.header.apiKey) } @Test def testProduceResponseContainsNewLeaderOnNotLeaderOrFollower(): Unit = { val topic = "topic" - addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3) + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") + addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3, topicId = topicId) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) - val tp = new TopicPartition(topic, 0) + val tp = new TopicIdPartition(topicId, 0, topic) val partition = mock(classOf[Partition]) val newLeaderId = 2 val newLeaderEpoch = 5 + val produceData = new ProduceRequestData.TopicProduceData() + .setPartitionData(util.List.of( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) + + if (version >= 13 ) { + produceData.setTopicId(topicId) + } else { + produceData.setName(tp.topic) + } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) - .iterator)) + util.List.of(produceData).iterator)) .setAcks(1.toShort) .setTimeoutMs(5000)) .build(version.toShort) @@ -2283,14 +2799,14 @@ class KafkaApisTest extends Logging { any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Right(partition)) + when(replicaManager.getPartitionOrError(tp.topicPartition())).thenAnswer(_ => Right(partition)) when(partition.leaderReplicaIdOpt).thenAnswer(_ => Some(newLeaderId)) when(partition.getLeaderEpoch).thenAnswer(_ => newLeaderEpoch) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) @@ -2313,24 +2829,31 @@ class KafkaApisTest extends Logging { @Test def testProduceResponseReplicaManagerLookupErrorOnNotLeaderOrFollower(): Unit = { val topic = "topic" - addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3) + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") + addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3, topicId = topicId) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicPartition, 
PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) - val tp = new TopicPartition(topic, 0) + val tp = new TopicIdPartition(topicId, 0, topic) + + val produceData = new ProduceRequestData.TopicProduceData() + .setPartitionData(util.List.of( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) + if (version >= 13 ) { + produceData.setTopicId(topicId) + } else { + produceData.setName(tp.topic) + } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) - .iterator)) + util.List.of(produceData).iterator)) .setAcks(1.toShort) .setTimeoutMs(5000)) .build(version.toShort) @@ -2347,12 +2870,12 @@ class KafkaApisTest extends Logging { any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + when(replicaManager.getPartitionOrError(tp.topicPartition())).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) @@ -2376,20 +2899,29 @@ class KafkaApisTest extends Logging { @Test def testProduceResponseMetadataLookupErrorOnNotLeaderOrFollower(): Unit = { val topic = "topic" + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") metadataCache = mock(classOf[KRaftMetadataCache]) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) - val tp = new TopicPartition(topic, 0) + val tp = new TopicIdPartition(topicId, 0, topic) + + val topicProduceData = new ProduceRequestData.TopicProduceData() + + if (version >= 13 ) { + topicProduceData.setTopicId(topicId) + } else { + topicProduceData.setName(tp.topic) + } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( + util.List.of(topicProduceData + .setPartitionData(util.List.of( new 
ProduceRequestData.PartitionProduceData() .setIndex(tp.partition) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) @@ -2410,16 +2942,21 @@ class KafkaApisTest extends Logging { any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + when(replicaManager.getPartitionOrError(tp.topicPartition)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) - when(metadataCache.contains(tp)).thenAnswer(_ => true) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + when(metadataCache.contains(tp.topicPartition())).thenAnswer(_ => true) when(metadataCache.getLeaderAndIsr(tp.topic(), tp.partition())).thenAnswer(_ => Optional.empty()) when(metadataCache.getAliveBrokerNode(any(), any())).thenReturn(Optional.empty()) - kafkaApis = createKafkaApis() + if (version >= 13) { + when(metadataCache.getTopicName(tp.topicId())).thenReturn(Optional.of(tp.topic())) + } else { + when(metadataCache.getTopicId(tp.topic())).thenReturn(tp.topicId()) + } + val kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) val response = verifyNoThrottling[ProduceResponse](request) @@ -2440,21 +2977,28 @@ class KafkaApisTest extends Logging { val topic = "topic" val transactionalId = "txn1" - addTopicToMetadataCache(topic, numPartitions = 2) + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") + val tp = new TopicIdPartition(topicId, 0, "topic") + addTopicToMetadataCache(topic, numPartitions = 2, topicId = tp.topicId()) for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val tp = new TopicPartition("topic", 0) + val produceData = new ProduceRequestData.TopicProduceData() + .setPartitionData(util.List.of( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withTransactionalRecords(Compression.NONE, 0, 0, 0, new SimpleRecord("test".getBytes))))) + if (version >= 13 ) { + produceData.setTopicId(topicId) + } else { + produceData.setName(tp.topic) + } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic).setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withTransactionalRecords(Compression.NONE, 0, 0, 0, new SimpleRecord("test".getBytes)))))) + util.List.of(produceData) .iterator)) .setAcks(1.toShort) .setTransactionalId(transactionalId) @@ -2491,7 +3035,7 @@ class KafkaApisTest extends Logging { val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId) val addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient( - "txnlId", 15L, 0.toShort, List(invalidTopicPartition).asJava + "txnlId", 15L, 0.toShort, util.List.of(invalidTopicPartition) ).build() val request = buildRequest(addPartitionsToTxnRequest) @@ -2515,13 +3059,13 @@ class 
KafkaApisTest extends Logging { @Test def requiredAclsNotPresentWriteTxnMarkersThrowsAuthorizationException(): Unit = { val topicPartition = new TopicPartition("t", 0) - val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) + val (_, request) = createWriteTxnMarkersRequest(util.List.of(topicPartition)) val authorizer: Authorizer = mock(classOf[Authorizer]) val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) - val alterActions = Collections.singletonList(new Action(AclOperation.ALTER, clusterResource, 1, true, false)) - val clusterActions = Collections.singletonList(new Action(AclOperation.CLUSTER_ACTION, clusterResource, 1, true, true)) - val deniedList = Collections.singletonList(AuthorizationResult.DENIED) + val alterActions = util.List.of(new Action(AclOperation.ALTER, clusterResource, 1, true, false)) + val clusterActions = util.List.of(new Action(AclOperation.CLUSTER_ACTION, clusterResource, 1, true, true)) + val deniedList = util.List.of(AuthorizationResult.DENIED) when(authorizer.authorize( request.context, alterActions @@ -2539,8 +3083,8 @@ class KafkaApisTest extends Logging { @Test def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = { val topicPartition = new TopicPartition("t", 0) - val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) - val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava + val (_, request) = createWriteTxnMarkersRequest(util.List.of(topicPartition)) + val expectedErrors = util.Map.of(topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) when(replicaManager.onlinePartition(topicPartition)) @@ -2563,9 +3107,9 @@ class KafkaApisTest extends Logging { // with no records. 
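+    // The request below carries two commit markers from different producers for the same
+    // __consumer_offsets partition.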
val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - asList( - new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), - new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), + util.List.of( + new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, util.List.of(topicPartition)), + new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, util.List.of(topicPartition)), )).build() val request = buildRequest(writeTxnMarkersRequest) val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) @@ -2597,11 +3141,12 @@ class KafkaApisTest extends Logging { def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = { val tp1 = new TopicPartition("t", 0) val tp2 = new TopicPartition("t1", 0) - val (_, request) = createWriteTxnMarkersRequest(asList(tp1, tp2)) - val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava + val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") + val (_, request) = createWriteTxnMarkersRequest(util.List.of(tp1, tp2)) + val expectedErrors = util.Map.of(tp1, Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2, Errors.NONE) val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) when(replicaManager.onlinePartition(tp1)) .thenReturn(None) @@ -2616,10 +3161,9 @@ class KafkaApisTest extends Logging { any(), responseCallback.capture(), any(), - any(), ArgumentMatchers.eq(requestLocal), any() - )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))) + )).thenAnswer(_ => responseCallback.getValue.apply(Map(new TopicIdPartition(topicId,tp2) -> new PartitionResponse(Errors.NONE)))) kafkaApis = createKafkaApis() kafkaApis.handleWriteTxnMarkersRequest(request, requestLocal) verify(requestChannel).sendResponse( @@ -2636,7 +3180,7 @@ class KafkaApisTest extends Logging { @ValueSource(strings = Array("ALTER", "CLUSTER_ACTION")) def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(allowedAclOperation: String): Unit = { val topicPartition = new TopicPartition("t", 0) - val request = createWriteTxnMarkersRequest(asList(topicPartition))._2 + val request = createWriteTxnMarkersRequest(util.List.of(topicPartition))._2 when(replicaManager.onlinePartition(topicPartition)) .thenReturn(Some(mock(classOf[Partition]))) @@ -2645,15 +3189,15 @@ class KafkaApisTest extends Logging { // Allowing WriteTxnMarkers API with the help of allowedAclOperation parameter. 
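+    // The test is parameterized with ALTER and CLUSTER_ACTION; holding either ACL on the cluster
+    // resource should be enough for the markers to be appended to the log.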
val authorizer: Authorizer = mock(classOf[Authorizer]) val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) - val allowedAction = Collections.singletonList(new Action( + val allowedAction = util.List.of(new Action( AclOperation.fromString(allowedAclOperation), clusterResource, 1, true, allowedAclOperation.equals("CLUSTER_ACTION") )) - val deniedList = Collections.singletonList(AuthorizationResult.DENIED) - val allowedList = Collections.singletonList(AuthorizationResult.ALLOWED) + val deniedList = util.List.of(AuthorizationResult.DENIED) + val allowedList = util.List.of(AuthorizationResult.ALLOWED) when(authorizer.authorize( ArgumentMatchers.eq(request.context), any() @@ -2672,7 +3216,6 @@ class KafkaApisTest extends Logging { any(), any(), any(), - any(), ArgumentMatchers.eq(requestLocal), any()) } @@ -2684,6 +3227,9 @@ class KafkaApisTest extends Logging { val foo0 = new TopicPartition("foo", 0) val foo1 = new TopicPartition("foo", 1) + val topicIds = Map( + Topic.GROUP_METADATA_TOPIC_NAME -> Uuid.fromString("JaTH2JYK2ed2GzUapg8tgg"), + "foo" -> Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg")) val allPartitions = List( offset0, offset1, @@ -2692,29 +3238,29 @@ class KafkaApisTest extends Logging { ) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - List( + util.List.of( new TxnMarkerEntry( 1L, 1.toShort, 0, TransactionResult.COMMIT, - List(offset0, foo0).asJava + util.List.of(offset0, foo0) ), new TxnMarkerEntry( 2L, 1.toShort, 0, TransactionResult.ABORT, - List(offset1, foo1).asJava + util.List.of(offset1, foo1) ) - ).asJava + ) ).build() val requestChannelRequest = buildRequest(writeTxnMarkersRequest) allPartitions.foreach { tp => - when(replicaManager.onlinePartition(tp)) - .thenReturn(Some(mock(classOf[Partition]))) + when(replicaManager.onlinePartition(tp)).thenReturn(Some(mock(classOf[Partition]))) + when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicIds.get(tp.topic()).getOrElse(Uuid.ZERO_UUID), tp)) } when(groupCoordinator.completeTransaction( @@ -2735,10 +3281,10 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) )).thenReturn(CompletableFuture.completedFuture[Void](null)) - val entriesPerPartition: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = - ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val entriesPerPartition: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) + val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords( ArgumentMatchers.eq(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT.toLong), @@ -2748,7 +3294,6 @@ class KafkaApisTest extends Logging { entriesPerPartition.capture(), responseCallback.capture(), any(), - any(), ArgumentMatchers.eq(RequestLocal.noCaching), any() )).thenAnswer { _ => @@ -2762,44 +3307,44 @@ class KafkaApisTest extends Logging { kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching) val expectedResponse = new WriteTxnMarkersResponseData() - .setMarkers(List( + .setMarkers(util.List.of( new 
WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(1L) - .setTopics(List( + .setTopics(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(List( + .setPartitions(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava), + )), new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) - ).asJava), + )) + )), new WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(2L) - .setTopics(List( + .setTopics(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(List( + .setPartitions(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava), + )), new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava) - ).asJava) - ).asJava) + )) + )) + )) val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) assertEquals(normalize(expectedResponse), normalize(response.data)) @@ -2816,15 +3361,15 @@ class KafkaApisTest extends Logging { val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - List( + util.List.of( new TxnMarkerEntry( 1L, 1.toShort, 0, TransactionResult.COMMIT, - List(offset0).asJava + util.List.of(offset0) ) - ).asJava + ) ).build() val requestChannelRequest = buildRequest(writeTxnMarkersRequest) @@ -2851,19 +3396,19 @@ class KafkaApisTest extends Logging { } val expectedResponse = new WriteTxnMarkersResponseData() - .setMarkers(List( + .setMarkers(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(1L) - .setTopics(List( + .setTopics(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(List( + .setPartitions(util.List.of( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(expectedError.code) - ).asJava) - ).asJava) - ).asJava) + )) + )) + )) val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) assertEquals(normalize(expectedResponse), normalize(response.data)) @@ -2909,18 +3454,18 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroups(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( "group-1", "group-2", "group-3" - ).asJava) + )) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - List("group-1", "group-2", "group-3").asJava, + util.List.of("group-1", "group-2", "group-3"), RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = 
createKafkaApis() @@ -2929,7 +3474,7 @@ class KafkaApisTest extends Logging { RequestLocal.noCaching ) - val results = new DeleteGroupsResponseData.DeletableGroupResultCollection(List( + val results = new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") .setErrorCode(Errors.NONE.code), @@ -2939,7 +3484,7 @@ class KafkaApisTest extends Logging { new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code), - ).iterator.asJava) + ).iterator) future.complete(results) @@ -2952,18 +3497,18 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroupsFutureFailed(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( "group-1", "group-2", "group-3" - ).asJava) + )) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - List("group-1", "group-2", "group-3").asJava, + util.List.of("group-1", "group-2", "group-3"), RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis() @@ -2975,7 +3520,7 @@ class KafkaApisTest extends Logging { future.completeExceptionally(Errors.NOT_CONTROLLER.exception) val expectedDeleteGroupsResponse = new DeleteGroupsResponseData() - .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( + .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") .setErrorCode(Errors.NOT_CONTROLLER.code), @@ -2985,7 +3530,7 @@ class KafkaApisTest extends Logging { new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.NOT_CONTROLLER.code), - ).iterator.asJava)) + ).iterator)) val response = verifyNoThrottling[DeleteGroupsResponse](requestChannelRequest) assertEquals(expectedDeleteGroupsResponse, response.data) @@ -2993,11 +3538,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroupsAuthenticationFailed(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( "group-1", "group-2", "group-3" - ).asJava) + )) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) @@ -3014,15 +3559,15 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - List("group-2", "group-3").asJava, + util.List.of("group-2", "group-3"), RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) @@ -3031,17 +3576,17 @@ class KafkaApisTest extends Logging { RequestLocal.noCaching ) - 
future.complete(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( + future.complete(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-2") .setErrorCode(Errors.NONE.code), new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.NONE.code) - ).iterator.asJava)) + ).iterator)) val expectedDeleteGroupsResponse = new DeleteGroupsResponseData() - .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( + .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-2") .setErrorCode(Errors.NONE.code), @@ -3050,7 +3595,7 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)).iterator.asJava)) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)).iterator)) val response = verifyNoThrottling[DeleteGroupsResponse](requestChannelRequest) assertEquals(expectedDeleteGroupsResponse, response.data) @@ -3058,12 +3603,12 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroups(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( "group-1", "group-2", "group-3", "group-4" - ).asJava) + )) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3075,15 +3620,15 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis() kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - val groupResults = List( + val groupResults = util.List.of( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") .setProtocolType("consumer") .setProtocolData("range") .setGroupState("Stable") - .setMembers(List( + .setMembers(util.List.of( new DescribeGroupsResponseData.DescribedGroupMember() - .setMemberId("member-1")).asJava), + .setMemberId("member-1"))), new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code), @@ -3095,7 +3640,7 @@ class KafkaApisTest extends Logging { .setGroupState("Dead") .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code) .setErrorMessage("Group group-4 is not a classic group.") - ).asJava + ) future.complete(groupResults) @@ -3106,11 +3651,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroupsFutureFailed(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( "group-1", "group-2", "group-3" - ).asJava) + )) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3122,7 +3667,7 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis() kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(List( + val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(util.List.of( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code), @@ -3132,7 +3677,7 @@ class KafkaApisTest extends Logging { new DescribeGroupsResponseData.DescribedGroup() 
.setGroupId("group-3") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) - ).asJava) + )) future.completeExceptionally(Errors.UNKNOWN_SERVER_ERROR.exception) @@ -3142,11 +3687,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroupsAuthenticationFailed(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( "group-1", "group-2", "group-3" - ).asJava) + )) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3163,26 +3708,26 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream(). + map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val future = new CompletableFuture[util.List[DescribeGroupsResponseData.DescribedGroup]]() when(groupCoordinator.describeGroups( requestChannelRequest.context, - List("group-2").asJava + util.List.of("group-2") )).thenReturn(future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - future.complete(List( + future.complete(util.List.of( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code) - ).asJava) + )) - val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(List( + val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(util.List.of( // group-1 and group-3 are first because unauthorized are put first into the response. 
new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") @@ -3193,7 +3738,7 @@ class KafkaApisTest extends Logging { new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code) - ).asJava) + )) val response = verifyNoThrottling[DescribeGroupsResponse](requestChannelRequest) assertEquals(expectedDescribeGroupsResponse, response.data) @@ -3208,14 +3753,14 @@ class KafkaApisTest extends Logging { val topics = new OffsetDeleteRequestTopicCollection() topics.add(new OffsetDeleteRequestTopic() .setName("topic-1") - .setPartitions(Seq( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), - new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava)) + new OffsetDeleteRequestPartition().setPartitionIndex(1)))) topics.add(new OffsetDeleteRequestTopic() .setName("topic-2") - .setPartitions(Seq( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), - new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava)) + new OffsetDeleteRequestPartition().setPartitionIndex(1)))) val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() @@ -3235,28 +3780,28 @@ class KafkaApisTest extends Logging { kafkaApis.handleOffsetDeleteRequest(request, requestLocal) val offsetDeleteResponseData = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection(List( + .setTopics(new OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection(util.List.of( new OffsetDeleteResponseData.OffsetDeleteResponseTopic() .setName("topic-1") - .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)), + ).iterator)), new OffsetDeleteResponseData.OffsetDeleteResponseTopic() .setName("topic-2") - .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)) - ).asJava.iterator())) + ).iterator)) + ).iterator())) future.complete(offsetDeleteResponseData) @@ -3272,30 +3817,30 @@ class KafkaApisTest extends Logging { val offsetDeleteRequest = new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(List( + .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of( // foo exists but has only 2 partitions. new OffsetDeleteRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1), new OffsetDeleteRequestPartition().setPartitionIndex(2) - ).asJava), + )), // bar exists. 
new OffsetDeleteRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - ).asJava), + )), // zar does not exist. new OffsetDeleteRequestTopic() .setName("zar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - ).asJava), - ).asJava.iterator)) + )), + ).iterator)) val requestChannelRequest = buildRequest(new OffsetDeleteRequest.Builder(offsetDeleteRequest).build()) @@ -3303,20 +3848,20 @@ class KafkaApisTest extends Logging { // only existing topic-partitions. val expectedOffsetDeleteRequest = new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(List( + .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of( new OffsetDeleteRequestTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - ).asJava), + )), new OffsetDeleteRequestTopic() .setName("bar") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - ).asJava) - ).asJava.iterator)) + )) + ).iterator)) val future = new CompletableFuture[OffsetDeleteResponseData]() when(groupCoordinator.deleteOffsets( @@ -3332,34 +3877,34 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val offsetDeleteResponse = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(List( + .setTopics(new OffsetDeleteResponseTopicCollection(util.List.of( new OffsetDeleteResponseTopic() .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)), + ).iterator)), new OffsetDeleteResponseTopic() .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)), - ).asJava.iterator)) + ).iterator)), + ).iterator)) val expectedOffsetDeleteResponse = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(List( + .setTopics(new OffsetDeleteResponseTopicCollection(util.List.of( new OffsetDeleteResponseTopic() .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( // foo-2 is first because partitions failing the validation // are put in the response first. new OffsetDeleteResponsePartition() @@ -3371,30 +3916,30 @@ class KafkaApisTest extends Logging { new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)), + ).iterator)), // zar is before bar because topics failing the validation are // put in the response first. 
new OffsetDeleteResponseTopic() .setName("zar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - ).asJava.iterator)), + ).iterator)), new OffsetDeleteResponseTopic() .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava.iterator)), - ).asJava.iterator)) + ).iterator)), + ).iterator)) future.complete(offsetDeleteResponse) val response = verifyNoThrottling[OffsetDeleteResponse](requestChannelRequest) @@ -3413,7 +3958,7 @@ class KafkaApisTest extends Logging { val topics = new OffsetDeleteRequestTopicCollection() topics.add(new OffsetDeleteRequestTopic() .setName(topic) - .setPartitions(Collections.singletonList( + .setPartitions(util.List.of( new OffsetDeleteRequestPartition().setPartitionIndex(invalidPartitionId)))) val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() @@ -3484,9 +4029,9 @@ class KafkaApisTest extends Logging { val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(Collections.singletonList(new OffsetDeleteRequestTopic() + .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of(new OffsetDeleteRequestTopic() .setName("topic-unknown") - .setPartitions(Collections.singletonList(new OffsetDeleteRequestPartition() + .setPartitions(util.List.of(new OffsetDeleteRequestPartition() .setPartitionIndex(0) )) ).iterator())) @@ -3534,15 +4079,15 @@ class KafkaApisTest extends Logging { .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) + callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) }) - val targetTimes = List(new ListOffsetsTopic() + val targetTimes = util.List.of(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(List(new ListOffsetsPartition() + .setPartitions(util.List.of(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP) - .setCurrentLeaderEpoch(currentLeaderEpoch.get)).asJava)).asJava + .setCurrentLeaderEpoch(currentLeaderEpoch.get)))) val listOffsetRequest = ListOffsetsRequest.Builder.forConsumer(true, isolationLevel) .setTargetTimes(targetTimes).build() val request = buildRequest(listOffsetRequest) @@ -3676,20 +4221,19 @@ class KafkaApisTest extends Logging { val unauthorizedTopic = "unauthorized-topic" val authorizedTopic = "authorized-topic" - val expectedActions = Seq( + val expectedActions = util.List.of( new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, unauthorizedTopic, PatternType.LITERAL), 1, true, true), new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true) ) - 
when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions.asJava)))) + when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions)))) .thenAnswer { invocation => - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]].asScala - actions.map { action => - if (action.resourcePattern().name().equals(authorizedTopic)) - AuthorizationResult.ALLOWED - else - AuthorizationResult.DENIED - }.asJava + val actions = invocation.getArgument(1, classOf[util.List[Action]]) + val results = new util.ArrayList[AuthorizationResult]() + actions.forEach { a => + results.add(if (a.resourcePattern.name == authorizedTopic) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED) + } + results } // 3. Set up MetadataCache @@ -3704,15 +4248,15 @@ class KafkaApisTest extends Logging { .setPartitionId(0) .setLeader(0) .setLeaderEpoch(0) - .setReplicas(Collections.singletonList(0)) - .setIsr(Collections.singletonList(0)) + .setReplicas(util.List.of(0)) + .setIsr(util.List.of(0)) } val partitionRecords = Seq(authorizedTopicId, unauthorizedTopicId).map(createDummyPartitionRecord) MetadataCacheTest.updateCache(metadataCache, partitionRecords) // 4. Send TopicMetadataReq using topicId - val metadataReqByTopicId = new MetadataRequest.Builder(util.Arrays.asList(authorizedTopicId, unauthorizedTopicId)).build() + val metadataReqByTopicId = MetadataRequest.Builder.forTopicIds(util.Set.of(authorizedTopicId, unauthorizedTopicId)).build() val repByTopicId = buildRequest(metadataReqByTopicId, plaintextListener) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) @@ -3737,7 +4281,7 @@ class KafkaApisTest extends Logging { // 4. 
Send TopicMetadataReq using topic name reset(clientRequestQuotaManager, requestChannel) - val metadataReqByTopicName = new MetadataRequest.Builder(util.Arrays.asList(authorizedTopic, unauthorizedTopic), false).build() + val metadataReqByTopicName = new MetadataRequest.Builder(util.List.of(authorizedTopic, unauthorizedTopic), false).build() val repByTopicName = buildRequest(metadataReqByTopicName, plaintextListener) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleTopicMetadataRequest(repByTopicName) @@ -3748,7 +4292,7 @@ class KafkaApisTest extends Logging { metadataByTopicName.foreach { case (topicName, metadataResponseTopic) => if (topicName == unauthorizedTopic) { assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), metadataResponseTopic.errorCode()) - // Do not return topic Id on unauthorized error + // Do not return topicId on unauthorized error assertEquals(Uuid.ZERO_UUID, metadataResponseTopic.topicId()) } else { assertEquals(Errors.NONE.code(), metadataResponseTopic.errorCode()) @@ -3784,10 +4328,10 @@ class KafkaApisTest extends Logging { Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))) }) - val fetchData = Map(tidp -> new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, - Optional.empty())).asJava - val fetchDataBuilder = Map(tp -> new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, - Optional.empty())).asJava + val fetchData = util.Map.of(tidp, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, + Optional.empty())) + val fetchDataBuilder = util.Map.of(tp, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, + Optional.empty())) val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, false, false) @@ -3800,7 +4344,7 @@ class KafkaApisTest extends Logging { any[util.Map[Uuid, String]])).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val fetchRequest = new FetchRequest.Builder(9, 9, -1, -1, 100, 0, fetchDataBuilder) .build() @@ -3836,10 +4380,10 @@ class KafkaApisTest extends Logging { when(replicaManager.getLogConfig(ArgumentMatchers.eq(unresolvedFoo.topicPartition))).thenReturn(None) // Simulate unknown topic ID in the context - val fetchData = Map(new TopicIdPartition(foo.topicId, new TopicPartition(null, foo.partition)) -> - new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())).asJava - val fetchDataBuilder = Map(foo.topicPartition -> new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, - Optional.empty())).asJava + val fetchData = util.Map.of(new TopicIdPartition(foo.topicId, new TopicPartition(null, foo.partition)), + new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())) + val fetchDataBuilder = util.Map.of(foo.topicPartition, new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, + Optional.empty())) val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, true, replicaId >= 0) @@ -3848,13 +4392,13 @@ class KafkaApisTest extends Logging { ApiKeys.FETCH.latestVersion, fetchMetadata, replicaId >= 0, - Collections.singletonMap(foo, new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())), - Collections.emptyList[TopicIdPartition], + util.Map.of(foo, new 
FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())), + util.List.of[TopicIdPartition], metadataCache.topicIdsToNames()) ).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) // If replicaId is -1 we will build a consumer request. Any non-negative replicaId will build a follower request. val replicaEpoch = if (replicaId < 0) -1 else 1 @@ -3884,7 +4428,7 @@ class KafkaApisTest extends Logging { addTopicToMetadataCache(tp.topic, numPartitions = 1, numBrokers = 3, topicId) when(replicaManager.getLogConfig(ArgumentMatchers.eq(tp))).thenReturn(Some(LogConfig.fromProps( - Collections.emptyMap(), + util.Map.of(), new Properties() ))) @@ -3907,10 +4451,10 @@ class KafkaApisTest extends Logging { Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))) }) - val fetchData = Map(tidp -> new FetchRequest.PartitionData(topicId, 0, 0, 1000, - Optional.empty())).asJava - val fetchDataBuilder = Map(tp -> new FetchRequest.PartitionData(topicId, 0, 0, 1000, - Optional.empty())).asJava + val fetchData = util.Map.of(tidp, new FetchRequest.PartitionData(topicId, 0, 0, 1000, + Optional.empty())) + val fetchDataBuilder = util.Map.of(tp, new FetchRequest.PartitionData(topicId, 0, 0, 1000, + Optional.empty())) val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, true, false) @@ -3923,7 +4467,7 @@ class KafkaApisTest extends Logging { any[util.Map[Uuid, String]])).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val fetchRequest = new FetchRequest.Builder(16, 16, -1, -1, 100, 0, fetchDataBuilder) .build() @@ -3948,7 +4492,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -3957,46 +4501,42 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new 
ShareRequestMetadata(memberId, shareSessionEpoch), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName))) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex))))) + .setPartitionIndex(partitionIndex)).iterator))).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4004,19 +4544,20 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) } @Test def testHandleShareFetchRequestInvalidRequestOnInitialEpoch(): Unit = { val topicName = "foo" val topicId = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4026,41 +4567,41 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new 
ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( new TopicIdPartition(topicId, partitionIndex, topicName), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenThrow( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenThrow( Errors.INVALID_REQUEST.exception() ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2 + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2 ))) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) .setAcknowledgementBatches(util.List.of( @@ -4069,16 +4610,12 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4090,13 +4627,13 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )) - )) + ).iterator)) + ).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4107,12 +4644,13 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) } @Test @@ -4120,7 +4658,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4129,56 +4667,52 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ).thenThrow(Errors.INVALID_REQUEST.exception) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, 
ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4186,30 +4720,31 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(-1). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator)) + ).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4225,7 +4760,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4233,32 +4768,28 @@ class KafkaApisTest extends Logging { FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4271,7 +4802,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4280,18 +4811,18 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( @@ -4302,21 +4833,21 @@ class KafkaApisTest extends Logging { cachedSharePartitions.mustAdd(new CachedSharePartition( new TopicIdPartition(topicId, partitionIndex, topicName), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -4325,16 +4856,12 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4347,7 +4874,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4365,21 +4892,21 @@ class KafkaApisTest extends Logging { cachedSharePartitions.mustAdd(new CachedSharePartition( new TopicIdPartition(topicId, partitionIndex, topicName), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -4388,16 +4915,12 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4410,48 +4933,44 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val records = MemoryRecords.EMPTY when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.REPLICA_NOT_AVAILABLE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List().asJava)) - ).asJava) + .setAcquiredRecords(new util.ArrayList(util.List.of)) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4459,12 +4978,13 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.REPLICA_NOT_AVAILABLE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertTrue(topicResponses.get(0).partitions.get(0).acquiredRecords.toArray().isEmpty) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.REPLICA_NOT_AVAILABLE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertTrue(topicResponse.partitions.get(0).acquiredRecords.toArray().isEmpty) } @Test @@ -4472,7 +4992,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4480,47 +5000,43 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, partitionIndex, topicName) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, partitionIndex, topicName), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - 
any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4528,12 +5044,13 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) val memberId2 = Uuid.randomUuid() @@ -4542,11 +5059,11 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId2.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4562,7 +5079,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4570,47 +5087,43 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. 
- kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4618,30 +5131,63 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). // Invalid share session epoch, should have 1 for the second request. - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopicId(topicId). + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(0)).iterator))).iterator)) + + shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) + request = buildRequest(shareFetchRequest) + kafkaApis.handleShareFetchRequest(request) + response = verifyNoThrottling[ShareFetchResponse](request) + responseData = response.data() + + assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, responseData.errorCode) + } + + @Test + def testHandleShareFetchRequestWhenShareSessionCacheIsFull(): Unit = { + val topicId = Uuid.randomUuid() + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + addTopicToMetadataCache("foo", 1, topicId = topicId) + + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) + .thenThrow(Errors.SHARE_SESSION_LIMIT_REACHED.exception) + + when(sharePartitionManager.createIdleShareFetchTimerTask(anyLong())) + .thenReturn(CompletableFuture.completedFuture(null)) + + val shareFetchRequestData = new ShareFetchRequestData(). + setGroupId("group"). + setMemberId(Uuid.randomUuid.toString). + setShareSessionEpoch(0). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) - shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - request = buildRequest(shareFetchRequest) + val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) + val request = buildRequest(shareFetchRequest) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) - response = verifyNoThrottling[ShareFetchResponse](request) - responseData = response.data() + val response = verifyNoThrottling[ShareFetchResponse](request) + val responseData = response.data() - assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, responseData.errorCode) + assertEquals(Errors.SHARE_SESSION_LIMIT_REACHED.code, responseData.errorCode) } @Test @@ -4649,7 +5195,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() @@ -4660,62 +5206,62 @@ class KafkaApisTest extends Logging { val records3 = memoryRecords(10, 20) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) 
.setRecords(records3) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(20) .setLastOffset(29) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] @@ -4723,38 +5269,34 @@ class KafkaApisTest extends Logging { new TopicIdPartition(topicId, partitionIndex, topicName), false) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 10L, 3)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 3)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex))))) + .setPartitionIndex(partitionIndex)).iterator))).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. 
- kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4762,8 +5304,9 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) + var topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) compareResponsePartitions( partitionIndex, @@ -4771,23 +5314,23 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records1, expectedAcquiredRecords(0, 9, 1), - topicResponses.get(0).partitions.get(0) + topicResponse.partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(0). setLastOffset(9). - setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava))))))) + setAcknowledgeTypes(util.List.of[java.lang.Byte](1.toByte))))).iterator))).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4799,8 +5342,9 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) + topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) compareResponsePartitions( partitionIndex, @@ -4808,23 +5352,23 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records2, expectedAcquiredRecords(10, 19, 1), - topicResponses.get(0).partitions.get(0) + topicResponse.partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). setAcknowledgementBatches(util.List.of( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(10). setLastOffset(19). 
- setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava))))))) + setAcknowledgeTypes(util.List.of[java.lang.Byte](1.toByte))))).iterator))).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4836,8 +5380,9 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) + topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) compareResponsePartitions( partitionIndex, @@ -4845,7 +5390,7 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records3, expectedAcquiredRecords(20, 29, 1), - topicResponses.get(0).partitions.get(0) + topicResponse.partitions.get(0) ) } @@ -4863,7 +5408,7 @@ class KafkaApisTest extends Logging { val topicName4 = "foo4" val topicId4 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 2, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -4884,84 +5429,84 @@ class KafkaApisTest extends Logging { val groupId = "group" when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)), - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> + ))), + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p2_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ).asJava)), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> + ))), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p1_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(43) .setLastOffset(52) .setDeliveryCount(1) - ).asJava)), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> + ))), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p2_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new 
util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(17) .setLastOffset(26) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t3_p1_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(54) .setLastOffset(73) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t3_p1_2) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(74) .setLastOffset(93) .setDeliveryCount(1) - ).asJava)), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> + ))), + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t4_p1_1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(24) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) val cachedSharePartitions1 = new ImplicitLinkedHashCollection[CachedSharePartition] @@ -4989,7 +5534,7 @@ class KafkaApisTest extends Logging { new TopicIdPartition(topicId4, 0, topicName4), false )) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), @@ -4997,87 +5542,83 @@ class KafkaApisTest extends Logging { new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 0L, 0L, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions2, 0L, 0L, 3)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions2, 3)) ).thenReturn(new FinalContext()) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> + 
CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - ).asJava) + )) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )) - )) + ).iterator)) + ).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5149,14 +5690,14 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), - )) + ).iterator)), + ).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5167,8 +5708,9 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId3, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId3, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) compareResponsePartitions( 0, @@ -5176,21 +5718,21 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records_t3_p1_1, expectedAcquiredRecords(54, 73, 1), - topicResponses.get(0).partitions.get(0) + topicResponse.partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), - )) + ).iterator)), + ).iterator)) .setForgottenTopicsData(util.List.of( new ForgottenTopic() .setTopicId(topicId1) @@ -5242,10 +5784,10 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(-1). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -5262,10 +5804,10 @@ class KafkaApisTest extends Logging { .setLastOffset(19) .setAcknowledgeTypes(util.List.of(1.toByte)), )) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -5282,10 +5824,10 @@ class KafkaApisTest extends Logging { .setLastOffset(26) .setAcknowledgeTypes(util.List.of(1.toByte)), )) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -5294,10 +5836,10 @@ class KafkaApisTest extends Logging { .setLastOffset(93) .setAcknowledgeTypes(util.List.of(1.toByte)), )), - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -5306,8 +5848,8 @@ class KafkaApisTest extends Logging { .setLastOffset(24) .setAcknowledgeTypes(util.List.of(1.toByte)), )), - )), - )) + ).iterator)), + ).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5327,7 +5869,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -5343,41 +5885,41 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1 -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)), - tp2 -> + ))), + tp2, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(14) .setDeliveryCount(1) - ).asJava)), - tp3 -> + ))), + tp3, new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p2) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(19) .setDeliveryCount(1) - ).asJava)), - ).asJava) + ))), + )) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() @@ -5395,31 +5937,27 @@ class KafkaApisTest extends Logging { 
setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )), - )) + ).iterator)), + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -5479,7 +6017,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -5493,29 +6031,26 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1 -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) - val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - erroneousPartitions.put( + val erroneousPartitions = util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( tp2, new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - ) - erroneousPartitions.put( + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), tp3, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) @@ -5535,31 +6070,27 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), - )), - )) + ).iterator)), + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -5613,7 +6144,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -5627,26 +6158,26 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1 -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(List().asJava)), - tp2 -> + .setAcquiredRecords(new util.ArrayList(util.List.of)), + tp2, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(List().asJava)), - tp3 -> + .setAcquiredRecords(new util.ArrayList(util.List.of)), + tp3, new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(List().asJava)) - ).asJava) + .setAcquiredRecords(new util.ArrayList(util.List.of)) + )) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() @@ -5664,31 +6195,27 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )), - )) + ).iterator)), + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -5709,7 +6236,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - Collections.emptyList[AcquiredRecords](), + util.List.of[AcquiredRecords](), partitionData1 ) @@ -5722,7 +6249,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - Collections.emptyList[AcquiredRecords](), + util.List.of[AcquiredRecords](), partitionData2 ) @@ -5735,7 +6262,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - Collections.emptyList[AcquiredRecords](), + util.List.of[AcquiredRecords](), partitionData3 ) } @@ -5750,7 +6277,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val topicId3 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) // topicName3 is not in the metadataCache. @@ -5767,30 +6294,30 @@ class KafkaApisTest extends Logging { val tp4 = new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp2 -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp2, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)), - tp3 -> + ))), + tp3, new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(19) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() @@ -5809,36 +6336,32 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). 
- setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - )), - )) + ).iterator)), + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -5859,7 +6382,7 @@ class KafkaApisTest extends Logging { Errors.TOPIC_AUTHORIZATION_FAILED.code, Errors.NONE.code, MemoryRecords.EMPTY, - Collections.emptyList[AcquiredRecords](), + util.List.of[AcquiredRecords](), partitionData1 ) @@ -5898,7 +6421,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_TOPIC_OR_PARTITION.code, Errors.NONE.code, MemoryRecords.EMPTY, - Collections.emptyList[AcquiredRecords](), + util.List.of[AcquiredRecords](), partitionData4 ) } @@ -5930,7 +6453,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() @@ -5940,33 +6463,33 @@ class KafkaApisTest extends Logging { val groupId = "group" when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, 
ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(List( + .setAcquiredRecords(new util.ArrayList(util.List.of( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ).asJava)) - ).asJava) + ))) + )) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] @@ -5974,43 +6497,39 @@ class KafkaApisTest extends Logging { new TopicIdPartition(topicId, 0, topicName), false )) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( new TopicIdPartition(topicId, partitionIndex, topicName) )) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - ).asJava) + )) ) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0))))) + .setPartitionIndex(0)).iterator))).iterator)) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -6018,20 +6537,21 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(records1, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + var topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(records1, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6040,8 +6560,8 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -6052,13 +6572,14 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).acknowledgeErrorCode) - assertEquals(records2, topicResponses.get(0).partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) + topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).acknowledgeErrorCode) + assertEquals(records2, topicResponse.partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) } @Test @@ -6067,15 +6588,28 @@ class KafkaApisTest extends Logging { val memberId: Uuid = Uuid.randomUuid() val groupId = "group" - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = { + val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) + val delta = new MetadataDelta(MetadataImage.EMPTY) + delta.replay(new FeatureLevelRecord() + .setName(MetadataVersion.FEATURE_NAME) + .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) + ) + delta.replay(new FeatureLevelRecord() + .setName(ShareVersion.FEATURE_NAME) + .setFeatureLevel(ShareVersion.SV_0.featureLevel()) + ) + cache.setImage(delta.apply(MetadataProvenance.EMPTY)) + cache + } val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6084,17 +6618,13 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) @@ -6108,20 +6638,20 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) .setAcknowledgementBatches(util.List.of( @@ -6130,21 +6660,18 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(1.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any(), any())).thenReturn(List[AuthorizationResult]( + when(authorizer.authorize(any(), any())).thenReturn(util.List.of[AuthorizationResult]( AuthorizationResult.DENIED - ).asJava) + )) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareFetchRequest) kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), ) kafkaApis.handleShareFetchRequest(request) @@ -6160,22 +6687,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val groupId = "group" when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + 
CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - ).asJava) + )) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( new FinalContext() ) @@ -6184,32 +6711,28 @@ class KafkaApisTest extends Logging { ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava)).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -6217,13 +6740,14 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).acknowledgeErrorCode) - assertNull(topicResponses.get(0).partitions.get(0).records) - assertEquals(0, topicResponses.get(0).partitions.get(0).acquiredRecords.toArray().length) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).acknowledgeErrorCode) + assertEquals(MemoryRecords.EMPTY, topicResponse.partitions.get(0).records) + assertEquals(0, topicResponse.partitions.get(0).acquiredRecords.toArray().length) } @Test @@ -6231,22 +6755,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new 
KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -6255,28 +6779,23 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -6284,10 +6803,11 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) } @Test @@ -6296,34 +6816,42 @@ class KafkaApisTest extends Logging { val memberId: Uuid = Uuid.randomUuid() val groupId = "group" - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = { + val cache = new 
KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) + val delta = new MetadataDelta(MetadataImage.EMPTY) + delta.replay(new FeatureLevelRecord() + .setName(MetadataVersion.FEATURE_NAME) + .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) + ) + delta.replay(new FeatureLevelRecord() + .setName(ShareVersion.FEATURE_NAME) + .setFeatureLevel(ShareVersion.SV_0.featureLevel()) + ) + cache.setImage(delta.apply(MetadataProvenance.EMPTY)) + cache + } val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() .setGroupId(groupId) .setMemberId(memberId.toString) .setShareSessionEpoch(1) - .setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6337,43 +6865,39 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any(), any())).thenReturn(List[AuthorizationResult]( + when(authorizer.authorize(any(), any())).thenReturn(util.List.of[AuthorizationResult]( AuthorizationResult.DENIED - ).asJava) + )) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), ) kafkaApis.handleShareAcknowledgeRequest(request) @@ -6389,12 +6913,12 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledgeSessionUpdate(any(), any())).thenThrow( Errors.INVALID_SHARE_SESSION_EPOCH.exception @@ -6404,29 +6928,24 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6440,12 +6959,12 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledgeSessionUpdate(any(), any())).thenThrow( Errors.SHARE_SESSION_NOT_FOUND.exception @@ -6455,29 +6974,24 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6491,13 +7005,13 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val groupId: String = "group" val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -6505,28 +7019,23 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(4) // end offset is less than base offset - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6535,10 +7044,11 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.INVALID_REQUEST.code, topicResponses.get(0).partitions.get(0).errorCode) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.INVALID_REQUEST.code, topicResponse.partitions.get(0).errorCode) } @Test @@ -6548,7 +7058,7 @@ class KafkaApisTest extends Logging { val partitionIndex = 0 val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) val topicPartition = topicIdPartition.topicPartition - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicPartition.topic, numPartitions = 1, numBrokers = 3, topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -6566,42 +7076,37 @@ class KafkaApisTest extends Logging { any(), any(), any() - )).thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + )).thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()) - ).asJava)) + ))) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). 
+ setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(20) - .setAcknowledgeTypes(util.Arrays.asList(1.toByte,1.toByte,0.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte,1.toByte,0.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6610,12 +7115,13 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, topicResponses.get(0).partitions.get(0).errorCode) - assertEquals(newLeaderId, topicResponses.get(0).partitions.get(0).currentLeader.leaderId) - assertEquals(newLeaderEpoch, topicResponses.get(0).partitions.get(0).currentLeader.leaderEpoch) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, topicResponse.partitions.get(0).errorCode) + assertEquals(newLeaderId, topicResponse.partitions.get(0).currentLeader.leaderId) + assertEquals(newLeaderEpoch, topicResponse.partitions.get(0).currentLeader.leaderEpoch) assertEquals(2, responseData.nodeEndpoints.asScala.head.nodeId) } @@ -6624,14 +7130,14 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) @@ -6643,28 +7149,23 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). 
setShareSessionEpoch(1). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -6677,61 +7178,56 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -6739,10 +7235,11 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) } @Test @@ -6750,22 +7247,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> + CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava) + )) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -6778,28 +7275,23 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator))).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -6807,10 +7299,11 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - assertEquals(topicId, topicResponses.get(0).topicId) - assertEquals(1, topicResponses.get(0).partitions.size()) - assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + val topicResponse = topicResponses.stream.findFirst.get + assertEquals(topicId, topicResponse.topicId) + assertEquals(1, topicResponse.partitions.size()) + assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) } private def expectedAcquiredRecords(firstOffset: Long, lastOffset: Long, deliveryCount: Int): util.List[AcquiredRecords] = { @@ -6826,15 +7319,15 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareFetchRequest(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6855,10 +7348,10 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of(2.toByte)) )) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6869,19 +7362,13 @@ class KafkaApisTest extends Logging { )), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - )) - )) + ).iterator)) + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicNames = new util.HashMap[Uuid, String] - topicNames.put(topicId1, "foo1") - topicNames.put(topicId2, "foo2") + val topicNames = util.Map.of(topicId1, "foo1", topicId2, "foo2") val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicNames, erroneous) assertEquals(4, acknowledgeBatches.size) @@ -6899,15 +7386,15 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareFetchRequestError(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(util.List.of( + setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). 
- setPartitions(util.List.of( + setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6924,10 +7411,10 @@ class KafkaApisTest extends Logging { .setLastOffset(9) .setAcknowledgeTypes(util.List.of()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) )) - )), + ).iterator)), new ShareFetchRequestData.FetchTopic() .setTopicId(topicId2) - .setPartitions(util.List.of( + .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) .setAcknowledgementBatches(util.List.of( @@ -6936,18 +7423,13 @@ class KafkaApisTest extends Logging { .setLastOffset(65) .setAcknowledgeTypes(util.List.of(3.toByte)) )) - )) - )) + ).iterator)) + ).iterator)) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicIdNames = new util.HashMap[Uuid, String] - topicIdNames.put(topicId1, "foo1") // topicId2 is not present in topicIdNames + val topicIdNames = util.Map.of(topicId1, "foo1") // topicId2 is not present in topicIdNames val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -6966,61 +7448,55 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(List( + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + .setAcknowledgeTypes(util.List.of(1.toByte)), new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(17) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)) + )), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte)) - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of(2.toByte)) + )) + ).iterator)), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(Collections.singletonList(3.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(3.toByte)) + )) + ).iterator)) + ).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicNames = new util.HashMap[Uuid, String] - topicNames.put(topicId1, "foo1") - topicNames.put(topicId2, "foo2") + val topicNames = util.Map.of(topicId1, "foo1", topicId2, "foo2") val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicNames, erroneous) assertEquals(3, acknowledgeBatches.size) @@ -7038,56 +7514,51 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareAcknowledgeRequestError(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(List( + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - ).asJava), + .setAcknowledgeTypes(util.List.of(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + )), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.emptyList()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + )) + ).iterator)), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(Collections.singletonList(3.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(3.toByte)) + )) + ).iterator)) + ).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicIdNames = new util.HashMap[Uuid, String] - topicIdNames.put(topicId1, "foo1") // topicId2 not present in topicIdNames + val topicIdNames = util.Map.of(topicId1, "foo1") // topicId2 not present in topicIdNames val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -7114,7 +7585,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7123,43 +7594,39 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - tp1 -> + .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + tp1, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp2 -> + tp2, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp3 -> + tp3, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava)) + ))) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.Arrays.asList( - new ShareAcknowledgementBatch(0, 9, Collections.singletonList(1.toByte)), - new ShareAcknowledgementBatch(10, 19, Collections.singletonList(2.toByte)) + acknowledgementData += (tp1 -> util.List.of( + new ShareAcknowledgementBatch(0, 9, util.List.of(1.toByte)), + new ShareAcknowledgementBatch(10, 19, util.List.of(2.toByte)) )) - acknowledgementData += (tp2 -> util.Arrays.asList( - new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) + acknowledgementData += (tp2 -> util.List.of( + new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) )) - acknowledgementData += (tp3 -> util.Arrays.asList( - new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)) + acknowledgementData += (tp3 -> util.List.of( 
+ new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1, topicName2) val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7193,7 +7660,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7202,43 +7669,39 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)) -> + .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)) -> + new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)) -> + new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava)) + ))) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.Arrays.asList( - new ShareAcknowledgementBatch(39, 24, Collections.singletonList(1.toByte)), // this is an invalid batch because last offset is less than base offset - new ShareAcknowledgementBatch(43, 56, Collections.singletonList(2.toByte)) + acknowledgementData += (tp1 -> util.List.of( + new ShareAcknowledgementBatch(39, 24, util.List.of(1.toByte)), // this is an invalid batch because last offset is less than base offset + new ShareAcknowledgementBatch(43, 56, util.List.of(2.toByte)) )) - acknowledgementData += (tp2 -> util.Arrays.asList( - new ShareAcknowledgementBatch(5, 19, util.Arrays.asList(0.toByte, 2.toByte)) + acknowledgementData += (tp2 -> util.List.of( + new ShareAcknowledgementBatch(5, 19, util.List.of(0.toByte, 2.toByte)) )) - acknowledgementData += (tp3 -> util.Arrays.asList( - new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)), - new ShareAcknowledgementBatch(10, 19, Collections.singletonList(1.toByte)) // this is an invalid batch because start is offset is less than previous end offset + acknowledgementData += (tp3 -> util.List.of( + new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)), + new ShareAcknowledgementBatch(10, 19, util.List.of(1.toByte)) // this is an invalid batch because start is offset is less than previous end offset )) val authorizedTopics: Set[String] 
= Set(topicName1, topicName2) val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7272,7 +7735,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() // Topic with id topicId1 is not present in Metadata Cache addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7281,44 +7744,40 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)) -> + .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)) -> + new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)) -> + new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)), new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).asJava)) + ))) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.Arrays.asList( - new ShareAcknowledgementBatch(24, 39, Collections.singletonList(1.toByte)), - new ShareAcknowledgementBatch(43, 56, Collections.singletonList(2.toByte)) + acknowledgementData += (tp1 -> util.List.of( + new ShareAcknowledgementBatch(24, 39, util.List.of(1.toByte)), + new ShareAcknowledgementBatch(43, 56, util.List.of(2.toByte)) )) - acknowledgementData += (tp2 -> util.Arrays.asList( - new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) + acknowledgementData += (tp2 -> util.List.of( + new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) )) - acknowledgementData += (tp3 -> util.Arrays.asList( - new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)), - new ShareAcknowledgementBatch(67, 87, Collections.singletonList(1.toByte)) + acknowledgementData += (tp3 -> util.List.of( + new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)), + new ShareAcknowledgementBatch(67, 87, util.List.of(1.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1) // Topic with topicId2 is not authorized val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val ackResult = kafkaApis.handleAcknowledgements( 
acknowledgementData, erroneous, @@ -7352,7 +7811,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7361,25 +7820,25 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - tp1 -> + .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + tp1, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp2 -> + tp2, new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ).asJava)) + ))) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.Arrays.asList( - new ShareAcknowledgementBatch(0, 9, Collections.singletonList(1.toByte)), - new ShareAcknowledgementBatch(10, 19, Collections.singletonList(2.toByte)) + acknowledgementData += (tp1 -> util.List.of( + new ShareAcknowledgementBatch(0, 9, util.List.of(1.toByte)), + new ShareAcknowledgementBatch(10, 19, util.List.of(2.toByte)) )) - acknowledgementData += (tp2 -> util.Arrays.asList( - new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) + acknowledgementData += (tp2 -> util.List.of( + new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1, topicName2) @@ -7388,11 +7847,7 @@ class KafkaApisTest extends Logging { erroneous += (tp3 -> ShareAcknowledgeResponse.partitionResponse(tp3, Errors.UNKNOWN_TOPIC_ID)) - kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7424,7 +7879,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val responseAcknowledgeData: mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData] = mutable.Map() responseAcknowledgeData += (new TopicIdPartition(topicId1, new TopicPartition("foo", 0)) -> new ShareAcknowledgeResponseData.PartitionData().setPartitionIndex(0).setErrorCode(Errors.NONE.code)) @@ -7439,57 +7894,54 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(List( + setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). 
- setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)) + )), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator)), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). - setPartitions(List( + setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava), + .setAcknowledgeTypes(util.List.of(1.toByte)) + )), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(List( + .setAcknowledgementBatches(util.List.of( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte)) - ).asJava) - ).asJava) - ).asJava) + .setAcknowledgeTypes(util.List.of(1.toByte)) + )) + ).iterator)) + ).iterator)) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) kafkaApis = createKafkaApis( - overrideProperties = Map( - ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + ) val response = kafkaApis.processShareAcknowledgeResponse(responseAcknowledgeData, request) val responseData = response.data() val topicResponses = responseData.responses() @@ -7526,12 +7978,12 @@ class KafkaApisTest extends Logging { private def compareAcknowledgementBatches(baseOffset: Long, endOffset: Long, - acknowledgementType: Byte, + acknowledgeType: Byte, acknowledgementBatch: ShareAcknowledgementBatch ): Boolean = { if (baseOffset == acknowledgementBatch.firstOffset() && endOffset == acknowledgementBatch.lastOffset() - && acknowledgementType == acknowledgementBatch.acknowledgeTypes().get(0)) { + && acknowledgeType == acknowledgementBatch.acknowledgeTypes().get(0)) { return true } false @@ -7694,7 +8146,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleJoinGroupRequest( requestChannelRequest, @@ -7825,7 +8277,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], 
any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleSyncGroupRequest( requestChannelRequest, @@ -7943,7 +8395,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleHeartbeatRequest( requestChannelRequest @@ -7959,15 +8411,14 @@ class KafkaApisTest extends Logging { def makeRequest(version: Short): RequestChannel.Request = { buildRequest(new LeaveGroupRequest.Builder( "group", - List( + util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new MemberIdentity() .setMemberId("member-2") .setGroupInstanceId("instance-2") - ).asJava - ).build(version)) + )).build(version)) } if (version < 3) { @@ -7978,14 +8429,14 @@ class KafkaApisTest extends Logging { val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(List( + .setMembers(util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new MemberIdentity() .setMemberId("member-2") .setGroupInstanceId("instance-2") - ).asJava) + )) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -7997,14 +8448,14 @@ class KafkaApisTest extends Logging { val expectedLeaveResponse = new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(List( + .setMembers(util.List.of( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new LeaveGroupResponseData.MemberResponse() .setMemberId("member-2") .setGroupInstanceId("instance-2"), - ).asJava) + )) future.complete(expectedLeaveResponse) val response = verifyNoThrottling[LeaveGroupResponse](requestChannelRequest) @@ -8017,20 +8468,20 @@ class KafkaApisTest extends Logging { def testHandleLeaveGroupWithSingleMember(version: Short): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - List( + util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava + ) ).build(version)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(List( + .setMembers(util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId(if (version >= 3) "instance-1" else null) - ).asJava) + )) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8042,20 +8493,20 @@ class KafkaApisTest extends Logging { val leaveGroupResponse = new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(List( + .setMembers(util.List.of( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava) + )) val expectedLeaveResponse = if (version >= 3) { new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(List( + .setMembers(util.List.of( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava) + )) } else { new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) @@ -8070,20 +8521,20 @@ class KafkaApisTest extends Logging { def 
testHandleLeaveGroupFutureFailed(): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - List( + util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava + ) ).build(ApiKeys.LEAVE_GROUP.latestVersion)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(List( + .setMembers(util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava) + )) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8102,20 +8553,20 @@ class KafkaApisTest extends Logging { def testHandleLeaveGroupAuthenticationFailed(): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - List( + util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava + ) ).build(ApiKeys.LEAVE_GROUP.latestVersion)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(List( + .setMembers(util.List.of( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ).asJava) + )) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8125,7 +8576,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleLeaveGroupRequest(requestChannelRequest) @@ -8136,17 +8587,38 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) def testHandleOffsetFetchWithMultipleGroups(version: Short): Unit = { + val foo = "foo" + val bar = "bar" + val fooId = Uuid.randomUuid() + addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + def makeRequest(version: Short): RequestChannel.Request = { - val groups = Map( - "group-1" -> List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1) - ).asJava, - "group-2" -> null, - "group-3" -> null, - "group-4" -> null, - ).asJava - buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) + buildRequest( + OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0, 1)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-2") + .setTopics(null), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-3") + .setTopics(null), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-4") + .setTopics(null), + )), + false, + true + ).build(version) + ) } if (version < 8) { @@ -8160,10 +8632,11 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) .setName("foo") - .setPartitionIndexes(List[Integer](0, 1).asJava)).asJava), + 
.setPartitionIndexes(util.List.of[Integer](0, 1)))), false )).thenReturn(group1Future) @@ -8194,14 +8667,33 @@ class KafkaApisTest extends Logging { false )).thenReturn(group4Future) kafkaApis = createKafkaApis() - kafkaApis.handleOffsetFetchRequest(requestChannelRequest) + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List( + .setTopicId(fooId) + .setName(foo) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(200) + .setCommittedLeaderEpoch(2) + )) + )) + + val expectedGroup1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) + .setName(if (version < 10) foo else "") + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8210,15 +8702,15 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - ).asJava) - ).asJava) + )) + )) val group2Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-2") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List( + .setName(bar) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8231,8 +8723,8 @@ class KafkaApisTest extends Logging { .setPartitionIndex(2) .setCommittedOffset(300) .setCommittedLeaderEpoch(3) - ).asJava) - ).asJava) + )) + )) val group3Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") @@ -8242,7 +8734,7 @@ class KafkaApisTest extends Logging { .setGroupId("group-4") .setErrorCode(Errors.INVALID_GROUP_ID.code) - val expectedGroups = List(group1Response, group2Response, group3Response, group4Response) + val expectedGroups = List(expectedGroup1Response, group2Response, group3Response, group4Response) group1Future.complete(group1Response) group2Future.complete(group2Response) @@ -8250,21 +8742,171 @@ class KafkaApisTest extends Logging { group4Future.complete(group4Response) val response = verifyNoThrottling[OffsetFetchResponse](requestChannelRequest) - assertEquals(expectedGroups.toSet, response.data.groups().asScala.toSet) + assertEquals(expectedGroups.toSet, response.data.groups.asScala.toSet) } } @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + // We only test with topic ids. 
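As context for the hunks above: the rewritten OffsetFetch tests build the request from an `OffsetFetchRequestData` populated with `java.util` factory collections, carrying both the topic name and the topic id, and wrap it with `OffsetFetchRequest.Builder.forTopicIdsOrNames`. Below is a minimal sketch of that construction (not taken from the patch; the group and topic values are placeholders, and the two trailing booleans are copied verbatim from the tests above without restating their semantics).

```scala
import java.util

import org.apache.kafka.common.Uuid
import org.apache.kafka.common.message.OffsetFetchRequestData
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.requests.OffsetFetchRequest

// Placeholder group/topic values; as the version-gated expectations above
// suggest, v10+ resolves the topic by id while older versions use the name.
val fooId = Uuid.randomUuid()
val data = new OffsetFetchRequestData()
  .setGroups(util.List.of(
    new OffsetFetchRequestData.OffsetFetchRequestGroup()
      .setGroupId("group-1")
      .setTopics(util.List.of(
        new OffsetFetchRequestData.OffsetFetchRequestTopics()
          .setName("foo")
          .setTopicId(fooId)
          .setPartitionIndexes(util.List.of[Integer](0, 1))))))

// Boolean arguments passed exactly as in the tests above.
val offsetFetchRequest = OffsetFetchRequest.Builder
  .forTopicIdsOrNames(data, false, true)
  .build(ApiKeys.OFFSET_FETCH.latestVersion)
```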
+ @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 10) + def testHandleOffsetFetchWithUnknownTopicIds(version: Short): Unit = { + val foo = "foo" + val bar = "bar" + val fooId = Uuid.randomUuid() + val barId = Uuid.randomUuid() + addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + + def makeRequest(version: Short): RequestChannel.Request = { + buildRequest( + OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0)), + // bar does not exist so it must return UNKNOWN_TOPIC_ID. + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(barId) + .setPartitionIndexes(util.List.of[Integer](0)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-2") + .setTopics(null) + )), + false, + true + ).build(version) + ) + } + + val requestChannelRequest = makeRequest(version) + + val group1Future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() + when(groupCoordinator.fetchOffsets( + requestChannelRequest.context, + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setTopicId(fooId) + .setName("foo") + .setPartitionIndexes(util.List.of[Integer](0)))), + false + )).thenReturn(group1Future) + + val group2Future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() + when(groupCoordinator.fetchAllOffsets( + requestChannelRequest.context, + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-2") + .setTopics(null), + false + )).thenReturn(group2Future) + + kafkaApis = createKafkaApis() + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(fooId) + .setName(foo) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + )) + )) + + val group2Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-2") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(foo) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + )), + // bar does not exist so it must be filtered out. 
+ new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(bar) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + )) + )) + + val expectedResponse = new OffsetFetchResponseData() + .setGroups(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(fooId) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + )), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(barId) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(-1) + .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code) + )) + )), + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-2") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(fooId) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + )) + )) + )) + + group1Future.complete(group1Response) + group2Future.complete(group2Response) + + val response = verifyNoThrottling[OffsetFetchResponse](requestChannelRequest) + assertEquals(expectedResponse, response.data) + } + + @ParameterizedTest + // The single group builder does not support topic ids. + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, toVersion = 9) def testHandleOffsetFetchWithSingleGroup(version: Short): Unit = { def makeRequest(version: Short): RequestChannel.Request = { - buildRequest(new OffsetFetchRequest.Builder( - "group-1", - false, - List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1) - ).asJava, + buildRequest(OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setRequireStable(false) + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setPartitionIndexes(util.List.of[Integer](0, 1)) + )) + )), false ).build(version)) } @@ -8276,9 +8918,9 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() .setName("foo") - .setPartitionIndexes(List[Integer](0, 1).asJava)).asJava), + .setPartitionIndexes(util.List.of[Integer](0, 1)))), false )).thenReturn(future) kafkaApis = createKafkaApis() @@ -8286,10 +8928,10 @@ class KafkaApisTest extends Logging { val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8298,18 +8940,18 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - ).asJava) - ).asJava) + )) + )) val 
expectedOffsetFetchResponse = if (version >= 8) { new OffsetFetchResponseData() - .setGroups(List(group1Response).asJava) + .setGroups(util.List.of(group1Response)) } else { new OffsetFetchResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartition() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8318,8 +8960,8 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(if (version >= 5) 2 else -1) - ).asJava) - ).asJava) + )) + )) } future.complete(group1Response) @@ -8329,18 +8971,24 @@ class KafkaApisTest extends Logging { } @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + // Version 1 does not support fetching offsets for all topics. + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 2) def testHandleOffsetFetchAllOffsetsWithSingleGroup(version: Short): Unit = { - // Version 0 gets offsets from Zookeeper. Version 1 does not support fetching all - // offsets request. We are not interested in testing these here. - if (version < 2) return + val foo = "foo" + val fooId = Uuid.randomUuid() + addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) def makeRequest(version: Short): RequestChannel.Request = { - buildRequest(new OffsetFetchRequest.Builder( - "group-1", + buildRequest(OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setRequireStable(false) + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(null) // all offsets. + )), false, - null, // all offsets. - false + true ).build(version)) } @@ -8359,10 +9007,10 @@ class KafkaApisTest extends Logging { val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List( + .setName(foo) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8371,18 +9019,36 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - ).asJava) - ).asJava) + )) + )) val expectedOffsetFetchResponse = if (version >= 8) { new OffsetFetchResponseData() - .setGroups(List(group1Response).asJava) + .setGroups(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(if (version < 10) foo else "") + .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) + .setPartitions(util.List.of( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(200) + .setCommittedLeaderEpoch(2) + )) + )) + )) } else { new OffsetFetchResponseData() - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopic() .setName("foo") - .setPartitions(List( + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartition() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8391,8 +9057,8 @@ class KafkaApisTest 
extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(if (version >= 5) 2 else -1) - ).asJava) - ).asJava) + )) + )) } future.complete(group1Response) @@ -8401,25 +9067,60 @@ class KafkaApisTest extends Logging { assertEquals(expectedOffsetFetchResponse, response.data) } - @Test - def testHandleOffsetFetchAuthorization(): Unit = { + @ParameterizedTest + // We don't test the non batched API. + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) + def testHandleOffsetFetchAuthorization(version: Short): Unit = { + val foo = "foo" + val bar = "bar" + val fooId = Uuid.randomUuid() + val barId = Uuid.randomUuid() + addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + addTopicToMetadataCache(bar, topicId = barId, numPartitions = 2) + def makeRequest(version: Short): RequestChannel.Request = { - val groups = Map( - "group-1" -> List( - new TopicPartition("foo", 0), - new TopicPartition("bar", 0) - ).asJava, - "group-2" -> List( - new TopicPartition("foo", 0), - new TopicPartition("bar", 0) - ).asJava, - "group-3" -> null, - "group-4" -> null, - ).asJava - buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) + buildRequest( + OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(barId) + .setPartitionIndexes(util.List.of[Integer](0)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-2") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(barId) + .setPartitionIndexes(util.List.of[Integer](0)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-3") + .setTopics(null), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-4") + .setTopics(null), + )), + false, + true + ).build(version) + ) } - val requestChannelRequest = makeRequest(ApiKeys.OFFSET_FETCH.latestVersion) + val requestChannelRequest = makeRequest(version) val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -8437,9 +9138,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } // group-1 is allowed and bar is allowed. 
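The hunk above replaces the `asScala`/`asJava` round trip in the authorizer stub with a Java stream. A self-contained sketch of that stubbing pattern, assuming only Mockito and the server `Authorizer` interface; the `acls` map and resource names are placeholders.

```scala
import java.util

import org.apache.kafka.common.requests.RequestContext
import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer}
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{mock, when}

// Per-resource decisions; anything not listed is denied.
val acls = Map(
  "group-1" -> AuthorizationResult.ALLOWED,
  "bar" -> AuthorizationResult.ALLOWED
)

val authorizer: Authorizer = mock(classOf[Authorizer])
when(authorizer.authorize(any[RequestContext], any[util.List[Action]]))
  .thenAnswer { invocation =>
    val actions = invocation.getArgument(1, classOf[util.List[Action]])
    // Map each requested Action to its decision with a Java stream; toList
    // yields the java.util.List the Authorizer contract expects.
    actions.stream()
      .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED))
      .toList
  }
```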
@@ -8448,9 +9149,10 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("bar") - .setPartitionIndexes(List[Integer](0).asJava)).asJava), + .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) + .setPartitionIndexes(util.List.of[Integer](0)))), false )).thenReturn(group1Future) @@ -8468,62 +9170,67 @@ class KafkaApisTest extends Logging { val group1ResponseFromCoordinator = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List( + .setName(bar) + .setTopicId(barId) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) val group3ResponseFromCoordinator = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") - .setTopics(List( + .setTopics(util.List.of( // foo should be filtered out. new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List( + .setName(foo) + .setTopicId(fooId) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - ).asJava), + )), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List( + .setName(bar) + .setTopicId(barId) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) val expectedOffsetFetchResponse = new OffsetFetchResponseData() - .setGroups(List( + .setGroups(util.List.of( // group-1 is authorized but foo is not. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List( + .setName(if (version < 10) bar else "") + .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - ).asJava), + )), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List( + .setName(if (version < 10) foo else "") + .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) .setCommittedOffset(-1) - ).asJava) - ).asJava), + )) + )), // group-2 is not authorized. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-2") @@ -8531,21 +9238,22 @@ class KafkaApisTest extends Logging { // group-3 is authorized but foo is not. 
new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") - .setTopics(List( + .setTopics(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List( + .setName(if (version < 10) bar else "") + .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) + .setPartitions(util.List.of( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - ).asJava) - ).asJava), + )) + )), // group-4 is not authorized. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-4") .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), - ).asJava) + )) group1Future.complete(group1ResponseFromCoordinator) group3Future.complete(group3ResponseFromCoordinator) @@ -8554,23 +9262,54 @@ class KafkaApisTest extends Logging { assertEquals(expectedOffsetFetchResponse, response.data) } - @Test - def testHandleOffsetFetchWithUnauthorizedTopicAndTopLevelError(): Unit = { + @ParameterizedTest + // We don't test the non batched API. + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) + def testHandleOffsetFetchWithUnauthorizedTopicAndTopLevelError(version: Short): Unit = { + val foo = "foo" + val bar = "bar" + val fooId = Uuid.randomUuid() + val barId = Uuid.randomUuid() + addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + addTopicToMetadataCache(bar, topicId = barId, numPartitions = 2) + def makeRequest(version: Short): RequestChannel.Request = { - val groups = Map( - "group-1" -> List( - new TopicPartition("foo", 0), - new TopicPartition("bar", 0) - ).asJava, - "group-2" -> List( - new TopicPartition("foo", 0), - new TopicPartition("bar", 0) - ).asJava - ).asJava - buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) + buildRequest( + OffsetFetchRequest.Builder.forTopicIdsOrNames( + new OffsetFetchRequestData() + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-1") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(barId) + .setPartitionIndexes(util.List.of[Integer](0)) + )), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group-2") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo) + .setTopicId(fooId) + .setPartitionIndexes(util.List.of[Integer](0)), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(barId) + .setPartitionIndexes(util.List.of[Integer](0)) + )) + )), + false, + true + ).build(version) + ) } - val requestChannelRequest = makeRequest(ApiKeys.OFFSET_FETCH.latestVersion) + val requestChannelRequest = makeRequest(version) val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -8586,9 +9325,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } // group-1 and group-2 are allowed and bar is allowed. 
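The expected responses above repeat one version gate on every topic entry. A hypothetical helper, not part of the patch, that spells out what that gate encodes:

```scala
import org.apache.kafka.common.Uuid
import org.apache.kafka.common.message.OffsetFetchResponseData

// From OFFSET_FETCH v10 the response identifies a topic by id and leaves the
// name empty; earlier versions carry the name and a zero id.
def expectedTopic(version: Short, name: String, id: Uuid): OffsetFetchResponseData.OffsetFetchResponseTopics =
  new OffsetFetchResponseData.OffsetFetchResponseTopics()
    .setName(if (version < 10) name else "")
    .setTopicId(if (version >= 10) id else Uuid.ZERO_UUID)
```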
@@ -8597,9 +9336,10 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("bar") - .setPartitionIndexes(List[Integer](0).asJava)).asJava), + .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) + .setPartitionIndexes(util.List.of[Integer](0)))), false )).thenReturn(group1Future) @@ -8608,9 +9348,10 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-2") - .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("bar") - .setPartitionIndexes(List[Integer](0).asJava)).asJava), + .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar) + .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) + .setPartitionIndexes(util.List.of[Integer](0)))), false )).thenReturn(group1Future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) @@ -8624,12 +9365,12 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code) val expectedOffsetFetchResponse = new OffsetFetchResponseData() - .setGroups(List( + .setGroups(util.List.of( new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code), group2ResponseFromCoordinator - ).asJava) + )) group1Future.completeExceptionally(Errors.COORDINATOR_NOT_AVAILABLE.exception) group2Future.complete(group2ResponseFromCoordinator) @@ -8657,8 +9398,8 @@ class KafkaApisTest extends Logging { setupBasicMetadataCache(tp0.topic, numPartitions = 1, 1, topicId) val hw = 3 - val fetchDataBuilder = Collections.singletonMap(tp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) - val fetchData = Collections.singletonMap(tidp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) + val fetchDataBuilder = util.Map.of(tp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) + val fetchData = util.Map.of(tidp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) val fetchFromFollower = buildRequest(new FetchRequest.Builder( ApiKeys.FETCH.oldestVersion(), ApiKeys.FETCH.latestVersion(), 1, 1, 1000, 0, fetchDataBuilder).build()) @@ -8703,14 +9444,14 @@ class KafkaApisTest extends Logging { @ApiKeyVersionsSource(apiKey = ApiKeys.LIST_GROUPS) def testListGroupsRequest(version: Short): Unit = { val listGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(if (version >= 4) List("Stable", "Empty").asJava else List.empty.asJava) - .setTypesFilter(if (version >= 5) List("classic", "consumer").asJava else List.empty.asJava) + .setStatesFilter(if (version >= 4) util.List.of("Stable", "Empty") else util.List.of) + .setTypesFilter(if (version >= 5) util.List.of("classic", "consumer") else util.List.of) val requestChannelRequest = buildRequest(new ListGroupsRequest.Builder(listGroupsRequest).build(version)) val expectedListGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(if (version >= 4) List("Stable", "Empty").asJava else List.empty.asJava) - .setTypesFilter(if (version >= 5) List("classic", "consumer").asJava else List.empty.asJava) + .setStatesFilter(if (version >= 4) 
util.List.of("Stable", "Empty") else util.List.of) + .setTypesFilter(if (version >= 5) util.List.of("classic", "consumer") else util.List.of) val future = new CompletableFuture[ListGroupsResponseData]() when(groupCoordinator.listGroups( @@ -8721,7 +9462,7 @@ class KafkaApisTest extends Logging { kafkaApis.handleListGroupsRequest(requestChannelRequest) val expectedListGroupsResponse = new ListGroupsResponseData() - .setGroups(List( + .setGroups(util.List.of( new ListGroupsResponseData.ListedGroup() .setGroupId("group1") .setProtocolType("protocol1") @@ -8737,7 +9478,7 @@ class KafkaApisTest extends Logging { .setProtocolType("protocol3") .setGroupState(if (version >= 4) "Stable" else "") .setGroupType(if (version >= 5) "classic" else ""), - ).asJava) + )) future.complete(expectedListGroupsResponse) val response = verifyNoThrottling[ListGroupsResponse](requestChannelRequest) @@ -8747,14 +9488,14 @@ class KafkaApisTest extends Logging { @Test def testListGroupsRequestFutureFailed(): Unit = { val listGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(List("Stable", "Empty").asJava) - .setTypesFilter(List("classic", "consumer").asJava) + .setStatesFilter(util.List.of("Stable", "Empty")) + .setTypesFilter(util.List.of("classic", "consumer")) val requestChannelRequest = buildRequest(new ListGroupsRequest.Builder(listGroupsRequest).build()) val expectedListGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(List("Stable", "Empty").asJava) - .setTypesFilter(List("classic", "consumer").asJava) + .setStatesFilter(util.List.of("Stable", "Empty")) + .setTypesFilter(util.List.of("classic", "consumer")) val future = new CompletableFuture[ListGroupsResponseData]() when(groupCoordinator.listGroups( @@ -8917,7 +9658,7 @@ class KafkaApisTest extends Logging { assertEquals(clusterId, describeClusterResponse.data.clusterId) assertEquals(8096, describeClusterResponse.data.clusterAuthorizedOperations) assertEquals(util.Set.copyOf(metadataCache.getAliveBrokerNodes(plaintextListener)), - describeClusterResponse.nodes.asScala.values.toSet.asJava) + util.Set.copyOf(describeClusterResponse.nodes.values)) } /** @@ -8971,11 +9712,11 @@ class KafkaApisTest extends Logging { private def testConsumerListOffsetWithUnsupportedVersion(timestamp: Long, version: Short): Unit = { val tp = new TopicPartition("foo", 0) - val targetTimes = List(new ListOffsetsTopic() + val targetTimes = util.List.of(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(List(new ListOffsetsPartition() + .setPartitions(util.List.of(new ListOffsetsPartition() .setPartitionIndex(tp.partition) - .setTimestamp(timestamp)).asJava)).asJava + .setTimestamp(timestamp)))) when(replicaManager.fetchOffset( ArgumentMatchers.any[Seq[ListOffsetsTopic]](), @@ -9000,7 +9741,7 @@ class KafkaApisTest extends Logging { .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) + callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) }) val data = new ListOffsetsRequestData().setTopics(targetTimes).setReplicaId(ListOffsetsRequest.CONSUMER_REPLICA_ID) @@ -9023,11 +9764,11 @@ class KafkaApisTest extends Logging { val tp = new TopicPartition("foo", 0) val latestOffset = 15L - val targetTimes = List(new ListOffsetsTopic() + val targetTimes = util.List.of(new 
ListOffsetsTopic() .setName(tp.topic) - .setPartitions(List(new ListOffsetsPartition() + .setPartitions(util.List.of(new ListOffsetsPartition() .setPartitionIndex(tp.partition) - .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)).asJava)).asJava + .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)))) when(replicaManager.fetchOffset( ArgumentMatchers.eq(targetTimes.asScala.toSeq), @@ -9047,7 +9788,7 @@ class KafkaApisTest extends Logging { .setOffset(latestOffset) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) + callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) }) val listOffsetRequest = ListOffsetsRequest.Builder.forConsumer(true, isolationLevel) @@ -9069,7 +9810,7 @@ class KafkaApisTest extends Logging { private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = { val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - asList(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build() + util.List.of(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build() (writeTxnMarkersRequest, buildRequest(writeTxnMarkersRequest)) } @@ -9078,6 +9819,17 @@ class KafkaApisTest extends Logging { fromPrivilegedListener: Boolean = false, requestHeader: Option[RequestHeader] = None, requestMetrics: RequestChannelMetrics = requestChannelMetrics): RequestChannel.Request = { + buildRequest(request, new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "Alice"), InetAddress.getLocalHost, listenerName, + fromPrivilegedListener, requestHeader, requestMetrics) + } + + private def buildRequest(request: AbstractRequest, + principal: KafkaPrincipal, + clientAddress: InetAddress, + listenerName: ListenerName, + fromPrivilegedListener: Boolean, + requestHeader: Option[RequestHeader], + requestMetrics: RequestChannelMetrics): RequestChannel.Request = { val buffer = request.serializeWithHeader( requestHeader.getOrElse(new RequestHeader(request.apiKey, request.version, clientId, 0))) @@ -9087,8 +9839,8 @@ class KafkaApisTest extends Logging { // and have a non KafkaPrincipal.ANONYMOUS principal. This test is done before the check // for forwarding because after forwarding the context will have a different context. // We validate the context authenticated failure case in other integration tests. 
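The new `buildRequest` overload introduced here lets a test supply its own principal and client address instead of the defaults. A usage sketch under the assumption that it is called from inside this test class (so `buildRequest` and `requestChannelMetrics` are in scope); the request value, principal name, address, and listener are placeholders, not taken from the patch.

```scala
import java.net.InetAddress

import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}

// someRequest stands in for whatever AbstractRequest the test builds.
val channelRequest = buildRequest(
  someRequest,
  new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob"),    // explicit, non-default principal
  InetAddress.getLoopbackAddress,                         // explicit client address
  ListenerName.forSecurityProtocol(SecurityProtocol.SSL), // placeholder listener
  false,                                                  // fromPrivilegedListener
  None,                                                   // requestHeader: fall back to a default header
  requestChannelMetrics
)
```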
- val context = new RequestContext(header, "1", InetAddress.getLocalHost, Optional.empty(), - new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "Alice"), listenerName, SecurityProtocol.SSL, + val context = new RequestContext(header, "1", clientAddress, Optional.empty(), + principal, listenerName, SecurityProtocol.SSL, ClientInformation.EMPTY, fromPrivilegedListener, Optional.of(kafkaPrincipalSerde)) new RequestChannel.Request(processor = 1, context = context, startTimeNanos = 0, MemoryPool.NONE, buffer, requestMetrics, envelope = None) @@ -9104,13 +9856,13 @@ class KafkaApisTest extends Logging { any() ) val response = capturedResponse.getValue - val buffer = MessageUtil.toByteBufferAccessor( + val readable = MessageUtil.toByteBufferAccessor( response.data, request.context.header.apiVersion - ).buffer() + ) AbstractResponse.parseResponse( request.context.header.apiKey, - buffer, + readable, request.context.header.apiVersion, ).asInstanceOf[T] } @@ -9125,10 +9877,10 @@ class KafkaApisTest extends Logging { any() ) val response = capturedResponse.getValue - val buffer = MessageUtil.toByteBufferAccessor( + val readable = MessageUtil.toByteBufferAccessor( response.data, request.context.header.apiVersion - ).buffer() + ) // Create the RequestChannel.Response that is created when sendResponse is called in order to update the metrics. val sendResponse = new RequestChannel.SendResponse( @@ -9141,7 +9893,7 @@ class KafkaApisTest extends Logging { AbstractResponse.parseResponse( request.context.header.apiKey, - buffer, + readable, request.context.header.apiVersion, ).asInstanceOf[T] } @@ -9156,7 +9908,7 @@ class KafkaApisTest extends Logging { val topicRecord = new TopicRecord().setName(topic).setTopicId(topicId) results += topicRecord - val replicas = List(0.asInstanceOf[Integer]).asJava + val replicas = util.List.of(0.asInstanceOf[Integer]) def createPartitionRecord(partition: Int) = new PartitionRecord() .setTopicId(topicId) @@ -9211,7 +9963,7 @@ class KafkaApisTest extends Logging { val data = new AlterReplicaLogDirsRequestData() val dir = new AlterReplicaLogDirsRequestData.AlterReplicaLogDir() .setPath("/foo") - dir.topics().add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic().setName("t0").setPartitions(asList(0, 1, 2))) + dir.topics().add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic().setName("t0").setPartitions(util.List.of(0, 1, 2))) data.dirs().add(dir) val alterReplicaLogDirsRequest = new AlterReplicaLogDirsRequest.Builder( data @@ -9243,9 +9995,9 @@ class KafkaApisTest extends Logging { new TopicPartition(tr.topicName, pr.partitionIndex) -> Errors.forCode(pr.errorCode) } }.toMap) - assertEquals(Map(Errors.NONE -> 1, - Errors.LOG_DIR_NOT_FOUND -> 1, - Errors.INVALID_TOPIC_EXCEPTION -> 1).asJava, response.errorCounts) + assertEquals(util.Map.of(Errors.NONE, 1, + Errors.LOG_DIR_NOT_FOUND, 1, + Errors.INVALID_TOPIC_EXCEPTION, 1), response.errorCounts) } @Test @@ -9267,7 +10019,7 @@ class KafkaApisTest extends Logging { topicIds.put(tp.topicPartition.topic, tp.topicId) topicNames.put(tp.topicId, tp.topicPartition.topic) } - FetchResponse.of(Errors.NONE, 100, 100, responseData) + FetchResponse.of(Errors.NONE, 100, 100, responseData, List.empty.asJava) } val throttledPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("throttledData", 0)) @@ -9293,45 +10045,45 @@ class KafkaApisTest extends Logging { val tp4 = new TopicPartition("invalid;topic", 1) val authorizer: Authorizer = mock(classOf[Authorizer]) - val data = new 
DescribeProducersRequestData().setTopics(List( + val data = new DescribeProducersRequestData().setTopics(util.List.of( new DescribeProducersRequestData.TopicRequest() .setName(tp1.topic) - .setPartitionIndexes(List(Int.box(tp1.partition)).asJava), + .setPartitionIndexes(util.List.of(Int.box(tp1.partition))), new DescribeProducersRequestData.TopicRequest() .setName(tp2.topic) - .setPartitionIndexes(List(Int.box(tp2.partition)).asJava), + .setPartitionIndexes(util.List.of(Int.box(tp2.partition))), new DescribeProducersRequestData.TopicRequest() .setName(tp3.topic) - .setPartitionIndexes(List(Int.box(tp3.partition)).asJava), + .setPartitionIndexes(util.List.of(Int.box(tp3.partition))), new DescribeProducersRequestData.TopicRequest() .setName(tp4.topic) - .setPartitionIndexes(List(Int.box(tp4.partition)).asJava) - ).asJava) + .setPartitionIndexes(util.List.of(Int.box(tp4.partition))) + )) def buildExpectedActions(topic: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) val action = new Action(AclOperation.READ, pattern, 1, true, true) - Collections.singletonList(action) + util.List.of(action) } // Topic `foo` is authorized and present in the metadata addTopicToMetadataCache(tp1.topic, 4) // We will only access the first topic when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp1.topic)))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) // Topic `bar` is not authorized when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp2.topic)))) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) // Topic `baz` is authorized, but not present in the metadata when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp3.topic)))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) when(replicaManager.activeProducerState(tp1)) .thenReturn(new DescribeProducersResponseData.PartitionResponse() .setErrorCode(Errors.NONE.code) .setPartitionIndex(tp1.partition) - .setActiveProducers(List( + .setActiveProducers(util.List.of( new DescribeProducersResponseData.ProducerState() .setProducerId(12345L) .setProducerEpoch(15) @@ -9339,7 +10091,7 @@ class KafkaApisTest extends Logging { .setLastTimestamp(time.milliseconds()) .setCurrentTxnStartOffset(-1) .setCoordinatorEpoch(200) - ).asJava)) + ))) val describeProducersRequest = new DescribeProducersRequest.Builder(data).build() val request = buildRequest(describeProducersRequest) @@ -9381,7 +10133,7 @@ class KafkaApisTest extends Logging { def testDescribeTransactions(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) val data = new DescribeTransactionsRequestData() - .setTransactionalIds(List("foo", "bar").asJava) + .setTransactionalIds(util.List.of("foo", "bar")) val describeTransactionsRequest = new DescribeTransactionsRequest.Builder(data).build() val request = buildRequest(describeTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -9390,7 +10142,7 @@ class KafkaApisTest extends Logging { def buildExpectedActions(transactionalId: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - 
Collections.singletonList(action) + util.List.of(action) } when(txnCoordinator.handleDescribeTransactions("foo")) @@ -9404,10 +10156,10 @@ class KafkaApisTest extends Logging { .setTransactionTimeoutMs(10000)) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("foo")))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("bar")))) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleDescribeTransactionsRequest(request) @@ -9432,7 +10184,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) val transactionalId = "foo" val data = new DescribeTransactionsRequestData() - .setTransactionalIds(List(transactionalId).asJava) + .setTransactionalIds(util.List.of(transactionalId)) val describeTransactionsRequest = new DescribeTransactionsRequest.Builder(data).build() val request = buildRequest(describeTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -9445,10 +10197,10 @@ class KafkaApisTest extends Logging { ): Unit = { val pattern = new ResourcePattern(resourceType, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - val actions = Collections.singletonList(action) + val actions = util.List.of(action) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(actions))) - .thenReturn(Seq(result).asJava) + .thenReturn(util.List.of(result)) } // Principal is authorized to one of the two topics. 
The second topic should be @@ -9504,7 +10256,7 @@ class KafkaApisTest extends Logging { when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L)) + when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L, null)) .thenReturn(new ListTransactionsResponseData() .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code)) kafkaApis = createKafkaApis() @@ -9518,7 +10270,7 @@ class KafkaApisTest extends Logging { @Test def testListTransactionsAuthorization(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) - val data = new ListTransactionsRequestData() + val data = new ListTransactionsRequestData().setTransactionalIdPattern("my.*") val listTransactionsRequest = new ListTransactionsRequest.Builder(data).build() val request = buildRequest(listTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -9526,15 +10278,15 @@ class KafkaApisTest extends Logging { val transactionStates = new util.ArrayList[ListTransactionsResponseData.TransactionState]() transactionStates.add(new ListTransactionsResponseData.TransactionState() - .setTransactionalId("foo") + .setTransactionalId("myFoo") .setProducerId(12345L) .setTransactionState("Ongoing")) transactionStates.add(new ListTransactionsResponseData.TransactionState() - .setTransactionalId("bar") + .setTransactionalId("myBar") .setProducerId(98765) .setTransactionState("PrepareAbort")) - when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L)) + when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L, "my.*")) .thenReturn(new ListTransactionsResponseData() .setErrorCode(Errors.NONE.code) .setTransactionStates(transactionStates)) @@ -9542,21 +10294,21 @@ class KafkaApisTest extends Logging { def buildExpectedActions(transactionalId: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - Collections.singletonList(action) + util.List.of(action) } - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("foo")))) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("myFoo")))) + .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("bar")))) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("myBar")))) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleListTransactionsRequest(request) val response = verifyNoThrottling[ListTransactionsResponse](request) assertEquals(1, response.data.transactionStates.size()) val transactionState = response.data.transactionStates.get(0) - assertEquals("foo", transactionState.transactionalId) + assertEquals("myFoo", transactionState.transactionalId) assertEquals(12345L, transactionState.producerId) assertEquals("Ongoing", transactionState.transactionState) } @@ -9577,11 +10329,11 @@ class KafkaApisTest extends Logging { def testInvalidLegacyAlterConfigsRequestWithKRaft(): Unit = { val request = 
buildRequest(new AlterConfigsRequest(new AlterConfigsRequestData(). setValidateOnly(true). - setResources(new LAlterConfigsResourceCollection(asList( + setResources(new LAlterConfigsResourceCollection(util.List.of( new LAlterConfigsResource(). setResourceName(brokerId.toString). setResourceType(BROKER.id()). - setConfigs(new LAlterableConfigCollection(asList(new LAlterableConfig(). + setConfigs(new LAlterableConfigCollection(util.List.of(new LAlterableConfig(). setName("foo"). setValue(null)).iterator()))).iterator())), 1.toShort)) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) @@ -9590,7 +10342,7 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) val response = verifyNoThrottling[AlterConfigsResponse](request) - assertEquals(new AlterConfigsResponseData().setResponses(asList( + assertEquals(new AlterConfigsResponseData().setResponses(util.List.of( new LAlterConfigsResourceResponse(). setErrorCode(Errors.INVALID_REQUEST.code()). setErrorMessage("Null value not supported for : foo"). @@ -9601,7 +10353,12 @@ class KafkaApisTest extends Logging { @Test def testEmptyIncrementalAlterConfigsRequestWithKRaft(): Unit = { - val request = buildRequest(new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(), 1.toShort)) + val alterConfigsRequest = new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(), 1.toShort) + assertEquals( + "IncrementalAlterConfigsRequestData(resources=[], validateOnly=false)", + alterConfigsRequest.toString + ) + val request = buildRequest(alterConfigsRequest) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) @@ -9613,22 +10370,30 @@ class KafkaApisTest extends Logging { @Test def testLog4jIncrementalAlterConfigsRequestWithKRaft(): Unit = { - val request = buildRequest(new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(). + val alterConfigsRequest = new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(). setValidateOnly(true). - setResources(new IAlterConfigsResourceCollection(asList(new IAlterConfigsResource(). + setResources(new IAlterConfigsResourceCollection(util.List.of(new IAlterConfigsResource(). setResourceName(brokerId.toString). setResourceType(BROKER_LOGGER.id()). - setConfigs(new IAlterableConfigCollection(asList(new IAlterableConfig(). + setConfigs(new IAlterableConfigCollection(util.List.of(new IAlterableConfig(). setName(LoggingController.ROOT_LOGGER). 
- setValue("TRACE")).iterator()))).iterator())), - 1.toShort)) + setValue("TRACE")).iterator()))).iterator())), 1.toShort) + assertEquals( + "IncrementalAlterConfigsRequestData(resources=[" + + "AlterConfigsResource(resourceType=" + BROKER_LOGGER.id() + ", " + + "resourceName='"+ brokerId + "', " + + "configs=[AlterableConfig(name='" + LoggingController.ROOT_LOGGER + "', configOperation=0, value='REDACTED')])], " + + "validateOnly=true)", + alterConfigsRequest.toString + ) + val request = buildRequest(alterConfigsRequest) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) - assertEquals(new IncrementalAlterConfigsResponseData().setResponses(asList( + assertEquals(new IncrementalAlterConfigsResponseData().setResponses(util.List.of( new IAlterConfigsResourceResponse(). setErrorCode(0.toShort). setErrorMessage(null). @@ -9644,7 +10409,7 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) metadataCache = { val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY); + val delta = new MetadataDelta(MetadataImage.EMPTY) delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -9720,7 +10485,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis( authorizer = Some(authorizer), featureVersions = Seq(GroupVersion.GV_1) @@ -9741,7 +10506,7 @@ class KafkaApisTest extends Logging { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) - .setSubscribedTopicNames(List(fooTopicName, barTopicName, zarTopicName).asJava) + .setSubscribedTopicNames(util.List.of(fooTopicName, barTopicName, zarTopicName)) val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) @@ -9756,9 +10521,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } kafkaApis = createKafkaApis( @@ -9778,7 +10543,7 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) metadataCache = { val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY); + val delta = new MetadataDelta(MetadataImage.EMPTY) delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -9797,7 +10562,11 @@ class KafkaApisTest 
extends Logging { @Test def testStreamsGroupHeartbeatRequest(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") @@ -9808,22 +10577,25 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, streamsGroupHeartbeatRequest )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() .setMemberId("member") - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, Collections.emptyMap())) + future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, util.Map.of())) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) assertEquals(streamsGroupHeartbeatResponse, response.data) } @Test def testStreamsGroupHeartbeatRequestWithAuthorizedTopology(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) + val groupId = "group" val fooTopicName = "foo" val barTopicName = "bar" @@ -9837,12 +10609,12 @@ class KafkaApisTest extends Logging { .setSubtopologies( util.List.of( new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology1") - .setSourceTopics(Collections.singletonList(fooTopicName)) - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) - .setStateChangelogTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))), + .setSourceTopics(util.List.of(fooTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) + .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))), new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology2") - .setSourceTopics(Collections.singletonList(zarTopicName)) - .setRepartitionSourceTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(barTopicName))) + .setSourceTopics(util.List.of(zarTopicName)) + .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(barTopicName))) ) ) ) @@ -9863,9 +10635,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val future = new CompletableFuture[StreamsGroupHeartbeatResult]() @@ -9874,22 +10646,25 @@ class KafkaApisTest extends Logging { streamsGroupHeartbeatRequest )).thenReturn(future) kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + 
authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() .setMemberId("member") - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, Collections.emptyMap())) + future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, util.Map.of())) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) assertEquals(streamsGroupHeartbeatResponse, response.data) } @Test def testStreamsGroupHeartbeatRequestFutureFailed(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") @@ -9900,9 +10675,7 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, streamsGroupHeartbeatRequest )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) @@ -9912,7 +10685,11 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupHeartbeatRequestGroupAuthorizationFailed(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") @@ -9920,10 +10697,9 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -9933,7 +10709,12 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupHeartbeatRequestTopicAuthorizationFailed(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) + val groupId = "group" val fooTopicName = "foo" val barTopicName = "bar" @@ -9945,10 +10726,10 @@ class KafkaApisTest extends Logging { .setEpoch(3) .setSubtopologies( util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(Collections.singletonList(fooTopicName)) - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) - .setRepartitionSourceTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(zarTopicName))) - .setStateChangelogTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))) + 
.setSourceTopics(util.List.of(fooTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) + .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(zarTopicName))) + .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))) ) ) ) @@ -9968,14 +10749,13 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -9984,7 +10764,7 @@ class KafkaApisTest extends Logging { } @Test - def testStreamsGroupHeartbeatRequestProtocolDisabled(): Unit = { + def testStreamsGroupHeartbeatRequestProtocolDisabledViaConfig(): Unit = { metadataCache = mock(classOf[KRaftMetadataCache]) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") @@ -10000,28 +10780,49 @@ class KafkaApisTest extends Logging { assertEquals(Errors.UNSUPPORTED_VERSION.code, response.data.errorCode) } + @Test + def testStreamsGroupHeartbeatRequestProtocolDisabledViaFeature(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 0.toShort)) + + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) + + val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + + val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + + kafkaApis = createKafkaApis() + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) + assertEquals(Errors.UNSUPPORTED_VERSION.code, response.data.errorCode) + } + @Test def testStreamsGroupHeartbeatRequestInvalidTopicNames(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group").setTopology( new StreamsGroupHeartbeatRequestData.Topology() .setEpoch(3) .setSubtopologies( - Collections.singletonList(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(Collections.singletonList("a ")) - .setRepartitionSinkTopics(Collections.singletonList("b?")) - .setRepartitionSourceTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("c!"))) - .setStateChangelogTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("d/"))) + util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") + .setSourceTopics(util.List.of("a ")) + .setRepartitionSinkTopics(util.List.of("b?")) + 
.setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("c!"))) + .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("d/"))) ) ) ) val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) @@ -10031,25 +10832,27 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupHeartbeatRequestInternalTopicNames(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group").setTopology( new StreamsGroupHeartbeatRequestData.Topology() .setEpoch(3) .setSubtopologies( - Collections.singletonList(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(Collections.singletonList("__consumer_offsets")) - .setRepartitionSinkTopics(Collections.singletonList("__transaction_state")) - .setRepartitionSourceTopics(Collections.singletonList(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("__share_group_state"))) + util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") + .setSourceTopics(util.List.of("__consumer_offsets")) + .setRepartitionSinkTopics(util.List.of("__transaction_state")) + .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("__share_group_state"))) ) ) ) val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) @@ -10059,9 +10862,13 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupHeartbeatRequestWithInternalTopicsToCreate(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group"); + val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) @@ -10071,9 +10878,7 @@ class KafkaApisTest extends Logging { streamsGroupHeartbeatRequest )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val 
missingTopics = Map("test" -> new CreatableTopic()) @@ -10083,14 +10888,18 @@ class KafkaApisTest extends Logging { future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics.asJava)) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) assertEquals(streamsGroupHeartbeatResponse, response.data) - verify(autoTopicCreationManager).createStreamsInternalTopics(missingTopics, requestChannelRequest.context) + verify(autoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) } @Test def testStreamsGroupHeartbeatRequestWithInternalTopicsToCreateMissingCreateACL(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group"); + val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) @@ -10103,27 +10912,29 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], isNotNull[util.List[Action]])).thenAnswer(invocation => { val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]] - actions.asScala.map { action => - if (action.resourcePattern.name.equals("test") && action.operation() == AclOperation.CREATE && action.resourcePattern().resourceType() == ResourceType.TOPIC) { + val results: util.List[AuthorizationResult] = new util.ArrayList[AuthorizationResult](actions.size()) + actions.forEach { action => + val result = if (action.resourcePattern.name == "test" && action.operation == AclOperation.CREATE && action.resourcePattern.resourceType == ResourceType.TOPIC) { AuthorizationResult.DENIED - } else if (action.operation() == AclOperation.CREATE && action.resourcePattern().resourceType() == ResourceType.CLUSTER) { + } else if (action.operation == AclOperation.CREATE && action.resourcePattern.resourceType == ResourceType.CLUSTER) { AuthorizationResult.DENIED } else { AuthorizationResult.ALLOWED } - }.asJava + results.add(result) + } + results }) kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val missingTopics = Map("test" -> new CreatableTopic()) + val missingTopics = util.Map.of("test", new CreatableTopic()) val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() .setMemberId("member") - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics.asJava)) + future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics)) val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) assertEquals(Errors.NONE.code, response.data.errorCode()) assertEquals(null, response.data.errorMessage()) @@ -10137,6 +10948,61 @@ class KafkaApisTest extends Logging { ) } + @Test + def testStreamsGroupHeartbeatRequestWithCachedTopicCreationErrors(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + 
when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) + + val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + + val future = new CompletableFuture[StreamsGroupHeartbeatResult]() + when(groupCoordinator.streamsGroupHeartbeat( + requestChannelRequest.context, + streamsGroupHeartbeatRequest + )).thenReturn(future) + + // Mock AutoTopicCreationManager to return cached errors + val mockAutoTopicCreationManager = mock(classOf[AutoTopicCreationManager]) + when(mockAutoTopicCreationManager.getStreamsInternalTopicCreationErrors(ArgumentMatchers.eq(Set("test-topic")), any())) + .thenReturn(Map("test-topic" -> "INVALID_REPLICATION_FACTOR")) + // Mock the createStreamsInternalTopics method to do nothing (simulate topic creation attempt) + doNothing().when(mockAutoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) + + kafkaApis = createKafkaApis(autoTopicCreationManager = Some(mockAutoTopicCreationManager)) + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + // Group coordinator returns MISSING_INTERNAL_TOPICS status and topics to create + val missingTopics = util.Map.of("test-topic", new CreatableTopic()) + val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() + .setMemberId("member") + .setStatus(util.List.of( + new StreamsGroupHeartbeatResponseData.Status() + .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) + .setStatusDetail("Internal topics are missing: [test-topic]") + )) + + future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics)) + val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) + + assertEquals(Errors.NONE.code, response.data.errorCode()) + assertEquals(null, response.data.errorMessage()) + + // Verify that the cached error was appended to the existing status detail + assertEquals(1, response.data.status().size()) + val status = response.data.status().get(0) + assertEquals(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code(), status.statusCode()) + assertTrue(status.statusDetail().contains("Internal topics are missing: [test-topic]")) + assertTrue(status.statusDetail().contains("Creation failed: test-topic (INVALID_REPLICATION_FACTOR)")) + + // Verify that createStreamsInternalTopics was called + verify(mockAutoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) + verify(mockAutoTopicCreationManager).getStreamsInternalTopicCreationErrors(ArgumentMatchers.eq(Set("test-topic")), any()) + } + @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testConsumerGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { @@ -10144,7 +11010,7 @@ class KafkaApisTest extends Logging { val barTopicName = "bar" metadataCache = mock(classOf[KRaftMetadataCache]) - val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() .setIncludeAuthorizedOperations(includeAuthorizedOperations) consumerGroupDescribeRequestData.groupIds.addAll(groupIds) @@ -10163,42 +11029,42 @@ class KafkaApisTest extends Logging { val 
member0 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member0") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) val member1 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member1") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( + .setTopicPartitions(util.List.of( new TopicPartitions().setTopicName(fooTopicName), - new TopicPartitions().setTopicName(barTopicName)).asJava)) + new TopicPartitions().setTopicName(barTopicName)))) val member2 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member2") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(barTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) - future.complete(List( + future.complete(util.List.of( new DescribedGroup() .setGroupId(groupIds.get(0)) - .setMembers(List(member0).asJava), + .setMembers(util.List.of(member0)), new DescribedGroup() .setGroupId(groupIds.get(1)) - .setMembers(List(member0, member1).asJava), + .setMembers(util.List.of(member0, member1)), new DescribedGroup() .setGroupId(groupIds.get(2)) - .setMembers(List(member2).asJava) - ).asJava) + .setMembers(util.List.of(member2)) + )) var authorizedOperationsInt = Int.MinValue if (includeAuthorizedOperations) { @@ -10211,13 +11077,13 @@ class KafkaApisTest extends Logging { val describedGroups = List( new DescribedGroup() .setGroupId(groupIds.get(0)) - .setMembers(List(member0).asJava), + .setMembers(util.List.of(member0)), new DescribedGroup() .setGroupId(groupIds.get(1)) - .setMembers(List(member0, member1).asJava), + .setMembers(util.List.of(member0, member1)), new DescribedGroup() .setGroupId(groupIds.get(2)) - .setMembers(List(member2).asJava) + .setMembers(util.List.of(member2)) ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() .setGroups(describedGroups.asJava) @@ -10240,7 +11106,7 @@ class KafkaApisTest extends Logging { expectedResponse.groups.add(expectedDescribedGroup) metadataCache = { val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY); + val delta = new MetadataDelta(MetadataImage.EMPTY) delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -10265,14 +11131,14 @@ class KafkaApisTest extends Logging { val authorizer: 
Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() when(groupCoordinator.consumerGroupDescribe( any[RequestContext], any[util.List[String]] )).thenReturn(future) - future.complete(List().asJava) + future.complete(util.List.of) kafkaApis = createKafkaApis( authorizer = Some(authorizer), featureVersions = Seq(GroupVersion.GV_1) @@ -10309,11 +11175,16 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testStreamsGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) + val fooTopicName = "foo" val barTopicName = "bar" - val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() .setIncludeAuthorizedOperations(includeAuthorizedOperations) streamsGroupDescribeRequestData.groupIds.addAll(groupIds) @@ -10324,40 +11195,38 @@ class KafkaApisTest extends Logging { any[RequestContext], any[util.List[String]] )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val subtopology0 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology0") - .setSourceTopics(Collections.singletonList(fooTopicName)) + .setSourceTopics(util.List.of(fooTopicName)) val subtopology1 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology1") - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) val subtopology2 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology2") - .setSourceTopics(Collections.singletonList(fooTopicName)) - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) + .setSourceTopics(util.List.of(fooTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) - future.complete(List( + future.complete(util.List.of( new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(0)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology0))), + .setSubtopologies(util.List.of(subtopology0))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(1)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology1))), + .setSubtopologies(util.List.of(subtopology1))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(2)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology2))) - ).asJava) + .setSubtopologies(util.List.of(subtopology2))) + )) - var authorizedOperationsInt = Int.MinValue; + var authorizedOperationsInt = Int.MinValue if 
(includeAuthorizedOperations) { authorizedOperationsInt = Utils.to32BitField( AclEntry.supportedOperations(ResourceType.GROUP).asScala @@ -10369,15 +11238,15 @@ class KafkaApisTest extends Logging { new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(0)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology0))), + .setSubtopologies(util.List.of(subtopology0))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(1)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology1))), + .setSubtopologies(util.List.of(subtopology1))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(2)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology2))) + .setSubtopologies(util.List.of(subtopology2))) ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) val expectedStreamsGroupDescribeResponseData = new StreamsGroupDescribeResponseData() .setGroups(describedGroups.asJava) @@ -10400,7 +11269,7 @@ class KafkaApisTest extends Logging { expectedResponse.groups.add(expectedDescribedGroup) metadataCache = { val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY); + val delta = new MetadataDelta(MetadataImage.EMPTY) delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -10417,7 +11286,11 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupDescribeAuthorizationFailed(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() streamsGroupDescribeRequestData.groupIds.add("group-id") @@ -10425,17 +11298,16 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() when(groupCoordinator.streamsGroupDescribe( any[RequestContext], any[util.List[String]] )).thenReturn(future) - future.complete(List().asJava) + future.complete(util.List.of) kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -10445,7 +11317,11 @@ class KafkaApisTest extends Logging { @Test def testStreamsGroupDescribeFutureFailed(): Unit = { + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() streamsGroupDescribeRequestData.groupIds.add("group-id") @@ -10456,9 +11332,7 @@ class KafkaApisTest extends 
Logging { any[RequestContext], any[util.List[String]] )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) @@ -10473,9 +11347,13 @@ class KafkaApisTest extends Logging { val barTopicName = "bar" val errorMessage = "The described group uses topics that the client is not authorized to describe." + val features = mock(classOf[FinalizedFeatures]) + when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.features()).thenReturn(features) - val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() .setIncludeAuthorizedOperations(includeAuthorizedOperations) streamsGroupDescribeRequestData.groupIds.addAll(groupIds) @@ -10494,9 +11372,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() @@ -10505,38 +11383,37 @@ class KafkaApisTest extends Logging { any[util.List[String]] )).thenReturn(future) kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,streams") + authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val subtopology0 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology0") - .setSourceTopics(Collections.singletonList(fooTopicName)) + .setSourceTopics(util.List.of(fooTopicName)) val subtopology1 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology1") - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) val subtopology2 = new StreamsGroupDescribeResponseData.Subtopology() .setSubtopologyId("subtopology2") - .setSourceTopics(Collections.singletonList(fooTopicName)) - .setRepartitionSinkTopics(Collections.singletonList(barTopicName)) + .setSourceTopics(util.List.of(fooTopicName)) + .setRepartitionSinkTopics(util.List.of(barTopicName)) - future.complete(List( + future.complete(util.List.of( new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(0)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology0))), + .setSubtopologies(util.List.of(subtopology0))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(1)) .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology1))), + .setSubtopologies(util.List.of(subtopology1))), new StreamsGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(2)) .setTopology(new 
StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(Collections.singletonList(subtopology2))) - ).asJava) + .setSubtopologies(util.List.of(subtopology2))) + )) val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) assertNotNull(response.data) @@ -10556,7 +11433,7 @@ class KafkaApisTest extends Logging { metadataCache = mock(classOf[KRaftMetadataCache]) - val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() .setGroupIds(groupIds) val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) @@ -10574,9 +11451,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() @@ -10593,48 +11470,48 @@ class KafkaApisTest extends Logging { val member0 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member0") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) val member1 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member1") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( + .setTopicPartitions(util.List.of( new TopicPartitions().setTopicName(fooTopicName), - new TopicPartitions().setTopicName(barTopicName)).asJava)) + new TopicPartitions().setTopicName(barTopicName)))) val member2 = new ConsumerGroupDescribeResponseData.Member() .setMemberId("member2") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(barTopicName)))) .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new TopicPartitions().setTopicName(fooTopicName)))) - future.complete(List( + future.complete(util.List.of( new DescribedGroup() .setGroupId(groupIds.get(0)) - .setMembers(List(member0).asJava), + .setMembers(util.List.of(member0)), new DescribedGroup() .setGroupId(groupIds.get(1)) - .setMembers(List(member0, member1).asJava), + .setMembers(util.List.of(member0, member1)), new DescribedGroup() .setGroupId(groupIds.get(2)) - 
.setMembers(List(member2).asJava) - ).asJava) + .setMembers(util.List.of(member2)) + )) val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() - .setGroups(List( + .setGroups(util.List.of( new DescribedGroup() .setGroupId(groupIds.get(0)) - .setMembers(List(member0).asJava), + .setMembers(util.List.of(member0)), new DescribedGroup() .setGroupId(groupIds.get(1)) .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) @@ -10643,7 +11520,7 @@ class KafkaApisTest extends Logging { .setGroupId(groupIds.get(2)) .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) .setErrorMessage(errorMessage) - ).asJava) + )) val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) @@ -10724,47 +11601,252 @@ class KafkaApisTest extends Logging { } @Test - def testListClientMetricsResources(): Unit = { - val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + def testListConfigResourcesV0(): Unit = { + val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.LIST_CONFIG_RESOURCES)) + try { + val request = buildRequest(new ListConfigResourcesRequest.Builder( + new ListConfigResourcesRequestData().setResourceTypes(util.List.of(ConfigResource.Type.CLIENT_METRICS.id))).build(0), + requestMetrics = requestMetrics) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val resources = util.Set.of("client-metric1", "client-metric2") + when(clientMetricsManager.listClientMetricsResources).thenReturn(resources) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottlingAndUpdateMetrics[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + resources.stream.map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource) + ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(metadataCache, never).getAllTopics + verify(groupConfigManager, never).groupIds + verify(metadataCache, never).getBrokerNodes(any) + assertTrue(requestMetrics.apply(ApiKeys.LIST_CONFIG_RESOURCES.name).requestQueueTimeHist.count > 0) + assertTrue(requestMetrics.apply(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME).requestQueueTimeHist.count > 0) + } finally { + requestMetrics.close() + } + } + + @Test + def testListConfigResourcesV1WithEmptyResourceTypes(): Unit = { + val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.LIST_CONFIG_RESOURCES)) + try { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build(1), + requestMetrics = requestMetrics) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val clientMetrics = util.Set.of("client-metric1", "client-metric2") + val topics = util.Set.of("topic1", "topic2") + val groupIds = util.List.of("group1", "group2") + val nodeIds = util.List.of(1, 2) + when(clientMetricsManager.listClientMetricsResources).thenReturn(clientMetrics) + when(metadataCache.getAllTopics).thenReturn(topics) + when(groupConfigManager.groupIds).thenReturn(groupIds) + when(metadataCache.getBrokerNodes(any())).thenReturn( + nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) + + kafkaApis = createKafkaApis() 
+ kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottlingAndUpdateMetrics[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + util.stream.Stream.of( + groupIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.GROUP.id) + ).toList, + clientMetrics.stream.map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.CLIENT_METRICS.id) + ).toList, + nodeIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id) + ).toList, + nodeIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER.id) + ).toList, + topics.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.TOPIC.id) + ).toList + ).flatMap(s => s.stream).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + assertTrue(requestMetrics.apply(ApiKeys.LIST_CONFIG_RESOURCES.name).requestQueueTimeHist.count > 0) + assertEquals(0, requestMetrics.apply(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME).requestQueueTimeHist.count) + } finally { + requestMetrics.close() + } + } + + @Test + def testListConfigResourcesV1WithGroup(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.GROUP.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val groupIds = util.List.of("group1", "group2") + when(groupConfigManager.groupIds).thenReturn(groupIds) - val resources = new mutable.HashSet[String] - resources.add("test1") - resources.add("test2") - when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) - val expectedResponse = new ListClientMetricsResourcesResponseData().setClientMetricsResources( - resources.map(resource => new ClientMetricsResource().setName(resource)).toBuffer.asJava) - assertEquals(expectedResponse, response.data) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + groupIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.GROUP.id) + ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(metadataCache, never).getAllTopics + verify(clientMetricsManager, never).listClientMetricsResources + verify(metadataCache, never).getBrokerNodes(any) } @Test - def testListClientMetricsResourcesEmptyResponse(): Unit = { - val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + def 
testListConfigResourcesV1WithClientMetrics(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.CLIENT_METRICS.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val clientMetrics = util.Set.of("client-metric1", "client-metric2") + when(clientMetricsManager.listClientMetricsResources).thenReturn(clientMetrics) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + clientMetrics.stream.map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.CLIENT_METRICS.id) + ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(metadataCache, never).getAllTopics + verify(groupConfigManager, never).groupIds + verify(metadataCache, never).getBrokerNodes(any) + } + + @Test + def testListConfigResourcesV1WithBrokerLogger(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.BROKER_LOGGER.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val nodeIds = util.List.of(1, 2) + when(metadataCache.getBrokerNodes(any())).thenReturn( + nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + nodeIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id) + ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(metadataCache, never).getAllTopics + verify(groupConfigManager, never).groupIds + verify(clientMetricsManager, never).listClientMetricsResources + } + + @Test + def testListConfigResourcesV1WithBroker(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.BROKER.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val nodeIds = util.List.of(1, 2) + when(metadataCache.getBrokerNodes(any())).thenReturn( + nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + nodeIds.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER.id) + ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(metadataCache, never).getAllTopics + 
verify(groupConfigManager, never).groupIds + verify(clientMetricsManager, never).listClientMetricsResources + } + + @Test + def testListConfigResourcesV1WithTopic(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.TOPIC.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + val topics = util.Set.of("topic1", "topic2") + when(metadataCache.getAllTopics).thenReturn(topics) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponseData = new ListConfigResourcesResponseData() + .setConfigResources( + topics.stream().map(resource => + new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.TOPIC.id) + ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) + assertEquals(expectedResponseData, response.data) + + verify(groupConfigManager, never).groupIds + verify(clientMetricsManager, never).listClientMetricsResources + verify(metadataCache, never).getBrokerNodes(any) + } + + @Test + def testListConfigResourcesEmptyResponse(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build()) + metadataCache = mock(classOf[KRaftMetadataCache]) + + when(clientMetricsManager.listClientMetricsResources).thenReturn(util.Set.of) + when(metadataCache.getAllTopics).thenReturn(util.Set.of) + when(groupConfigManager.groupIds).thenReturn(util.List.of) + when(metadataCache.getBrokerNodes(any())).thenReturn(util.List.of) - val resources = new mutable.HashSet[String] - when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) - val expectedResponse = new ListClientMetricsResourcesResponseData() + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + val expectedResponse = new ListConfigResourcesResponseData() assertEquals(expectedResponse, response.data) } @Test - def testListClientMetricsResourcesWithException(): Unit = { - val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) + def testListConfigResourcesV1WithUnknown(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() + .setResourceTypes(util.List.of(ConfigResource.Type.UNKNOWN.id))).build(1)) + metadataCache = mock(classOf[KRaftMetadataCache]) + + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) + assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data.errorCode()) + + verify(metadataCache, never).getAllTopics + verify(groupConfigManager, never).groupIds + verify(clientMetricsManager, never).listClientMetricsResources + verify(metadataCache, never).getBrokerNodes(any) + } + + @Test + def testListConfigResourcesWithException(): Unit = { + val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build()) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientMetricsManager.listClientMetricsResources).thenThrow(new 
RuntimeException("test")) kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) + val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponse = new ListClientMetricsResourcesResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) + val expectedResponse = new ListConfigResourcesResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) assertEquals(expectedResponse, response.data) } @@ -10772,9 +11854,11 @@ class KafkaApisTest extends Logging { def testShareGroupHeartbeatReturnsUnsupportedVersion(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) + metadataCache = mock(classOf[KRaftMetadataCache]) + kafkaApis = createKafkaApis( + featureVersions = Seq(ShareVersion.SV_0) + ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val expectedHeartbeatResponse = new ShareGroupHeartbeatResponseData() @@ -10787,17 +11871,15 @@ class KafkaApisTest extends Logging { def testShareGroupHeartbeatRequest(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() when(groupCoordinator.shareGroupHeartbeat( requestChannelRequest.context, shareGroupHeartbeatRequest )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val shareGroupHeartbeatResponse = new ShareGroupHeartbeatResponseData() @@ -10812,14 +11894,13 @@ class KafkaApisTest extends Logging { def testShareGroupHeartbeatRequestGroupAuthorizationFailed(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -10830,7 +11911,7 @@ class 
KafkaApisTest extends Logging { @Test def testShareGroupHeartbeatRequestTopicAuthorizationFailed(): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val groupId = "group" val fooTopicName = "foo" val barTopicName = "bar" @@ -10838,7 +11919,7 @@ class KafkaApisTest extends Logging { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData() .setGroupId(groupId) - .setSubscribedTopicNames(List(fooTopicName, barTopicName, zarTopicName).asJava) + .setSubscribedTopicNames(util.List.of(fooTopicName, barTopicName, zarTopicName)) val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) @@ -10853,13 +11934,12 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -10872,17 +11952,15 @@ class KafkaApisTest extends Logging { def testShareGroupHeartbeatRequestFutureFailed(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() when(groupCoordinator.shareGroupHeartbeat( requestChannelRequest.context, shareGroupHeartbeatRequest )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) @@ -10895,44 +11973,43 @@ class KafkaApisTest extends Logging { val fooTopicName = "foo" val barTopicName = "bar" - val groupIds = List("share-group-id-0", "share-group-id-1", "share-group_id-2").asJava + val groupIds = util.List.of("share-group-id-0", "share-group-id-1", "share-group_id-2") val member0 = new ShareGroupDescribeResponseData.Member() .setMemberId("member0") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)))) val member1 = new ShareGroupDescribeResponseData.Member() .setMemberId("member1") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( + .setTopicPartitions(util.List.of( new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName), - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)).asJava)) + new 
ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) val member2 = new ShareGroupDescribeResponseData.Member() .setMemberId("member2") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)).setMembers(List(member0).asJava), - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)).setMembers(List(member1).asJava), - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(2)).setMembers(List(member2).asJava) - ).asJava - getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") - , true, null, describedGroups) + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)).setMembers(util.List.of(member0)), + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)).setMembers(util.List.of(member1)), + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(2)).setMembers(util.List.of(member2)) + ) + getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = true, null, describedGroups) } @Test def testShareGroupDescribeReturnsUnsupportedVersion(): Unit = { - val groupIds = List("share-group-id-0", "share-group-id-1").asJava - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + val groupIds = util.List.of("share-group-id-0", "share-group-id-1") + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)), new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) - ).asJava - val response = getShareGroupDescribeResponse(groupIds, Map.empty, false, null, describedGroups) + ) + val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = false, verifyNoErr = false, null, describedGroups) assertNotNull(response.data) assertEquals(2, response.data.groups.size) response.data.groups.forEach(group => assertEquals(Errors.UNSUPPORTED_VERSION.code(), group.errorCode())) @@ -10940,13 +12017,12 @@ class KafkaApisTest extends Logging { @Test def testShareGroupDescribeRequestAuthorizationFailed(): Unit = { - val groupIds = List("share-group-id-0", "share-group-id-1").asJava - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List().asJava + val groupIds = util.List.of("share-group-id-0", "share-group-id-1") + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava) - val response = getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") - , false, authorizer, describedGroups) + .thenReturn(util.List.of(AuthorizationResult.DENIED)) + val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, 
describedGroups) assertNotNull(response.data) assertEquals(2, response.data.groups.size) response.data.groups.forEach(group => assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code(), group.errorCode())) @@ -10954,17 +12030,16 @@ class KafkaApisTest extends Logging { @Test def testShareGroupDescribeRequestAuthorizationFailedForOneGroup(): Unit = { - val groupIds = List("share-group-id-fail-0", "share-group-id-1").asJava - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + val groupIds = util.List.of("share-group-id-fail-0", "share-group-id-1") + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) - ).asJava + ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") - , false, authorizer, describedGroups) + val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, describedGroups) assertNotNull(response.data) assertEquals(2, response.data.groups.size) @@ -10978,7 +12053,7 @@ class KafkaApisTest extends Logging { val barTopicName = "bar" val errorMessage = "The group has described topic(s) that the client is not authorized to describe." - val groupIds = List("share-group-id-0", "share-group-id-1", "share-group_id-2").asJava + val groupIds = util.List.of("share-group-id-0", "share-group-id-1", "share-group_id-2") val authorizer: Authorizer = mock(classOf[Authorizer]) val acls = Map( @@ -10993,42 +12068,41 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } val member0 = new ShareGroupDescribeResponseData.Member() .setMemberId("member0") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)))) val member1 = new ShareGroupDescribeResponseData.Member() .setMemberId("member1") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( + .setTopicPartitions(util.List.of( new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName), - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)).asJava)) + new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) val member2 = new ShareGroupDescribeResponseData.Member() .setMemberId("member2") .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTopicPartitions(util.List.of( + new 
ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(0)) - .setMembers(List(member0).asJava), + .setMembers(util.List.of(member0)), new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(1)) - .setMembers(List(member1).asJava), + .setMembers(util.List.of(member1)), new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupIds.get(2)) - .setMembers(List(member2).asJava)).asJava + .setMembers(util.List.of(member2))) - val response = getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") - , false, authorizer, describedGroups) + val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, describedGroups) assertNotNull(response.data) assertEquals(3, response.data.groups.size) @@ -11041,46 +12115,42 @@ class KafkaApisTest extends Logging { @Test def testReadShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val readRequestData = new ReadShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new ReadShareGroupStateRequestData.ReadStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) - val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = List( + val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = util.List.of( new ReadShareGroupStateResponseData.ReadStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) .setStateEpoch(1) .setStartOffset(10) - .setStateBatches(List( + .setStateBatches(util.List.of( new ReadShareGroupStateResponseData.StateBatch() .setFirstOffset(11) .setLastOffset(15) .setDeliveryState(0) .setDeliveryCount(1) - ).asJava) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) + )) ) - val response = getReadShareGroupResponse( + val response = getReadShareGroupStateResponse( readRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, readStateResultData @@ -11092,50 +12162,46 @@ class KafkaApisTest extends Logging { @Test def testReadShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val readRequestData = new ReadShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new ReadShareGroupStateRequestData.ReadStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) - val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = List( + val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = util.List.of( new 
ReadShareGroupStateResponseData.ReadStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) .setStateEpoch(1) .setStartOffset(10) - .setStateBatches(List( + .setStateBatches(util.List.of( new ReadShareGroupStateResponseData.StateBatch() .setFirstOffset(11) .setLastOffset(15) .setDeliveryState(0) .setDeliveryCount(1) - ).asJava) - ).asJava) - ).asJava + )) + )) + ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", - ) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getReadShareGroupResponse( + val response = getReadShareGroupStateResponse( readRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, readStateResultData @@ -11151,39 +12217,35 @@ class KafkaApisTest extends Logging { @Test def testReadShareGroupStateSummarySuccess(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateSummaryRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) - val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = List( + val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = util.List.of( new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateSummaryResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) .setStateEpoch(1) .setStartOffset(10) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) ) - val response = getReadShareGroupSummaryResponse( + val response = getReadShareGroupStateSummaryResponse( readSummaryRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, readStateSummaryResultData @@ -11195,43 +12257,39 @@ class KafkaApisTest extends Logging { @Test def testReadShareGroupStateSummaryAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateSummaryRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) - ).asJava) - ).asJava) + )) + )) - val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = List( + val readStateSummaryResultData: 
util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = util.List.of( new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new ReadShareGroupStateSummaryResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) .setStateEpoch(1) .setStartOffset(10) - ).asJava) - ).asJava + )) + ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", - ) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getReadShareGroupSummaryResponse( + val response = getReadShareGroupStateSummaryResponse( readSummaryRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, readStateSummaryResultData @@ -11253,7 +12311,7 @@ class KafkaApisTest extends Logging { )) ) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build()) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build()) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11270,14 +12328,13 @@ class KafkaApisTest extends Logging { )) ) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11298,14 +12355,13 @@ class KafkaApisTest extends Logging { util.List.of(new DescribeShareGroupOffsetsRequestGroup().setGroupId("group").setTopics(null)) ) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11328,7 +12384,7 @@ class KafkaApisTest 
extends Logging { val topicId2 = Uuid.randomUuid val topicName3 = "topic-3" val topicId3 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 1, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -11349,7 +12405,7 @@ class KafkaApisTest extends Logging { val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] when(groupCoordinator.describeShareGroupOffsets( @@ -11361,9 +12417,7 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, describeShareGroupOffsetsRequestGroup2 )).thenReturn(futureGroup2) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() @@ -11444,7 +12498,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid val topicName3 = "topic-3" val topicId3 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 1, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -11465,7 +12519,7 @@ class KafkaApisTest extends Logging { val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) // The group coordinator will only be asked for information about topics which are authorized val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] @@ -11500,12 +12554,11 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11622,7 +12675,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid val topicName3 = "topic-3" val topicId3 
= Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 1, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -11634,7 +12687,7 @@ class KafkaApisTest extends Logging { val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) // The group coordinator is being asked for information about all topics, not just those which are authorized val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] @@ -11662,12 +12715,11 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.asScala.map { action => - acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) - }.asJava + actions.stream() + .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) + .toList } kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11784,7 +12836,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid val topicName3 = "topic-3" val topicId3 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 1, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -11796,7 +12848,7 @@ class KafkaApisTest extends Logging { val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] when(groupCoordinator.describeShareGroupAllOffsets( @@ -11808,9 +12860,7 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, describeShareGroupOffsetsRequestGroup2 )).thenReturn(futureGroup2) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() @@ -11885,16 +12935,14 @@ class KafkaApisTest extends Logging { @Test def testDescribeShareGroupOffsetsRequestEmptyGroupsSuccess(): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => 
KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val future = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val describeShareGroupOffsetsResponseGroup = new DescribeShareGroupOffsetsResponseGroup() @@ -11908,22 +12956,20 @@ class KafkaApisTest extends Logging { @Test def testDescribeShareGroupOffsetsRequestEmptyTopicsSuccess(): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() val describeShareGroupOffsetsRequestGroup = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group") val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData().setGroups(util.List.of(describeShareGroupOffsetsRequestGroup)) - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) val future = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] when(groupCoordinator.describeShareGroupOffsets( requestChannelRequest.context, describeShareGroupOffsetsRequestGroup )).thenReturn(future) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val describeShareGroupOffsetsResponseGroup = new DescribeShareGroupOffsetsResponseGroup() @@ -11941,31 +12987,30 @@ class KafkaApisTest extends Logging { def testDeleteShareGroupOffsetsReturnsUnsupportedVersion(): Unit = { val deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") - .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1").setPartitions(util.List.of(1)))) + .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1"))) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest, true).build()) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest).build()) metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - response.data.responses.forEach(topic => topic.partitions.forEach(partition => assertEquals(Errors.UNSUPPORTED_VERSION.code, partition.errorCode))) + response.data.responses.forEach(topic => assertEquals(Errors.UNSUPPORTED_VERSION.code, topic.errorCode)) } @Test def testDeleteShareGroupOffsetsRequestsGroupAuthorizationFailed(): Unit = { val 
deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") - .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1").setPartitions(util.List.of(1)))) + .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1"))) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest).build) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11980,13 +13025,13 @@ class KafkaApisTest extends Logging { def buildExpectedActionsTopic(topic: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) val action = new Action(AclOperation.READ, pattern, 1, true, true) - Collections.singletonList(action) + util.List.of(action) } def buildExpectedActionsGroup(topic: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.GROUP, topic, PatternType.LITERAL) val action = new Action(AclOperation.DELETE, pattern, 1, true, true) - Collections.singletonList(action) + util.List.of(action) } val groupId = "group" @@ -11995,17 +13040,15 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid val topicName2 = "topic-2" val topicId2 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 2, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName1) - .setPartitions(util.List.of(0, 1)) val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName2) - .setPartitions(util.List.of(0, 1)) val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() .setGroupId(groupId) @@ -12015,7 +13058,7 @@ class KafkaApisTest extends Logging { .setGroupId(groupId) .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic2)) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] when(groupCoordinator.deleteShareGroupOffsets( @@ -12032,7 +13075,6 @@ class KafkaApisTest extends Logging { .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -12044,33 +13086,18 @@ class KafkaApisTest extends Logging { new DeleteShareGroupOffsetsResponseTopic() 
.setTopicName(topicName2) .setTopicId(topicId2) - .setPartitions(util.List.of( - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - )) - )) + .setErrorMessage(null) + .setErrorCode(Errors.NONE.code()) + ) + ) val expectedResponseTopics: util.List[DeleteShareGroupOffsetsResponseTopic] = new util.ArrayList[DeleteShareGroupOffsetsResponseTopic]() expectedResponseTopics.add( new DeleteShareGroupOffsetsResponseTopic() .setTopicName(topicName1) - .setPartitions(util.List.of( - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message()), - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message()) - )) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message()) ) deleteShareGroupOffsetsResponseData.responses.forEach{ topic => { @@ -12095,28 +13122,25 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid val topicName3 = "topic-3" val topicId3 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) addTopicToMetadataCache(topicName3, 3, topicId = topicId3) val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName1) - .setPartitions(util.List.of(0)) val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName2) - .setPartitions(util.List.of(0, 1)) val deleteShareGroupOffsetsRequestTopic3 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName3) - .setPartitions(util.List.of(0, 1, 2)) val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2, deleteShareGroupOffsetsRequestTopic3)) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] when(groupCoordinator.deleteShareGroupOffsets( @@ -12124,9 +13148,7 @@ class KafkaApisTest extends Logging { deleteShareGroupOffsetsRequestData )).thenReturn(resultFuture) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() @@ -12136,42 +13158,18 @@ class KafkaApisTest extends Logging { new DeleteShareGroupOffsetsResponseTopic() .setTopicName(topicName1) .setTopicId(topicId1) - .setPartitions(util.List.of( - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorMessage(null) - 
.setErrorCode(Errors.NONE.code()) - )), + .setErrorMessage(null) + .setErrorCode(Errors.NONE.code()), new DeleteShareGroupOffsetsResponseTopic() .setTopicName(topicName2) .setTopicId(topicId2) - .setPartitions(util.List.of( - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - )), + .setErrorMessage(null) + .setErrorCode(Errors.NONE.code()), new DeleteShareGroupOffsetsResponseTopic() .setTopicName(topicName3) .setTopicId(topicId3) - .setPartitions(util.List.of( - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - )) + .setErrorMessage(null) + .setErrorCode(Errors.NONE.code()), )) resultFuture.complete(deleteShareGroupOffsetsResponseData) @@ -12185,32 +13183,28 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid val topicName2 = "topic-2" val topicId2 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName1) - .setPartitions(util.List.of(0)) val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName2) - .setPartitions(util.List.of(0, 1)) val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2)) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) when(groupCoordinator.deleteShareGroupOffsets( requestChannelRequest.context, deleteShareGroupOffsetsRequestData )).thenReturn(CompletableFuture.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception)) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() @@ -12227,23 +13221,21 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid val topicName2 = "topic-2" val topicId2 = Uuid.randomUuid - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() .setTopicName(topicName1) - .setPartitions(util.List.of(0)) val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() 
.setTopicName(topicName2) - .setPartitions(util.List.of(0, 1)) val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2)) - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) val groupCoordinatorResponse: DeleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) @@ -12254,9 +13246,7 @@ class KafkaApisTest extends Logging { deleteShareGroupOffsetsRequestData )).thenReturn(CompletableFuture.completedFuture(groupCoordinatorResponse)) - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() @@ -12269,17 +13259,23 @@ class KafkaApisTest extends Logging { @Test def testDeleteShareGroupOffsetsRequestEmptyTopicsSuccess(): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - val deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequestData() + val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() .setGroupId("group") - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest, true).build) + val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) + + val groupCoordinatorResponse: DeleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() + .setErrorCode(Errors.NONE.code()) + + when(groupCoordinator.deleteShareGroupOffsets( + requestChannelRequest.context, + deleteShareGroupOffsetsRequestData + )).thenReturn(CompletableFuture.completedFuture(groupCoordinatorResponse)) val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] - kafkaApis = createKafkaApis( - overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - ) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val deleteShareGroupOffsetsResponse = new DeleteShareGroupOffsetsResponseData() @@ -12291,46 +13287,42 @@ class KafkaApisTest extends Logging { @Test def testWriteShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val writeRequestData = new WriteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new WriteShareGroupStateRequestData.WriteStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new WriteShareGroupStateRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) .setStateEpoch(2) .setStartOffset(10) - .setStateBatches(List( + .setStateBatches(util.List.of( new WriteShareGroupStateRequestData.StateBatch() .setFirstOffset(11) .setLastOffset(15) .setDeliveryCount(1) .setDeliveryState(0) - ).asJava) - ).asJava) - ).asJava) + )) + )) + )) - val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = List( + val writeStateResultData: 
util.List[WriteShareGroupStateResponseData.WriteStateResult] = util.List.of( new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new WriteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) ) - val response = getWriteShareGroupResponse( + val response = getWriteShareGroupStateResponse( writeRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, writeStateResultData @@ -12342,50 +13334,46 @@ class KafkaApisTest extends Logging { @Test def testWriteShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val writeRequestData = new WriteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new WriteShareGroupStateRequestData.WriteStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new WriteShareGroupStateRequestData.PartitionData() .setPartition(1) .setLeaderEpoch(1) .setStateEpoch(2) .setStartOffset(10) - .setStateBatches(List( + .setStateBatches(util.List.of( new WriteShareGroupStateRequestData.StateBatch() .setFirstOffset(11) .setLastOffset(15) .setDeliveryCount(1) .setDeliveryState(0) - ).asJava) - ).asJava) - ).asJava) + )) + )) + )) - val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = List( + val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = util.List.of( new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new WriteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getWriteShareGroupResponse( + val response = getWriteShareGroupStateResponse( writeRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, writeStateResultData @@ -12401,36 +13389,32 @@ class KafkaApisTest extends Logging { @Test def testDeleteShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val deleteRequestData = new DeleteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new DeleteShareGroupStateRequestData.DeleteStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new DeleteShareGroupStateRequestData.PartitionData() .setPartition(1) - ).asJava) - ).asJava) + )) + )) - val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = List( + val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = util.List.of( new DeleteShareGroupStateResponseData.DeleteStateResult() 
.setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new DeleteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) ) - val response = getDeleteShareGroupResponse( + val response = getDeleteShareGroupStateResponse( deleteRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, deleteStateResultData @@ -12442,40 +13426,36 @@ class KafkaApisTest extends Logging { @Test def testDeleteShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val deleteRequestData = new DeleteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new DeleteShareGroupStateRequestData.DeleteStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new DeleteShareGroupStateRequestData.PartitionData() .setPartition(1) - ).asJava) - ).asJava) + )) + )) - val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = List( + val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = util.List.of( new DeleteShareGroupStateResponseData.DeleteStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new DeleteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava + )) + ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", - ) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getDeleteShareGroupResponse( + val response = getDeleteShareGroupStateResponse( deleteRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, deleteStateResultData @@ -12491,37 +13471,33 @@ class KafkaApisTest extends Logging { @Test def testInitializeShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val initRequestData = new InitializeShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new InitializeShareGroupStateRequestData.InitializeStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new InitializeShareGroupStateRequestData.PartitionData() .setPartition(1) .setStateEpoch(0) - ).asJava) - ).asJava) + )) + )) - val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = List( + val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = util.List.of( new InitializeShareGroupStateResponseData.InitializeStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new InitializeShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + )) ) - val 
response = getInitializeShareGroupResponse( + val response = getInitializeShareGroupStateResponse( initRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, initStateResultData @@ -12533,41 +13509,37 @@ class KafkaApisTest extends Logging { @Test def testInitializeShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid(); + val topicId = Uuid.randomUuid() val initRequestData = new InitializeShareGroupStateRequestData() .setGroupId("group1") - .setTopics(List( + .setTopics(util.List.of( new InitializeShareGroupStateRequestData.InitializeStateData() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new InitializeShareGroupStateRequestData.PartitionData() .setPartition(1) .setStateEpoch(0) - ).asJava) - ).asJava) + )) + )) - val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = List( + val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = util.List.of( new InitializeShareGroupStateResponseData.InitializeStateResult() .setTopicId(topicId) - .setPartitions(List( + .setPartitions(util.List.of( new InitializeShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - ).asJava) - ).asJava + )) + ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - - val config = Map( - ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", - ) + .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - val response = getInitializeShareGroupResponse( + val response = getInitializeShareGroupStateResponse( initRequestData, - config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, initStateResultData @@ -12581,21 +13553,228 @@ class KafkaApisTest extends Logging { }) } - def getShareGroupDescribeResponse(groupIds: util.List[String], configOverrides: Map[String, String] = Map.empty, + @Test + def testAlterShareGroupOffsetsReturnsUnsupportedVersion(): Unit = { + val alterShareGroupOffsetsRequest = new AlterShareGroupOffsetsRequestData() + .setGroupId("group") + .setTopics( + new AlterShareGroupOffsetsRequestTopicCollection( + util.List.of( + new AlterShareGroupOffsetsRequestTopic() + .setTopicName("topic-1") + .setPartitions(util.List.of( + new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(0).setStartOffset(0), + new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(1).setStartOffset(0)) + ), + new AlterShareGroupOffsetsRequestTopic() + .setTopicName("topic-2") + .setPartitions(util.List.of( + new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(0).setStartOffset(0)) + ) + ).iterator() + ) + ) + + val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterShareGroupOffsetsRequest).build()) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups = false) + kafkaApis = createKafkaApis() + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) + response.data.responses.forEach(topic => { + topic.partitions().forEach(partition => 
assertEquals(Errors.UNSUPPORTED_VERSION.code, partition.errorCode))
+    })
+  }
+
+  @Test
+  def testAlterShareGroupOffsetsSuccess(): Unit = {
+    val groupId = "group"
+    val topicName1 = "foo"
+    val topicId1 = Uuid.randomUuid
+    metadataCache = initializeMetadataCacheWithShareGroupsEnabled()
+    addTopicToMetadataCache(topicName1, 2, topicId = topicId1)
+    val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection();
+    topicCollection.addAll(util.List.of(
+      new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic()
+        .setTopicName(topicName1)
+        .setPartitions(List(
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(0)
+            .setStartOffset(0L),
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(1)
+            .setStartOffset(0L)
+        ).asJava)))
+
+    val alterRequestData = new AlterShareGroupOffsetsRequestData()
+      .setGroupId(groupId)
+      .setTopics(topicCollection)
+
+    val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build)
+    val resultFuture = new CompletableFuture[AlterShareGroupOffsetsResponseData]
+    when(groupCoordinator.alterShareGroupOffsets(
+      any(),
+      ArgumentMatchers.eq[String](groupId),
+      ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData])
+    )).thenReturn(resultFuture)
+
+    kafkaApis = createKafkaApis()
+    kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching)
+
+    val alterShareGroupOffsetsResponse = new AlterShareGroupOffsetsResponseData()
+    resultFuture.complete(alterShareGroupOffsetsResponse)
+    val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest)
+    assertEquals(alterShareGroupOffsetsResponse, response.data)
+  }
+
+  @Test
+  def testAlterShareGroupOffsetsAuthorizationFailed(): Unit = {
+    val groupId = "group"
+    val topicName1 = "foo"
+    val topicId1 = Uuid.randomUuid
+    val topicName2 = "bar"
+    val topicId2 = Uuid.randomUuid
+    val topicName3 = "zoo"
+    metadataCache = initializeMetadataCacheWithShareGroupsEnabled()
+    addTopicToMetadataCache(topicName1, 2, topicId = topicId1)
+    addTopicToMetadataCache(topicName2, 1, topicId = topicId2)
+    val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection();
+    topicCollection.addAll(util.List.of(
+      new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic()
+        .setTopicName(topicName1)
+        .setPartitions(List(
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(0)
+            .setStartOffset(0L),
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(1)
+            .setStartOffset(0L)
+        ).asJava),
+      new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic()
+        .setTopicName(topicName2)
+        .setPartitions(List(
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(0)
+            .setStartOffset(0L)
+        ).asJava),
+      new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic()
+        .setTopicName(topicName3)
+        .setPartitions(List(
+          new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
+            .setPartitionIndex(0)
+            .setStartOffset(0L)
+        ).asJava))
+    )
+
+    val authorizer: Authorizer = mock(classOf[Authorizer])
+    when(authorizer.authorize(any[RequestContext], any[util.List[Action]]))
+      .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava, Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava, Seq(AuthorizationResult.ALLOWED).asJava)
+
+    val
alterRequestData = new AlterShareGroupOffsetsRequestData() + .setGroupId(groupId) + .setTopics(topicCollection) + + val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build) + val resultFuture = new CompletableFuture[AlterShareGroupOffsetsResponseData] + when(groupCoordinator.alterShareGroupOffsets( + any(), + ArgumentMatchers.eq[String](groupId), + ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData]) + )).thenReturn(resultFuture) + + kafkaApis = createKafkaApis(authorizer = Some(authorizer)) + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val alterShareGroupOffsetsResponse = new AlterShareGroupOffsetsResponseData() + .setResponses(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(util.List.of( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() + .setTopicName(topicName2) + .setTopicId(topicId2) + .setPartitions(List( + new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage(Errors.NONE.message()) + ).asJava) + ).iterator)) + resultFuture.complete(alterShareGroupOffsetsResponse) + val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) + + assertNotNull(response.data) + assertEquals(1, response.errorCounts().get(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + assertEquals(2, response.errorCounts().get(Errors.TOPIC_AUTHORIZATION_FAILED)) + assertEquals(3, response.data().responses().size()) + + val bar = response.data().responses().find("bar") + val foo = response.data().responses().find("foo") + val zoo = response.data().responses().find("zoo") + assertEquals(topicName1, foo.topicName()) + assertEquals(topicId1, foo.topicId()) + assertEquals(topicName2, bar.topicName()) + assertEquals(topicId2, bar.topicId()) + assertEquals(topicName3, zoo.topicName()) + assertEquals(Uuid.ZERO_UUID, zoo.topicId()) + foo.partitions().forEach(partition => { + assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), partition.errorCode()) + }) + } + + @Test + def testAlterShareGroupOffsetsRequestGroupCoordinatorThrowsError(): Unit = { + val groupId = "group" + val topicName1 = "foo" + val topicId1 = Uuid.randomUuid + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + addTopicToMetadataCache(topicName1, 2, topicId = topicId1) + val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection(); + topicCollection.addAll(util.List.of( + new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() + .setTopicName(topicName1) + .setPartitions(List( + new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() + .setPartitionIndex(0) + .setStartOffset(0L), + new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() + .setPartitionIndex(1) + .setStartOffset(0L) + ).asJava))) + + val alterRequestData = new AlterShareGroupOffsetsRequestData() + .setGroupId(groupId) + .setTopics(topicCollection) + + val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build) + when(groupCoordinator.alterShareGroupOffsets( + any(), + ArgumentMatchers.eq[String](groupId), + ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData]) + )).thenReturn(CompletableFuture.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception)) + + kafkaApis = createKafkaApis() + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val 
alterShareGroupOffsetsResponseData = new AlterShareGroupOffsetsResponseData() + .setErrorMessage(Errors.UNKNOWN_SERVER_ERROR.message()) + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) + + val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) + assertEquals(alterShareGroupOffsetsResponseData, response.data) + } + + def getShareGroupDescribeResponse(groupIds: util.List[String], enableShareGroups: Boolean = true, verifyNoErr: Boolean = true, authorizer: Authorizer = null, describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup]): ShareGroupDescribeResponse = { val shareGroupDescribeRequestData = new ShareGroupDescribeRequestData() shareGroupDescribeRequestData.groupIds.addAll(groupIds) - val requestChannelRequest = buildRequest(new ShareGroupDescribeRequest.Builder(shareGroupDescribeRequestData, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupDescribeRequest.Builder(shareGroupDescribeRequestData).build()) val future = new CompletableFuture[util.List[ShareGroupDescribeResponseData.DescribedGroup]]() when(groupCoordinator.shareGroupDescribe( any[RequestContext], any[util.List[String]] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups) kafkaApis = createKafkaApis( - overrideProperties = configOverrides, authorizer = Option(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -12611,17 +13790,17 @@ class KafkaApisTest extends Logging { response } - def getReadShareGroupResponse(requestData: ReadShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - readStateResult: util.List[ReadShareGroupStateResponseData.ReadStateResult]): ReadShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new ReadShareGroupStateRequest.Builder(requestData, true).build()) + def getReadShareGroupStateResponse(requestData: ReadShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + readStateResult: util.List[ReadShareGroupStateResponseData.ReadStateResult]): ReadShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new ReadShareGroupStateRequest.Builder(requestData).build()) val future = new CompletableFuture[ReadShareGroupStateResponseData]() when(shareCoordinator.readState( any[RequestContext], any[ReadShareGroupStateRequestData] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -12640,17 +13819,17 @@ class KafkaApisTest extends Logging { response } - def getReadShareGroupSummaryResponse(requestData: ReadShareGroupStateSummaryRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - readStateSummaryResult: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult]): ReadShareGroupStateSummaryResponse = { - val requestChannelRequest = buildRequest(new ReadShareGroupStateSummaryRequest.Builder(requestData, true).build()) + def getReadShareGroupStateSummaryResponse(requestData: ReadShareGroupStateSummaryRequestData, configOverrides: Map[String, String] = Map.empty, + 
verifyNoErr: Boolean = true, authorizer: Authorizer = null, + readStateSummaryResult: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult]): ReadShareGroupStateSummaryResponse = { + val requestChannelRequest = buildRequest(new ReadShareGroupStateSummaryRequest.Builder(requestData).build()) val future = new CompletableFuture[ReadShareGroupStateSummaryResponseData]() when(shareCoordinator.readStateSummary( any[RequestContext], any[ReadShareGroupStateSummaryRequestData] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -12669,17 +13848,17 @@ class KafkaApisTest extends Logging { response } - def getWriteShareGroupResponse(requestData: WriteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - writeStateResult: util.List[WriteShareGroupStateResponseData.WriteStateResult]): WriteShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new WriteShareGroupStateRequest.Builder(requestData, true).build()) + def getWriteShareGroupStateResponse(requestData: WriteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + writeStateResult: util.List[WriteShareGroupStateResponseData.WriteStateResult]): WriteShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new WriteShareGroupStateRequest.Builder(requestData).build()) val future = new CompletableFuture[WriteShareGroupStateResponseData]() when(shareCoordinator.writeState( any[RequestContext], any[WriteShareGroupStateRequestData] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -12698,17 +13877,17 @@ class KafkaApisTest extends Logging { response } - def getDeleteShareGroupResponse(requestData: DeleteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - deleteStateResult: util.List[DeleteShareGroupStateResponseData.DeleteStateResult]): DeleteShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new DeleteShareGroupStateRequest.Builder(requestData, true).build()) + def getDeleteShareGroupStateResponse(requestData: DeleteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + deleteStateResult: util.List[DeleteShareGroupStateResponseData.DeleteStateResult]): DeleteShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new DeleteShareGroupStateRequest.Builder(requestData).build()) val future = new CompletableFuture[DeleteShareGroupStateResponseData]() when(shareCoordinator.deleteState( any[RequestContext], any[DeleteShareGroupStateRequestData] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -12727,17 +13906,17 @@ class KafkaApisTest extends Logging { response } 
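Editor's note: the renamed helpers above all share one asynchronous stub-and-verify shape — mock the coordinator so it hands back a not-yet-completed CompletableFuture, dispatch the request through KafkaApis, then complete the future and inspect the response. The following is a stripped-down sketch of that pattern only; the Coordinator trait and its describe method are placeholders invented for illustration, not Kafka APIs.

    import java.util.concurrent.CompletableFuture
    import org.mockito.Mockito.{mock, when}

    // Placeholder standing in for the mocked groupCoordinator/shareCoordinator used in the tests above.
    trait Coordinator {
      def describe(groupId: String): CompletableFuture[String]
    }

    object StubThenCompleteSketch extends App {
      val coordinator = mock(classOf[Coordinator])
      val future = new CompletableFuture[String]()            // deliberately not completed yet
      when(coordinator.describe("group")).thenReturn(future)

      val pending = coordinator.describe("group")             // the request handler would register its callback here
      future.complete("described")                            // the test decides when the "coordinator" answers
      assert(pending.get() == "described")
    }

Completing the future only after the handler has returned is what lets these tests exercise the response path without any real coordinator thread.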
- def getInitializeShareGroupResponse(requestData: InitializeShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - initStateResult: util.List[InitializeShareGroupStateResponseData.InitializeStateResult]): InitializeShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new InitializeShareGroupStateRequest.Builder(requestData, true).build()) + def getInitializeShareGroupStateResponse(requestData: InitializeShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + initStateResult: util.List[InitializeShareGroupStateResponseData.InitializeStateResult]): InitializeShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new InitializeShareGroupStateRequest.Builder(requestData).build()) val future = new CompletableFuture[InitializeShareGroupStateResponseData]() when(shareCoordinator.initializeState( any[RequestContext], any[InitializeShareGroupStateRequestData] )).thenReturn(future) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = initializeMetadataCacheWithShareGroupsEnabled() kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 74e34b06dacd1..dc24a36951527 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -22,31 +22,32 @@ import java.util import java.util.{Arrays, Collections, Properties} import kafka.utils.TestUtils.assertBadConfigContainingMessage import kafka.utils.{CoreUtils, TestUtils} -import org.apache.kafka.common.Node -import org.apache.kafka.common.config.{ConfigException, SaslConfigs, SecurityConfig, SslConfigs, TopicConfig} +import org.apache.kafka.common.{Endpoint, Node} +import org.apache.kafka.common.config.{AbstractConfig, ConfigException, SaslConfigs, SecurityConfig, SslConfigs, TopicConfig} import org.apache.kafka.common.metrics.Sensor import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.record.{CompressionType, Records} import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.config.internals.BrokerSecurityConfigs +import org.apache.kafka.common.utils.LogCaptureAppender import org.apache.kafka.coordinator.group.ConsumerGroupMigrationPolicy import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.network.EndPoint import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs import org.apache.kafka.storage.internals.log.CleanerConfig +import org.apache.logging.log4j.Level import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import 
org.junit.jupiter.api.function.Executable -import org.apache.kafka.common.test.{TestUtils => JTestUtils} import scala.jdk.CollectionConverters._ +import scala.util.Using class KafkaConfigTest { @@ -222,7 +223,7 @@ class KafkaConfigTest { // but not duplicate names props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "HOST://localhost:9091,HOST://localhost:9091") - assertBadConfigContainingMessage(props, "Each listener must have a different name") + assertBadConfigContainingMessage(props, "Configuration 'advertised.listeners' values must not be duplicated.") } @Test @@ -247,8 +248,8 @@ class KafkaConfigTest { assertTrue(caught.getMessage.contains("If you have two listeners on the same port then one needs to be IPv4 and the other IPv6")) props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092,PLAINTEXT://127.0.0.1:9092") - caught = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) - assertTrue(caught.getMessage.contains("Each listener must have a different name")) + val exception = assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) + assertTrue(exception.getMessage.contains("values must not be duplicated.")) props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9092,SASL_SSL://127.0.0.1:9092") caught = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) @@ -300,7 +301,8 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") - assertBadConfigContainingMessage(props, "The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") + assertBadConfigContainingMessage(props, + "Missing required configuration \"controller.listener.names\" which has no default value.") props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") KafkaConfig.fromProps(props) @@ -320,7 +322,8 @@ class KafkaConfigTest { props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") assertFalse(isValidKafkaConfig(props)) - assertBadConfigContainingMessage(props, "controller.listener.names must contain at least one value when running KRaft with just the broker role") + assertBadConfigContainingMessage(props, + "Missing required configuration \"controller.listener.names\" which has no default value.") props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") KafkaConfig.fromProps(props) @@ -343,7 +346,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( - Seq(new EndPoint("lb1.example.com", 9000, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT)), + Seq(new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "lb1.example.com", 9000)), config.effectiveAdvertisedControllerListeners ) } @@ -359,7 +362,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( - Seq(new EndPoint("localhost", 9093, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT)), + Seq(new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093)), config.effectiveAdvertisedControllerListeners ) } @@ -377,8 +380,8 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( Seq( - new EndPoint("lb1.example.com", 9000, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT), - new EndPoint("localhost", 9094, ListenerName.normalised("CONTROLLER_NEW"), 
SecurityProtocol.PLAINTEXT) + new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "lb1.example.com", 9000), + new Endpoint("CONTROLLER_NEW", SecurityProtocol.PLAINTEXT, "localhost", 9094) ), config.effectiveAdvertisedControllerListeners ) @@ -442,7 +445,7 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") val controllerListenerName = new ListenerName("CONTROLLER") - assertEquals(Some(SecurityProtocol.PLAINTEXT), + assertEquals(SecurityProtocol.PLAINTEXT, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(controllerListenerName)) // ensure we don't map it to PLAINTEXT when there is a SSL or SASL controller listener props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER,SSL") @@ -455,7 +458,7 @@ class KafkaConfigTest { props.remove(SocketServerConfigs.LISTENERS_CONFIG) // ensure we don't map it to PLAINTEXT when it is explicitly mapped otherwise props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,CONTROLLER:SSL") - assertEquals(Some(SecurityProtocol.SSL), + assertEquals(SecurityProtocol.SSL, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(controllerListenerName)) // ensure we don't map it to PLAINTEXT when anything is explicitly given // (i.e. it is only part of the default value, even with KRaft) @@ -464,7 +467,7 @@ class KafkaConfigTest { // ensure we can map it to a non-PLAINTEXT security protocol by default (i.e. when nothing is given) props.remove(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG) props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - assertEquals(Some(SecurityProtocol.SSL), + assertEquals(SecurityProtocol.SSL, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("SSL"))) } @@ -476,9 +479,9 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER1,CONTROLLER2") props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "1@localhost:9092") - assertEquals(Some(SecurityProtocol.PLAINTEXT), + assertEquals(SecurityProtocol.PLAINTEXT, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER1"))) - assertEquals(Some(SecurityProtocol.PLAINTEXT), + assertEquals(SecurityProtocol.PLAINTEXT, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER2"))) } @@ -507,16 +510,16 @@ class KafkaConfigTest { props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "REPLICATION") val config = KafkaConfig.fromProps(props) val expectedListeners = Seq( - new EndPoint("localhost", 9091, new ListenerName("CLIENT"), SecurityProtocol.SSL), - new EndPoint("localhost", 9092, new ListenerName("REPLICATION"), SecurityProtocol.SSL), - new EndPoint("localhost", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT)) + new Endpoint("CLIENT", SecurityProtocol.SSL, "localhost", 9091), + new Endpoint("REPLICATION", SecurityProtocol.SSL, "localhost", 9092), + new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093)) assertEquals(expectedListeners, config.listeners) assertEquals(expectedListeners, config.effectiveAdvertisedBrokerListeners) - val expectedSecurityProtocolMap = Map( - new ListenerName("CLIENT") -> SecurityProtocol.SSL, - new ListenerName("REPLICATION") -> SecurityProtocol.SSL, - new ListenerName("INTERNAL") -> 
SecurityProtocol.PLAINTEXT, - new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT + val expectedSecurityProtocolMap = util.Map.of( + new ListenerName("CLIENT"), SecurityProtocol.SSL, + new ListenerName("REPLICATION"), SecurityProtocol.SSL, + new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -536,21 +539,21 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) val expectedListeners = Seq( - new EndPoint("localhost", 9091, new ListenerName("EXTERNAL"), SecurityProtocol.SSL), - new EndPoint("localhost", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT) + new Endpoint("EXTERNAL", SecurityProtocol.SSL, "localhost", 9091), + new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093) ) assertEquals(expectedListeners, config.listeners) val expectedAdvertisedListeners = Seq( - new EndPoint("lb1.example.com", 9000, new ListenerName("EXTERNAL"), SecurityProtocol.SSL), - new EndPoint("host1", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT) + new Endpoint("EXTERNAL", SecurityProtocol.SSL, "lb1.example.com", 9000), + new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "host1", 9093) ) assertEquals(expectedAdvertisedListeners, config.effectiveAdvertisedBrokerListeners) - val expectedSecurityProtocolMap = Map( - new ListenerName("EXTERNAL") -> SecurityProtocol.SSL, - new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT, - new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT + val expectedSecurityProtocolMap = util.Map.of( + new ListenerName("EXTERNAL"), SecurityProtocol.SSL, + new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -593,12 +596,21 @@ class KafkaConfigTest { props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "plaintext://localhost:9091,SsL://localhost:9092") props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,SSL:SSL,CONTROLLER:PLAINTEXT") val config = KafkaConfig.fromProps(props) - assertEquals(Some("SSL://localhost:9092"), config.listeners.find(_.listenerName.value == "SSL").map(JTestUtils.endpointToString)) - assertEquals(Some("PLAINTEXT://localhost:9091"), config.listeners.find(_.listenerName.value == "PLAINTEXT").map(JTestUtils.endpointToString)) + assertEndpointsEqual(new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9092), + config.listeners.find(_.listener == "SSL").getOrElse(fail("SSL endpoint not found"))) + assertEndpointsEqual( new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9091), + config.listeners.find(_.listener == "PLAINTEXT").getOrElse(fail("PLAINTEXT endpoint not found"))) } - private def listenerListToEndPoints(listenerList: String, - securityProtocolMap: collection.Map[ListenerName, SecurityProtocol] = SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO.asScala) = + private def assertEndpointsEqual(expected: Endpoint, actual: Endpoint): Unit = { + assertEquals(expected.host(), actual.host(), "Host mismatch") + assertEquals(expected.port(), actual.port(), "Port mismatch") + assertEquals(expected.listener(), actual.listener(), "Listener mismatch") + assertEquals(expected.securityProtocol(), actual.securityProtocol(), "Security protocol mismatch") + } + + private def listenerListToEndPoints(listenerList: 
java.util.List[String], + securityProtocolMap: util.Map[ListenerName, SecurityProtocol] = SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO) = CoreUtils.listenerListToEndPoints(listenerList, securityProtocolMap) @Test @@ -611,9 +623,9 @@ class KafkaConfigTest { // configuration with no listeners val conf = KafkaConfig.fromProps(props) - assertEquals(listenerListToEndPoints("PLAINTEXT://:9092"), conf.listeners) + assertEquals(listenerListToEndPoints(util.List.of("PLAINTEXT://:9092")), conf.listeners) assertNull(conf.listeners.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).get.host) - assertEquals(conf.effectiveAdvertisedBrokerListeners, listenerListToEndPoints("PLAINTEXT://:9092")) + assertEquals(conf.effectiveAdvertisedBrokerListeners, listenerListToEndPoints(util.List.of("PLAINTEXT://:9092"))) } private def isValidKafkaConfig(props: Properties): Boolean = { @@ -775,6 +787,7 @@ class KafkaConfigTest { KafkaConfig.configNames.foreach { name => name match { + case AbstractConfig.CONFIG_PROVIDERS_CONFIG => // ignore string case ServerConfigs.BROKER_ID_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerConfigs.NUM_IO_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") case ServerConfigs.BACKGROUND_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") @@ -794,6 +807,10 @@ class KafkaConfigTest { case MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") + case MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG => // no op + case MetadataLogConfig.INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG => // no op + case MetadataLogConfig.INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG => // no op + case MetadataLogConfig.INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG => // no op case KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG => // ignore string case MetadataLogConfig.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -812,8 +829,8 @@ class KafkaConfigTest { case SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") case ServerLogConfigs.NUM_PARTITIONS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") - case ServerLogConfigs.LOG_DIRS_CONFIG => // ignore string - case ServerLogConfigs.LOG_DIR_CONFIG => // ignore string + case ServerLogConfigs.LOG_DIRS_CONFIG => assertPropertyInvalid(baseProperties, name, "") + case ServerLogConfigs.LOG_DIR_CONFIG => assertPropertyInvalid(baseProperties, name, "") case ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", Records.LOG_OVERHEAD - 1) case ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") @@ -854,7 +871,7 @@ class KafkaConfigTest { case ReplicationConfigs.REPLICA_FETCH_MIN_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.REPLICA_FETCH_RESPONSE_MAX_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG => // Ignore string - case ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG => 
assertPropertyInvalid(baseProperties, name, "not_a_number") + case ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0", "-1") case ReplicationConfigs.REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.PRODUCER_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -943,16 +960,33 @@ class KafkaConfigTest { case SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS => case SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS => case SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS => - case SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME => - case SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME => - case SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE => + case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE => + case SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID => + case SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET => case SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS => case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE => case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER => + case SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL => + case SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS => + case SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS => + case SaslConfigs.SASL_OAUTHBEARER_SCOPE => + case SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME => + case SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME => + case SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL => // Security config case SecurityConfig.SECURITY_PROVIDERS_CONFIG => @@ -990,6 +1024,7 @@ class KafkaConfigTest { case RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP => // ignore string case RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) + case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case 
RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) @@ -1026,7 +1061,6 @@ class KafkaConfigTest { case ShareGroupConfig.SHARE_GROUP_MIN_RECORD_LOCK_DURATION_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_GROUP_MAX_RECORD_LOCK_DURATION_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_GROUP_PARTITION_MAX_RECORD_LOCKS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case ShareGroupConfig.SHARE_GROUP_MAX_GROUPS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case GroupCoordinatorConfig.SHARE_GROUP_MAX_SIZE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ShareGroupConfig.SHARE_GROUP_MAX_SHARE_SESSIONS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) @@ -1137,6 +1171,8 @@ class KafkaConfigTest { // topic only config case QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG => // topic only config + case "internal.segment.bytes" => + // topic internal config case prop => fail(prop + " must be explicitly checked for dynamic updatability. Note that LogConfig(s) require that KafkaConfig value lookups are dynamic and not static values.") } @@ -1164,9 +1200,10 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(defaults) assertEquals(1, config.brokerId) - assertEquals(Seq("PLAINTEXT://127.0.0.1:1122"), config.effectiveAdvertisedBrokerListeners.map(JTestUtils.endpointToString)) + assertEndpointsEqual(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "127.0.0.1", 1122), + config.effectiveAdvertisedBrokerListeners.head) assertEquals(Map("127.0.0.1" -> 2, "127.0.0.2" -> 3), config.maxConnectionsPerIpOverrides) - assertEquals(List("/tmp1", "/tmp2"), config.logDirs) + assertEquals(util.List.of("/tmp1", "/tmp2"), config.logDirs) assertEquals(12 * 60L * 1000L * 60, config.logRollTimeMillis) assertEquals(11 * 60L * 1000L * 60, config.logRollTimeJitterMillis) assertEquals(10 * 60L * 1000L * 60, config.logRetentionTimeMillis) @@ -1457,6 +1494,18 @@ class KafkaConfigTest { assertEquals(expected, addresses) } + @Test + def testInvalidQuorumAutoJoinForKRaftBroker(): Unit = { + val props = TestUtils.createBrokerConfig(0) + props.setProperty(QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG, String.valueOf(true)) + assertEquals( + "requirement failed: controller.quorum.auto.join.enable is only " + + "supported when process.roles contains the 'controller' role.", + assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage + ) + + } + @Test def testAcceptsLargeId(): Unit = { val largeBrokerId = 2000 @@ -1501,7 +1550,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals(metadataDir, config.metadataLogDir) - assertEquals(Seq(dataDir), config.logDirs) + assertEquals(util.List.of(dataDir), config.logDirs) } @Test @@ -1519,7 +1568,7 @@ class KafkaConfigTest { val config = 
KafkaConfig.fromProps(props) assertEquals(dataDir1, config.metadataLogDir) - assertEquals(Seq(dataDir1, dataDir2), config.logDirs) + assertEquals(util.List.of(dataDir1, dataDir2), config.logDirs) } @Test @@ -1553,6 +1602,7 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertEquals("You must set `node.id` to the same value as `broker.id`.", assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage()) } @@ -1744,13 +1794,11 @@ class KafkaConfigTest { props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer") var config = KafkaConfig.fromProps(props) assertEquals(Set(GroupType.CLASSIC, GroupType.CONSUMER), config.groupCoordinatorRebalanceProtocols) - assertFalse(config.shareGroupConfig.isShareGroupEnabled) // This is OK. props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,share") config = KafkaConfig.fromProps(props) assertEquals(Set(GroupType.CLASSIC, GroupType.CONSUMER, GroupType.SHARE), config.groupCoordinatorRebalanceProtocols) - assertTrue(config.shareGroupConfig.isShareGroupEnabled) props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,streams") val config2 = KafkaConfig.fromProps(props) @@ -1849,4 +1897,27 @@ class KafkaConfigTest { props.put(ShareGroupConfig.SHARE_GROUP_RECORD_LOCK_DURATION_MS_CONFIG, "30000") assertDoesNotThrow(() => KafkaConfig.fromProps(props)) } + + @Test + def testLowercaseControllerListenerNames(): Unit = { + val props = createDefaultConfig() + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "controller") + val message = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage + assertEquals("requirement failed: controller.listener.names must contain at least one value appearing in the 'listeners' configuration when running the KRaft controller role", message) + } + + @Test + def testLogBrokerHeartbeatIntervalMsShouldBeLowerThanHalfOfBrokerSessionTimeoutMs(): Unit = { + val props = createDefaultConfig() + Using.resource(LogCaptureAppender.createAndRegister) { appender => + appender.setClassLogger(KafkaConfig.getClass, Level.ERROR) + props.setProperty(KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG, "4500") + props.setProperty(KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG, "8999") + KafkaConfig.fromProps(props) + assertTrue(appender.getMessages.contains("broker.heartbeat.interval.ms (4500 ms) must be less than or equal to half of the broker.session.timeout.ms (8999 ms). " + + "The broker.session.timeout.ms is configured on controller. The broker.heartbeat.interval.ms is configured on broker. " + + "If a broker doesn't send heartbeat request within broker.session.timeout.ms, it loses broker lease. 
" + + "Please increase broker.session.timeout.ms or decrease broker.heartbeat.interval.ms.")) + } + } } diff --git a/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala b/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala index 3d4ea198753b8..51c5d192c6d3f 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala @@ -24,9 +24,7 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.metrics.MetricConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.net.Socket import java.util.concurrent.atomic.AtomicInteger @@ -63,9 +61,8 @@ class KafkaMetricReporterExceptionHandlingTest extends BaseRequestTest { super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testBothReportersAreInvoked(quorum: String): Unit = { + @Test + def testBothReportersAreInvoked(): Unit = { val port = anySocketServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) val socket = new Socket("localhost", port) socket.setSoTimeout(10000) diff --git a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala index 23cc8509627d2..c8692661134ee 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala @@ -23,10 +23,8 @@ import org.apache.kafka.common.metrics.{KafkaMetric, MetricsContext, MetricsRepo import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.metrics.MetricConfigs import org.apache.kafka.test.{TestUtils => JTestUtils} -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource object KafkaMetricsReporterTest { @@ -78,9 +76,8 @@ class KafkaMetricsReporterTest extends QuorumTestHarness { broker.startup() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testMetricsContextNamespacePresent(quorum: String): Unit = { + @Test + def testMetricsContextNamespacePresent(): Unit = { assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.CLUSTERID.get()) assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.NODEID.get()) assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.JMXPREFIX.get()) diff --git a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala index 5ba6ef34603a3..304e63602a3d6 100644 --- a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala @@ -21,11 +21,12 @@ import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartit import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse import org.apache.kafka.common.protocol.{ApiKeys, Errors} import 
org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse} +import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource import org.apache.kafka.common.{IsolationLevel, TopicPartition} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import java.util.{Optional, Properties} import scala.collection.Seq @@ -43,9 +44,8 @@ class ListOffsetsRequestTest extends BaseRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testListOffsetsErrorCodes(quorum: String): Unit = { + @Test + def testListOffsetsErrorCodes(): Unit = { val targetTimes = List(new ListOffsetsTopic() .setName(topic) .setPartitions(List(new ListOffsetsPartition() @@ -108,9 +108,8 @@ class ListOffsetsRequestTest extends BaseRequestTest { assertResponseError(error, brokerId, request) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCurrentEpochValidation(quorum: String): Unit = { + @Test + def testCurrentEpochValidation(): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) @@ -168,9 +167,8 @@ class ListOffsetsRequestTest extends BaseRequestTest { (partitionData.offset, partitionData.leaderEpoch, partitionData.errorCode()) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testResponseIncludesLeaderEpoch(quorum: String): Unit = { + @Test + def testResponseIncludesLeaderEpoch(): Unit = { val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) val firstLeaderId = partitionToLeader(partition.partition) @@ -210,46 +208,38 @@ class ListOffsetsRequestTest extends BaseRequestTest { } @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testResponseDefaultOffsetAndLeaderEpochForAllVersions(quorum: String): Unit = { + @ApiKeyVersionsSource(apiKey = ApiKeys.LIST_OFFSETS) + def testResponseDefaultOffsetAndLeaderEpochForAllVersions(version: Short): Unit = { val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) val firstLeaderId = partitionToLeader(partition.partition) TestUtils.generateAndProduceMessages(brokers, topic, 9) TestUtils.produceMessage(brokers, topic, "test-10", System.currentTimeMillis() + 10L) - for (version <- ApiKeys.LIST_OFFSETS.oldestVersion to ApiKeys.LIST_OFFSETS.latestVersion) { - if (version == 0) { - assertEquals((-1L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) - assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) - assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) - } else if (version >= 1 && version <= 3) { - assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) - assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) - assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) - 
assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) - } else if (version >= 4 && version <= 6) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) - } else if (version == 7) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) - assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) - } else if (version >= 8) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) - assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) - } + if (version >= 1 && version <= 3) { + assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) + assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) + assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) + } else if (version >= 4 && version <= 6) { + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) + } else if (version == 7) { + assertEquals((0L, 0), 
fetchOffsetAndEpoch(firstLeaderId, 0L, version)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) + assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) + } else if (version >= 8) { + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) + assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) } } diff --git a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala index 6b73e57099981..fae2b32b86bb7 100755 --- a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala @@ -26,9 +26,7 @@ import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffset import org.apache.kafka.common.{IsolationLevel, TopicPartition} import org.apache.kafka.storage.internals.log.{LogStartOffsetIncrementReason, OffsetResultHolder, UnifiedLog} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Timeout -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{Test, Timeout} import java.io.File import java.util.{Optional, Properties, Random} @@ -47,9 +45,8 @@ class LogOffsetTest extends BaseRequestTest { props.put("log.retention.check.interval.ms", (5 * 1000 * 60).toString) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGetOffsetsForUnknownTopic(quorum: String): Unit = { + @Test + def testGetOffsetsForUnknownTopic(): Unit = { val topicPartition = new TopicPartition("foo", 0) val request = ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP).asJava).build(1) @@ -58,9 +55,8 @@ class LogOffsetTest extends BaseRequestTest { } @deprecated("ListOffsetsRequest V0", since = "") - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGetOffsetsAfterDeleteRecords(quorum: String): Unit = { + @Test + def testGetOffsetsAfterDeleteRecords(): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -84,9 +80,8 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(20L, consumerOffset) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchOffsetByTimestampForMaxTimestampAfterTruncate(quorum: String): Unit = { + @Test + def testFetchOffsetByTimestampForMaxTimestampAfterTruncate(): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -106,9 +101,8 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(Optional.empty, log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, 
Optional.empty).timestampAndOffsetOpt) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchOffsetByTimestampForMaxTimestampWithUnorderedTimestamps(quorum: String): Unit = { + @Test + def testFetchOffsetByTimestampForMaxTimestampWithUnorderedTimestamps(): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -125,9 +119,8 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(6L, maxTimestampOffset.get.timestamp) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGetOffsetsBeforeLatestTime(quorum: String): Unit = { + @Test + def testGetOffsetsBeforeLatestTime(): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -158,9 +151,8 @@ class LogOffsetTest extends BaseRequestTest { assertFalse(FetchResponse.recordsOrFail(fetchResponse.responseData(topicNames, ApiKeys.FETCH.latestVersion).get(topicPartition)).batches.iterator.hasNext) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testEmptyLogsGetOffsets(quorum: String): Unit = { + @Test + def testEmptyLogsGetOffsets(): Unit = { val random = new Random val topic = "kafka-" val topicPartition = new TopicPartition(topic, random.nextInt(10)) @@ -182,9 +174,8 @@ class LogOffsetTest extends BaseRequestTest { assertFalse(offsetChanged) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testFetchOffsetByTimestampForMaxTimestampWithEmptyLog(quorum: String): Unit = { + @Test + def testFetchOffsetByTimestampForMaxTimestampWithEmptyLog(): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -195,9 +186,8 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGetOffsetsBeforeEarliestTime(quorum: String): Unit = { + @Test + def testGetOffsetsBeforeEarliestTime(): Unit = { val random = new Random val topic = "kafka-" val topicPartition = new TopicPartition(topic, random.nextInt(3)) diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala index f9970d2967afa..5db1e0873a3d2 100755 --- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala @@ -27,9 +27,7 @@ import org.apache.kafka.common.serialization.{IntegerSerializer, StringSerialize import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.io.File import java.util.Properties @@ -62,10 +60,20 @@ class LogRecoveryTest extends QuorumTestHarness { var admin: Admin = _ var producer: KafkaProducer[Integer, String] = _ - def hwFile1 = new OffsetCheckpointFile(new File(configProps1.logDirs.head, ReplicaManager.HighWatermarkFilename), null) - def hwFile2 = new OffsetCheckpointFile(new File(configProps2.logDirs.head, 
ReplicaManager.HighWatermarkFilename), null) + def hwFile1 = new OffsetCheckpointFile(new File(configProps1.logDirs.get(0), ReplicaManager.HighWatermarkFilename), null) + def hwFile2 = new OffsetCheckpointFile(new File(configProps2.logDirs.get(0), ReplicaManager.HighWatermarkFilename), null) var servers = Seq.empty[KafkaBroker] + // testHWCheckpointWithFailuresMultipleLogSegments simulates broker failures that can leave the only available replica out of the + // ISR. By enabling unclean leader election, we ensure that the test can proceed and elect + // the out-of-sync replica as the new leader, which is necessary to validate the log + // recovery and high-watermark checkpointing logic under these specific failure conditions. + override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { + val properties = new Properties() + properties.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") + Seq(properties) + } + // Some tests restart the brokers then produce more data. But since test brokers use random ports, we need // to use a new producer that knows the new ports def updateProducer(): Unit = { @@ -104,9 +112,8 @@ class LogRecoveryTest extends QuorumTestHarness { super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHWCheckpointNoFailuresSingleLogSegment(quorum: String): Unit = { + @Test + def testHWCheckpointNoFailuresSingleLogSegment(): Unit = { val numMessages = 2L sendMessages(numMessages.toInt) @@ -122,9 +129,8 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(numMessages, followerHW) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHWCheckpointWithFailuresSingleLogSegment(quorum: String): Unit = { + @Test + def testHWCheckpointWithFailuresSingleLogSegment(): Unit = { var leader = getLeaderIdForPartition(servers, topicPartition) assertEquals(0L, hwFile1.read().getOrDefault(topicPartition, 0L)) @@ -183,9 +189,8 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(hw, hwFile2.read().getOrDefault(topicPartition, 0L)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHWCheckpointNoFailuresMultipleLogSegments(quorum: String): Unit = { + @Test + def testHWCheckpointNoFailuresMultipleLogSegments(): Unit = { sendMessages(20) val hw = 20L // give some time for follower 1 to record leader HW of 600 @@ -200,9 +205,8 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(hw, followerHW) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testHWCheckpointWithFailuresMultipleLogSegments(quorum: String): Unit = { + @Test + def testHWCheckpointWithFailuresMultipleLogSegments(): Unit = { var leader = getLeaderIdForPartition(servers, topicPartition) sendMessages(2) @@ -221,7 +225,7 @@ class LogRecoveryTest extends QuorumTestHarness { server2.startup() updateProducer() // check if leader moves to the other server - leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader)) + leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader), timeout = 30000L) assertEquals(1, leader, "Leader must move to broker 1") assertEquals(hw, hwFile1.read().getOrDefault(topicPartition, 0L)) diff --git a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala index 2b2250ff95de4..181fd2f644c66 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala @@ -27,9 +27,7 @@ import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} import org.apache.kafka.metadata.BrokerState import org.apache.kafka.test.TestUtils.isValidClusterId import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import scala.collection.Seq import scala.jdk.CollectionConverters._ @@ -41,24 +39,21 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { doSetup(testInfo, createOffsetsTopic = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterIdWithRequestVersion1(quorum: String): Unit = { + @Test + def testClusterIdWithRequestVersion1(): Unit = { val v1MetadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) val v1ClusterId = v1MetadataResponse.clusterId assertNull(v1ClusterId, s"v1 clusterId should be null") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterIdIsValid(quorum: String): Unit = { + @Test + def testClusterIdIsValid(): Unit = { val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) isValidClusterId(metadataResponse.clusterId) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testRack(quorum: String): Unit = { + @Test + def testRack(): Unit = { val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) // Validate rack matches what's set in generateConfigs() above metadataResponse.brokers.forEach { broker => @@ -66,9 +61,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIsInternal(quorum: String): Unit = { + @Test + def testIsInternal(): Unit = { val internalTopic = Topic.GROUP_METADATA_TOPIC_NAME val notInternalTopic = "notInternal" // create the topics @@ -88,9 +82,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(Set(internalTopic).asJava, metadataResponse.buildCluster().internalTopics) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testNoTopicsRequest(quorum: String): Unit = { + @Test + def testNoTopicsRequest(): Unit = { // create some topics createTopic("t1", 3, 2) createTopic("t2", 3, 2) @@ -100,9 +93,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertTrue(metadataResponse.topicMetadata.isEmpty, "Response should have no topics") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAutoTopicCreation(quorum: String): Unit = { + @Test + def testAutoTopicCreation(): Unit = { val topic1 = "t1" val topic2 = "t2" val topic3 = "t3" @@ -128,9 +120,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic5)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAutoCreateTopicWithInvalidReplicationFactor(quorum: String): Unit = { + @Test + def testAutoCreateTopicWithInvalidReplicationFactor(): Unit = { // Shutdown all but one broker so that the number of brokers is less than the default replication factor brokers.tail.foreach(_.shutdown()) brokers.tail.foreach(_.awaitShutdown()) @@ -144,9 +135,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(0, 
topicMetadata.partitionMetadata.size) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAllTopicsRequest(quorum: String): Unit = { + @Test + def testAllTopicsRequest(): Unit = { // create some topics createTopic("t1", 3, 2) createTopic("t2", 3, 2) @@ -162,9 +152,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(2, metadataResponseV1.topicMetadata.size(), "V1 Response should have 2 (all) topics") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testTopicIdsInResponse(quorum: String): Unit = { + @Test + def testTopicIdsInResponse(): Unit = { val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1)) val topic1 = "topic1" val topic2 = "topic2" @@ -192,9 +181,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { /** * Preferred replica should be the first item in the replicas list */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testPreferredReplica(quorum: String): Unit = { + @Test + def testPreferredReplica(): Unit = { val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1)) createTopicWithAssignment("t1", replicaAssignment) // Test metadata on two different brokers to ensure that metadata propagation works correctly @@ -216,9 +204,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testReplicaDownResponse(quorum: String): Unit = { + @Test + def testReplicaDownResponse(): Unit = { val replicaDownTopic = "replicaDown" val replicaCount = 3 @@ -262,9 +249,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(replicaCount, v1PartitionMetadata.replicaIds.size, s"Response should have $replicaCount replicas") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testIsrAfterBrokerShutDownAndJoinsBack(quorum: String): Unit = { + @Test + def testIsrAfterBrokerShutDownAndJoinsBack(): Unit = { def checkIsr[B <: KafkaBroker]( brokers: Seq[B], topic: String @@ -300,9 +286,8 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { checkIsr(brokers, topic) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAliveBrokersWithNoTopics(quorum: String): Unit = { + @Test + def testAliveBrokersWithNoTopics(): Unit = { def checkMetadata[B <: KafkaBroker]( brokers: Seq[B], expectedBrokersCount: Int diff --git a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala index 2695663fdc366..5165debe66cd7 100644 --- a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala +++ b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala @@ -22,6 +22,7 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_ import org.apache.kafka.common.requests.FetchResponse import org.apache.kafka.common.utils.Time import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.ReplicaState import org.apache.kafka.common.TopicPartition import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -178,7 +179,7 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, def verifyLastFetchedEpoch(partition: TopicPartition, expectedEpoch: Option[Int]): Unit = { if (leader.isTruncationOnFetchSupported) { - assertEquals(Some(Fetching), fetchState(partition).map(_.state)) + assertEquals(Some(ReplicaState.FETCHING), fetchState(partition).map(_.state)) 
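The test-class conversions running through LogOffsetTest, LogRecoveryTest and MetadataRequestTest above all follow the same shape: the quorum parameter became meaningless once "kraft" was the only accepted value, so each parameterized test collapses into a plain JUnit 5 test. A minimal, self-contained sketch of the before/after pattern; the class and method names are hypothetical and not part of this patch.

import org.junit.jupiter.api.Test
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource

// Illustrative only; not part of the Kafka test suite.
class QuorumParameterMigrationExample {

  // Before: the quorum was injected even though "kraft" was the only remaining value.
  @ParameterizedTest
  @ValueSource(strings = Array("kraft"))
  def oldStyle(quorum: String): Unit = {
    assert(quorum == "kraft")
  }

  // After: the parameter carries no information, so a plain @Test suffices and the
  // method signature loses its argument.
  @Test
  def newStyle(): Unit = {
    assert(1 + 1 == 2)
  }
}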
assertEquals(expectedEpoch, fetchState(partition).map(_.lastFetchedEpoch.get())) } } diff --git a/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala b/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala index b80387f62af19..96e43955d9ea7 100644 --- a/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala +++ b/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala @@ -17,9 +17,7 @@ package kafka.server -import kafka.server.AbstractFetcherThread.ReplicaFetch -import kafka.server.AbstractFetcherThread.ResultWithPartitions -import org.apache.kafka.common.message.FetchResponseData +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record._ @@ -28,10 +26,11 @@ import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.{LeaderEndPoint, PartitionFetchState, ReplicaFetch, ResultWithPartitions} import java.nio.ByteBuffer import java.util.Optional -import scala.collection.{Map, Set, mutable} +import scala.collection.{mutable} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOptional import scala.util.Random @@ -76,7 +75,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { fetchRequest.fetchData.asScala.map { case (partition, fetchData) => val leaderState = leaderPartitionState(partition) val epochCheckError = checkExpectedLeaderEpoch(fetchData.currentLeaderEpoch, leaderState) @@ -105,7 +104,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l (Errors.NONE, records) } - val partitionData = new FetchData() + val partitionData = new FetchResponseData.PartitionData() .setPartitionIndex(partition.partition) .setErrorCode(error.code) .setHighWatermark(leaderState.highWatermark) @@ -115,7 +114,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l divergingEpoch.foreach(partitionData.setDivergingEpoch) (partition, partitionData) - }.toMap + }.toMap.asJava } override def fetchEarliestOffset(topicPartition: TopicPartition, leaderEpoch: Int): OffsetAndEpoch = { @@ -136,9 +135,9 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l new OffsetAndEpoch(leaderState.localLogStartOffset, leaderState.leaderEpoch) } - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { - val endOffsets = mutable.Map[TopicPartition, EpochEndOffset]() - partitions.foreachEntry { (partition, epochData) => + override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { + val endOffsets = new java.util.HashMap[TopicPartition, EpochEndOffset]() + partitions.forEach { (partition, epochData) => assert(partition.partition == epochData.partition, "Partition must be consistent between 
TopicPartition and EpochData") val leaderState = leaderPartitionState(partition) @@ -148,9 +147,9 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l endOffsets } - override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { + override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = { val fetchData = mutable.Map.empty[TopicPartition, FetchRequest.PartitionData] - partitionMap.foreach { case (partition, state) => + partitions.forEach { case (partition, state) => if (state.isReadyForFetch) { val replicaState = replicaPartitionStateCallback(partition).getOrElse(throw new IllegalArgumentException(s"Unknown partition $partition")) val lastFetchedEpoch = if (isTruncationOnFetchSupported) @@ -158,17 +157,17 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l else Optional.empty[Integer] fetchData.put(partition, - new FetchRequest.PartitionData(state.topicId.getOrElse(Uuid.ZERO_UUID), state.fetchOffset, replicaState.logStartOffset, + new FetchRequest.PartitionData(state.topicId.orElse(Uuid.ZERO_UUID), state.fetchOffset, replicaState.logStartOffset, 1024 * 1024, Optional.of[Integer](state.currentLeaderEpoch), lastFetchedEpoch)) } } val fetchRequest = FetchRequest.Builder.forReplica(version, replicaId, 1, 0, 1, fetchData.asJava) val fetchRequestOpt = if (fetchData.isEmpty) - None + java.util.Optional.empty[ReplicaFetch]() else - Some(ReplicaFetch(fetchData.asJava, fetchRequest)) - ResultWithPartitions(fetchRequestOpt, Set.empty) + Optional.of(new ReplicaFetch(fetchData.asJava, fetchRequest)) + new ResultWithPartitions(fetchRequestOpt, java.util.Collections.emptySet()) } private def checkLeaderEpochAndThrow(expectedEpoch: Int, partitionState: PartitionState): Unit = { @@ -206,9 +205,9 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l partitionState: PartitionState): Option[FetchResponseData.EpochEndOffset] = { lastFetchedEpoch.toScala.flatMap { fetchEpoch => val epochEndOffset = fetchEpochEndOffsets( - Map(topicPartition -> new EpochData() + java.util.Map.of(topicPartition, new OffsetForLeaderEpochRequestData.OffsetForLeaderPartition() .setPartition(topicPartition.partition) - .setLeaderEpoch(fetchEpoch)))(topicPartition) + .setLeaderEpoch(fetchEpoch))).get(topicPartition) if (partitionState.log.isEmpty || epochEndOffset.endOffset == UNDEFINED_EPOCH_OFFSET @@ -224,7 +223,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l } def lookupEndOffsetForEpoch(topicPartition: TopicPartition, - epochData: EpochData, + epochData: OffsetForLeaderEpochRequestData.OffsetForLeaderPartition, partitionState: PartitionState): EpochEndOffset = { checkExpectedLeaderEpoch(epochData.currentLeaderEpoch, partitionState).foreach { error => return new EpochEndOffset() diff --git a/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala b/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala index 1e368cae6e392..ca37d9a3f19f8 100644 --- a/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala +++ b/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala @@ -19,6 +19,9 @@ package kafka.server import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.message.FetchResponseData +import org.apache.kafka.server.LeaderEndPoint +import org.apache.kafka.server.PartitionFetchState +import 
org.apache.kafka.server.ReplicaState import java.util.Optional @@ -33,8 +36,10 @@ class MockTierStateMachine(leader: LeaderEndPoint) extends TierStateMachine(lead val offsetToFetch = leader.fetchEarliestLocalOffset(topicPartition, currentFetchState.currentLeaderEpoch).offset val initialLag = leaderEndOffset - offsetToFetch fetcher.truncateFullyAndStartAt(topicPartition, offsetToFetch) - PartitionFetchState(currentFetchState.topicId, offsetToFetch, Option.apply(initialLag), currentFetchState.currentLeaderEpoch, - Fetching, Optional.of(currentFetchState.currentLeaderEpoch)) + new PartitionFetchState(currentFetchState.topicId, offsetToFetch, Optional.of(initialLag), + currentFetchState.currentLeaderEpoch, Optional.empty(), ReplicaState.FETCHING, + Optional.of(currentFetchState.currentLeaderEpoch) + ) } def setFetcher(mockFetcherThread: MockFetcherThread): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala index f289c241d1b4b..eceb21a407787 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala @@ -16,6 +16,7 @@ */ package kafka.server +import org.apache.kafka.common.Uuid import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance @@ -46,7 +47,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator createOffsetsTopic() // Create the topic. - createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -55,7 +56,6 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator // a session long enough for the duration of the test. val (memberId, memberEpoch) = joinConsumerGroup("grp", useNewProtocol) - // Start from version 1 because version 0 goes to ZK. for (version <- ApiKeys.OFFSET_COMMIT.oldestVersion to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { // Commit offset. 
commitOffset( @@ -63,6 +63,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = if (useNewProtocol && version < 9) Errors.UNSUPPORTED_VERSION else Errors.NONE, @@ -75,6 +76,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = @@ -89,6 +91,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = @@ -103,6 +106,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = "", memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.UNKNOWN_MEMBER_ID, @@ -115,6 +119,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch + 1, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = @@ -131,11 +136,27 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = "", memberEpoch = -1, topic = "foo", + topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.NONE, version = version.toShort ) + + // Commit offset to a group with an unknown topic id. + if (version >= 10) { + commitOffset( + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch, + topic = "bar", + topicId = Uuid.randomUuid(), + partition = 0, + offset = 100L, + expectedError = Errors.UNKNOWN_TOPIC_ID, + version = version.toShort + ) + } } } } diff --git a/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala index c9201b24e9870..0fc414e24c99e 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala @@ -45,7 +45,7 @@ class OffsetDeleteRequestTest(cluster: ClusterInstance) extends GroupCoordinator createOffsetsTopic() // Create the topic. 
- createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -65,6 +65,7 @@ class OffsetDeleteRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, diff --git a/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala index b49de57793172..75bf82ef155d7 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala @@ -17,8 +17,8 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.message.OffsetFetchResponseData +import org.apache.kafka.common.Uuid +import org.apache.kafka.common.message.{OffsetFetchRequestData, OffsetFetchResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig @@ -70,8 +70,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB // in this test because it does not use FindCoordinator API. createOffsetsTopic() + val unknownTopicId = Uuid.randomUuid() + // Create the topic. - createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -87,6 +89,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -94,7 +97,6 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ) } - // Start from version 1 because version 0 goes to ZK. for (version <- 1 to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { // Fetch with partitions. assertEquals( @@ -102,7 +104,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -116,14 +119,16 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch, - partitions = List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1), - new TopicPartition("foo", 5) // This one does not exist. - ), + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. 
+ ).asJava), requireStable = requireStable, version = version.toShort ) @@ -135,7 +140,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("unknown") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -149,14 +155,16 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - groupId = "unknown", - memberId = memberId, - memberEpoch = memberEpoch, - partitions = List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1), - new TopicPartition("foo", 5) // This one does not exist. - ), + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("unknown") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. + ).asJava), requireStable = requireStable, version = version.toShort ) @@ -168,7 +176,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("unknown") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -178,22 +187,30 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setCommittedOffset(-1L) ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo-unknown") + .setName(if (version < 10) "foo-unknown" else "") + .setTopicId(if (version >= 10) unknownTopicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(1) .setCommittedOffset(-1L) + .setErrorCode(if (version >= 10) Errors.UNKNOWN_TOPIC_ID.code else Errors.NONE.code) ).asJava), ).asJava), fetchOffsets( - groupId = "unknown", - memberId = memberId, - memberEpoch = memberEpoch, - partitions = List( - new TopicPartition("foo", 0), - new TopicPartition("foo-unknown", 1), - new TopicPartition("foo", 5) // This one does not exist. - ), + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("unknown") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 5).asJava), // 5 does not exist. + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo-unknown") + .setTopicId(unknownTopicId) + .setPartitionIndexes(List[Integer](1).asJava) // 5 does not exist. 
+ ).asJava), requireStable = requireStable, version = version.toShort ) @@ -206,10 +223,47 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code), fetchOffsets( - groupId = "grp", - memberId = "", - memberEpoch = memberEpoch, - partitions = List.empty, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId("") + .setMemberEpoch(memberEpoch) + .setTopics(List.empty.asJava), + requireStable = requireStable, + version = version.toShort + ) + ) + + // Fetch with empty group id. + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("") + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(-1L), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(-1L), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(5) + .setCommittedOffset(-1L) + ).asJava) + ).asJava), + fetchOffsets( + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. + ).asJava), requireStable = requireStable, version = version.toShort ) @@ -221,10 +275,11 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.STALE_MEMBER_EPOCH.code), fetchOffsets( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch + 1, - partitions = List.empty, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch + 1) + .setTopics(List.empty.asJava), requireStable = requireStable, version = version.toShort ) @@ -239,7 +294,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB createOffsetsTopic() // Create the topic. 
- createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -255,6 +310,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -271,7 +327,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -285,10 +342,11 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch, - partitions = null, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(null), requireStable = requireStable, version = version.toShort ) @@ -299,10 +357,11 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("unknown"), fetchOffsets( - groupId = "unknown", - memberId = memberId, - memberEpoch = memberEpoch, - partitions = null, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("unknown") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(null), requireStable = requireStable, version = version.toShort ) @@ -315,10 +374,11 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code), fetchOffsets( - groupId = "grp", - memberId = "", - memberEpoch = memberEpoch, - partitions = null, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId("") + .setMemberEpoch(memberEpoch) + .setTopics(null), requireStable = requireStable, version = version.toShort ) @@ -330,10 +390,11 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.STALE_MEMBER_EPOCH.code), fetchOffsets( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch + 1, - partitions = null, + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch + 1) + .setTopics(null), requireStable = requireStable, version = version.toShort ) @@ -347,8 +408,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB // in this test because it does not use FindCoordinator API. createOffsetsTopic() + val unknownTopicId = Uuid.randomUuid() + // Create the topic. 
- createTopic( + val topicId = createTopic( topic = "foo", numPartitions = 3 ) @@ -365,6 +428,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", + topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -383,7 +447,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-0") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -401,7 +466,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-1") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -423,7 +489,8 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-3") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -435,37 +502,60 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-4") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") + .setName(if (version < 10) "foo" else "") + .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(5) .setCommittedOffset(-1L) ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo-unknown") + .setName(if (version < 10) "foo-unknown" else "") + .setTopicId(if (version >= 10) unknownTopicId else Uuid.ZERO_UUID) .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(-1L) + .setErrorCode(if (version >= 10) Errors.UNKNOWN_TOPIC_ID.code else Errors.NONE.code) ).asJava) ).asJava), ).toSet, fetchOffsets( - groups = Map( - "grp-0" -> List( - new TopicPartition("foo", 0), - new TopicPartition("foo", 1), - new TopicPartition("foo", 5) // This one does not exist. - ), - "grp-1" -> null, - "grp-2" -> List.empty, - "grp-3" -> List( - new TopicPartition("foo", 0) - ), - "grp-4" -> List( - new TopicPartition("foo-unknown", 0), // unknown topic id - new TopicPartition("foo", 5) // The partition doesn't exist. - ), + groups = List( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp-0") + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. 
+ ).asJava), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp-1") + .setTopics(null), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp-2") + .setTopics(List.empty.asJava), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp-3") + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0).asJava) + ).asJava), + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp-4") + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo-unknown") // Unknown topic + .setTopicId(unknownTopicId) + .setPartitionIndexes(List[Integer](0).asJava), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](5).asJava) // 5 does not exist. + ).asJava), ), requireStable = requireStable, version = version.toShort @@ -473,4 +563,137 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ) } } + + @ClusterTest + def testFetchOffsetWithRecreatedTopic(): Unit = { + // There are two ways to ensure that committed offsets of recreated topics are not returned. + // 1) When a topic is deleted, GroupCoordinatorService#onPartitionsDeleted is called to + // delete all its committed offsets. + // 2) Since version 10 of the OffsetCommit API, the topic id is stored alongside the + // committed offset. When it is queried, it is only returned if the topic id of the + // committed offset matches the requested one. + // The test exercises both conditions, but not in a deterministic way, as they race + // against each other. + + createOffsetsTopic() + + // Create the topic. + var topicId = createTopic( + topic = "foo", + numPartitions = 3 + ) + + // Join the consumer group. Note that we don't heartbeat here so we must use + // a session long enough for the duration of the test. + val (memberId, memberEpoch) = joinConsumerGroup("grp", true) + + // Commit offsets. + for (partitionId <- 0 to 2) { + commitOffset( + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch, + topic = "foo", + topicId = topicId, + partition = partitionId, + offset = 100L + partitionId, + expectedError = Errors.NONE, + version = ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled) + ) + } + + // Delete topic. + deleteTopic("foo") + + // Recreate topic. + topicId = createTopic( + topic = "foo", + numPartitions = 3 + ) + + // Start from version 10 because fetching by topic id is not supported before that. 
+ for (version <- 10 to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { + assertEquals( + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("grp") + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setTopicId(topicId) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setCommittedOffset(-1L), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(1) + .setCommittedOffset(-1L), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(2) + .setCommittedOffset(-1L) + ).asJava) + ).asJava), + fetchOffsets( + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0, 1, 2).asJava) + ).asJava), + requireStable = true, + version = version.toShort + ) + ) + } + } + + @ClusterTest + def testGroupErrors(): Unit = { + val topicId = createTopic( + topic = "foo", + numPartitions = 3 + ) + + for (version <- ApiKeys.OFFSET_FETCH.oldestVersion() to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { + assertEquals( + if (version >= 2) { + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("unknown") + .setErrorCode(Errors.NOT_COORDINATOR.code) + } else { + // Version 1 does not support group level errors. Hence, the error is + // returned at the partition level. + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId("unknown") + .setTopics(List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName("foo") + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(0) + .setErrorCode(Errors.NOT_COORDINATOR.code) + .setCommittedOffset(-1) + .setCommittedLeaderEpoch(-1) + .setMetadata("") + ).asJava) + ).asJava) + }, + fetchOffsets( + group = new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("unknown") + .setMemberId("") + .setMemberEpoch(0) + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setTopicId(topicId) + .setPartitionIndexes(List[Integer](0).asJava) + ).asJava), + requireStable = false, + version = version.toShort + ) + ) + } + } } diff --git a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala index fc06a9eeeb759..1b13674685d45 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala @@ -26,16 +26,14 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.Test import scala.jdk.CollectionConverters._ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testOffsetsForLeaderEpochErrorCodes(quorum: String): Unit = { + @Test + def testOffsetsForLeaderEpochErrorCodes(): Unit = { val topic = "topic" val partition = new TopicPartition(topic, 0) val 
epochs = offsetForLeaderTopicCollectionFor(partition, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH) @@ -57,9 +55,8 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { assertResponseError(Errors.NOT_LEADER_OR_FOLLOWER, nonReplica, request) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCurrentEpochValidation(quorum: String): Unit = { + @Test + def testCurrentEpochValidation(): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val partitionToLeader = createTopic(topic, replicationFactor = 3) diff --git a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala index 5ab33d868a1ff..57545c7ba2b00 100644 --- a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala @@ -21,7 +21,7 @@ import java.nio.ByteBuffer import java.util.{Collections, Properties} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, TopicDescription} -import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.message.ProduceRequestData @@ -32,9 +32,9 @@ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} -import org.junit.jupiter.params.provider.ValueSource import java.util.concurrent.TimeUnit import scala.jdk.CollectionConverters._ @@ -47,19 +47,18 @@ class ProduceRequestTest extends BaseRequestTest { val metricsKeySet = KafkaYammerMetrics.defaultRegistry.allMetrics.keySet.asScala - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testSimpleProduceRequest(quorum: String): Unit = { + @Test + def testSimpleProduceRequest(): Unit = { val (partition, leader) = createTopicAndFindPartitionWithLeader("topic") def sendAndCheck(memoryRecords: MemoryRecords, expectedOffset: Long): Unit = { - val topicPartition = new TopicPartition("topic", partition) + val topicId = getTopicIds().get("topic").get val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()) + .setTopicId(topicId) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(topicPartition.partition()) + .setIndex(partition) .setRecords(memoryRecords)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -70,8 +69,8 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) - assertEquals(topicPartition, tp) + assertEquals(topicId, topicProduceResponse.topicId()) + assertEquals(partition, partitionProduceResponse.index()) assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode)) 
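The ProduceRequestTest hunks in this file swap the topic name for the topic id when building ProduceRequestData. The following is a rough sketch of the resulting builder chain, mirroring only calls that already appear in this diff; topicId and partition are placeholders for values the real tests obtain from getTopicIds() and createTopicAndFindPartitionWithLeader, and the object name is hypothetical.

import java.util.Collections
import org.apache.kafka.common.Uuid
import org.apache.kafka.common.compress.Compression
import org.apache.kafka.common.message.ProduceRequestData
import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.ProduceRequest

object ProduceByTopicIdSketch {
  // Builds a produce request addressed by topic id rather than topic name.
  def produceRequestFor(topicId: Uuid, partition: Int): ProduceRequest.Builder = {
    val records = MemoryRecords.withRecords(Compression.NONE,
      new SimpleRecord("key".getBytes, "value".getBytes))
    ProduceRequest.builder(new ProduceRequestData()
      .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
        new ProduceRequestData.TopicProduceData()
          .setTopicId(topicId) // previously .setName("topic")
          .setPartitionData(Collections.singletonList(
            new ProduceRequestData.PartitionProduceData()
              .setIndex(partition)
              .setRecords(records)))).iterator))
      .setAcks((-1).toShort)
      .setTimeoutMs(3000))
  }
}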
assertEquals(expectedOffset, partitionProduceResponse.baseOffset) assertEquals(-1, partitionProduceResponse.logAppendTimeMs) @@ -122,6 +121,7 @@ class ProduceRequestTest extends BaseRequestTest { ) val partitionToLeader = getPartitionToLeader(admin, topic) val leader = partitionToLeader(partition) + val topicDescription = TestUtils.describeTopic(createAdminClient(), topic) def createRecords(magicValue: Byte, timestamp: Long, codec: Compression): MemoryRecords = { val buf = ByteBuffer.allocate(512) @@ -133,11 +133,11 @@ class ProduceRequestTest extends BaseRequestTest { } val records = createRecords(RecordBatch.MAGIC_VALUE_V2, recordTimestamp, Compression.gzip().build()) - val topicPartition = new TopicPartition("topic", partition) + val topicPartition = new TopicIdPartition(topicDescription.topicId(), partition, "topic") val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()) + .setTopicId(topicPartition.topicId()) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(topicPartition.partition()) .setRecords(records)))).iterator)) @@ -149,7 +149,9 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) + val tp = new TopicIdPartition(topicProduceResponse.topicId(), + partitionProduceResponse.index, + getTopicNames().get(topicProduceResponse.topicId()).getOrElse("")) assertEquals(topicPartition, tp) assertEquals(Errors.INVALID_TIMESTAMP, Errors.forCode(partitionProduceResponse.errorCode)) // there are 3 records with InvalidTimestampException created from inner function createRecords @@ -160,9 +162,8 @@ class ProduceRequestTest extends BaseRequestTest { assertEquals("One or more records have been rejected due to invalid timestamp", partitionProduceResponse.errorMessage) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testProduceToNonReplica(quorum: String): Unit = { + @Test + def testProduceToNonReplica(): Unit = { val topic = "topic" val partition = 0 @@ -182,13 +183,12 @@ class ProduceRequestTest extends BaseRequestTest { // Send the produce request to the non-replica val records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("key".getBytes, "value".getBytes)) - val topicPartition = new TopicPartition("topic", partition) val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()) + .setTopicId(getTopicIds().get("topic").get) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(topicPartition.partition()) + .setIndex(partition) .setRecords(records)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -210,23 +210,22 @@ class ProduceRequestTest extends BaseRequestTest { }.getOrElse(throw new AssertionError(s"No leader elected for topic $topic")) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testCorruptLz4ProduceRequest(quorum: String): Unit = { + @Test + 
def testCorruptLz4ProduceRequest(): Unit = { val (partition, leader) = createTopicAndFindPartitionWithLeader("topic") + val topicId = getTopicIds().get("topic").get val timestamp = 1000000 val memoryRecords = MemoryRecords.withRecords(Compression.lz4().build(), new SimpleRecord(timestamp, "key".getBytes, "value".getBytes)) // Change the lz4 checksum value (not the kafka record crc) so that it doesn't match the contents val lz4ChecksumOffset = 6 memoryRecords.buffer.array.update(DefaultRecordBatch.RECORD_BATCH_OVERHEAD + lz4ChecksumOffset, 0) - val topicPartition = new TopicPartition("topic", partition) val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()) + .setTopicId(topicId) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(topicPartition.partition()) + .setIndex(partition) .setRecords(memoryRecords)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -236,8 +235,8 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) - assertEquals(topicPartition, tp) + assertEquals(topicId, topicProduceResponse.topicId()) + assertEquals(partition, partitionProduceResponse.index()) assertEquals(Errors.CORRUPT_MESSAGE, Errors.forCode(partitionProduceResponse.errorCode)) assertEquals(-1, partitionProduceResponse.baseOffset) assertEquals(-1, partitionProduceResponse.logAppendTimeMs) @@ -245,9 +244,8 @@ class ProduceRequestTest extends BaseRequestTest { assertTrue(TestUtils.meterCount(s"${BrokerTopicMetrics.INVALID_MESSAGE_CRC_RECORDS_PER_SEC}") > 0) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testZSTDProduceRequest(quorum: String): Unit = { + @Test + def testZSTDProduceRequest(): Unit = { val topic = "topic" val partition = 0 @@ -262,7 +260,8 @@ class ProduceRequestTest extends BaseRequestTest { val partitionRecords = new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic").setPartitionData(Collections.singletonList( + .setName("topic") // This test case is testing producer v.7, no need to use topic id + .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(partition) .setRecords(memoryRecords)))) diff --git a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala index 9bf4d4d7e001f..dd5968055e0f9 100644 --- a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala +++ b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala @@ -66,7 +66,7 @@ class RegistrationTestContext( val controllerEpoch = new AtomicInteger(123) config.effectiveAdvertisedBrokerListeners.foreach { ep => advertisedListeners.add(new Listener().setHost(ep.host). - setName(ep.listenerName.value()). + setName(ep.listener). setPort(ep.port.shortValue()). 
setSecurityProtocol(ep.securityProtocol.id)) } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala index 4eef090e34cc4..5c04e473d447e 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala @@ -18,7 +18,6 @@ package kafka.server import kafka.cluster.Partition import kafka.log.LogManager -import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.ReplicaAlterLogDirsThread.ReassignmentState import kafka.server.metadata.KRaftMetadataCache @@ -30,7 +29,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.server.common +import org.apache.kafka.server.{PartitionFetchState, ReplicaState, common} import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} @@ -574,13 +573,13 @@ class ReplicaAlterLogDirsThreadTest { null, config.replicaFetchBackoffMs) - val result = thread.leader.fetchEpochEndOffsets(Map( - t1p0 -> new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( + t1p0, new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(leaderEpochT1p0), - t1p1 -> new OffsetForLeaderPartition() + t1p1, new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(leaderEpochT1p1))) + .setLeaderEpoch(leaderEpochT1p1))).asScala val expected = Map( t1p0 -> new EpochEndOffset() @@ -636,13 +635,13 @@ class ReplicaAlterLogDirsThreadTest { null, config.replicaFetchBackoffMs) - val result = thread.leader.fetchEpochEndOffsets(Map( - t1p0 -> new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( + t1p0, new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(leaderEpoch), - t1p1 -> new OffsetForLeaderPartition() + t1p1, new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(leaderEpoch))) + .setLeaderEpoch(leaderEpoch))).asScala val expected = Map( t1p0 -> new EpochEndOffset() @@ -1083,14 +1082,15 @@ class ReplicaAlterLogDirsThreadTest { t1p0 -> initialFetchState(0L, leaderEpoch), t1p1 -> initialFetchState(0L, leaderEpoch))) - val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = thread.leader.buildFetch(Map( - t1p0 -> PartitionFetchState(Some(topicId), 150, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty))) - - assertTrue(fetchRequestOpt.isDefined) + val result = thread.leader.buildFetch(java.util.Map.of( + t1p0, new PartitionFetchState(Optional.of(topicId), 150, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), + t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty))) + val fetchRequestOpt = result.result + val partitionsWithError = result.partitionsWithError + 
assertTrue(fetchRequestOpt.isPresent) val fetchRequest = fetchRequestOpt.get.fetchRequest assertFalse(fetchRequest.fetchData.isEmpty) - assertFalse(partitionsWithError.nonEmpty) + assertTrue(partitionsWithError.isEmpty) val request = fetchRequest.build() assertEquals(0, request.minBytes) val fetchInfos = request.fetchData(topicNames.asJava).asScala.toSeq @@ -1138,39 +1138,54 @@ class ReplicaAlterLogDirsThreadTest { t1p1 -> initialFetchState(0L, leaderEpoch))) // one partition is ready and one is truncating - val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = thread.leader.buildFetch(Map( - t1p0 -> PartitionFetchState(Some(topicId), 150, None, leaderEpoch, state = Fetching, lastFetchedEpoch = Optional.empty), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, state = Truncating, lastFetchedEpoch = Optional.empty))) - - assertTrue(fetchRequestOpt.isDefined) - val fetchRequest = fetchRequestOpt.get - assertFalse(fetchRequest.partitionData.isEmpty) - assertFalse(partitionsWithError.nonEmpty) + val result1 = thread.leader.buildFetch(java.util.Map.of( + t1p0, new PartitionFetchState(Optional.of(topicId), 150, Optional.empty(), leaderEpoch, Optional.empty(), + ReplicaState.FETCHING, Optional.empty()), + t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.empty(), + ReplicaState.TRUNCATING, Optional.empty()) + )) + val fetchRequestOpt1 = result1.result + val partitionsWithError1 = result1.partitionsWithError + + assertTrue(fetchRequestOpt1.isPresent) + val fetchRequest = fetchRequestOpt1.get + assertFalse(fetchRequest.fetchRequest.fetchData.isEmpty) + assertTrue(partitionsWithError1.isEmpty) val fetchInfos = fetchRequest.fetchRequest.build().fetchData(topicNames.asJava).asScala.toSeq assertEquals(1, fetchInfos.length) assertEquals(t1p0, fetchInfos.head._1.topicPartition, "Expected fetch request for non-truncating partition") assertEquals(150, fetchInfos.head._2.fetchOffset) // one partition is ready and one is delayed - val ResultWithPartitions(fetchRequest2Opt, partitionsWithError2) = thread.leader.buildFetch(Map( - t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, state = Fetching, lastFetchedEpoch = Optional.empty), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = Optional.empty))) - - assertTrue(fetchRequest2Opt.isDefined) + val result2 = thread.leader.buildFetch(java.util.Map.of( + t1p0, new PartitionFetchState(Optional.of(topicId), 140, Optional.empty(), leaderEpoch, Optional.empty(), + ReplicaState.FETCHING, Optional.empty()), + t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.of(5000L), + ReplicaState.FETCHING, Optional.empty()) + )) + val fetchRequest2Opt = result2.result + val partitionsWithError2 = result2.partitionsWithError + + assertTrue(fetchRequest2Opt.isPresent) val fetchRequest2 = fetchRequest2Opt.get - assertFalse(fetchRequest2.partitionData.isEmpty) - assertFalse(partitionsWithError2.nonEmpty) + assertFalse(fetchRequest2.fetchRequest.fetchData().isEmpty) + assertTrue(partitionsWithError2.isEmpty()) val fetchInfos2 = fetchRequest2.fetchRequest.build().fetchData(topicNames.asJava).asScala.toSeq assertEquals(1, fetchInfos2.length) assertEquals(t1p0, fetchInfos2.head._1.topicPartition, "Expected fetch request for non-delayed partition") assertEquals(140, fetchInfos2.head._2.fetchOffset) // both partitions are delayed - val ResultWithPartitions(fetchRequest3Opt, 
partitionsWithError3) = thread.leader.buildFetch(Map( - t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = Optional.empty), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = Optional.empty))) + val result3 = thread.leader.buildFetch(java.util.Map.of( + t1p0, new PartitionFetchState(Optional.of(topicId), 140, Optional.empty(), leaderEpoch, Optional.of(5000L), + ReplicaState.FETCHING, Optional.empty()), + t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.of(5000L), + ReplicaState.FETCHING, Optional.empty()) + )) + val fetchRequest3Opt = result3.result + val partitionsWithError3 = result3.partitionsWithError assertTrue(fetchRequest3Opt.isEmpty, "Expected no fetch requests since all partitions are delayed") - assertFalse(partitionsWithError3.nonEmpty) + assertTrue(partitionsWithError3.isEmpty()) } def stub(logT1p0: UnifiedLog, logT1p1: UnifiedLog, futureLog: UnifiedLog, partition: Partition, diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala index d86cc54ca939a..66b41c0aaf134 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala @@ -17,15 +17,13 @@ package kafka.server -import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.{AfterEach, Test} import kafka.utils.TestUtils import TestUtils._ import kafka.api.IntegrationTestHarness import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.serialization.StringSerializer -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource class ReplicaFetchTest extends IntegrationTestHarness { val topic1 = "foo" @@ -39,9 +37,8 @@ class ReplicaFetchTest extends IntegrationTestHarness { override def brokerCount: Int = 2 - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testReplicaFetcherThread(quorum: String): Unit = { + @Test + def testReplicaFetcherThread(): Unit = { val partition = 0 val testMessageList1 = List("test1", "test2", "test3", "test4") val testMessageList2 = List("test5", "test6", "test7", "test8") diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala index 409aaf57b7370..91aa1d5c97821 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala @@ -18,7 +18,7 @@ package kafka.server import kafka.cluster.Partition import kafka.log.LogManager -import kafka.server.AbstractFetcherThread.ResultWithPartitions + import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.epoch.util.MockBlockingSender import kafka.server.metadata.KRaftMetadataCache @@ -36,6 +36,8 @@ import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.ReplicaState +import org.apache.kafka.server.PartitionFetchState import org.apache.kafka.storage.internals.log.{LogAppendInfo, UnifiedLog} import 
org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ @@ -52,7 +54,7 @@ import java.util import java.util.{Collections, Optional} import scala.collection.mutable import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption +import scala.jdk.OptionConverters._ class ReplicaFetcherThreadTest { @@ -161,13 +163,13 @@ class ReplicaFetcherThreadTest { mockBlockingSend ) - val result = thread.leader.fetchEpochEndOffsets(Map( - t1p0 -> new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( + t1p0, new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(0), - t1p1 -> new OffsetForLeaderPartition() + t1p1, new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(0))) + .setLeaderEpoch(0))).asScala val expected = Map( t1p0 -> newOffsetForLeaderPartitionResult(t1p0, Errors.UNKNOWN_SERVER_ERROR, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET), @@ -209,9 +211,9 @@ class ReplicaFetcherThreadTest { stub(partition, replicaManager, log) //Define the offsets for the OffsetsForLeaderEpochResponse - val offsets = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)).asJava + val offsets = java.util.Map.of( + t1p0, newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), + t1p1, newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)) //Create the fetcher thread val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM) @@ -307,7 +309,7 @@ class ReplicaFetcherThreadTest { thread.doWork() assertEquals(0, mockNetwork.epochFetchCount) assertEquals(1, mockNetwork.fetchCount) - partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } def partitionData(partition: Int, divergingEpoch: FetchResponseData.EpochEndOffset): FetchResponseData.PartitionData = { new FetchResponseData.PartitionData() @@ -332,7 +334,7 @@ class ReplicaFetcherThreadTest { "Expected " + t1p0 + " to truncate to offset 140 (truncation offsets: " + truncateToCapture.getAllValues + ")") assertTrue(truncateToCapture.getAllValues.asScala.contains(141), "Expected " + t1p1 + " to truncate to offset 141 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } // Loop 3 should truncate because of diverging epoch. Offset truncation is not complete // because divergent epoch is not known to follower. We truncate and stay in Fetching state. @@ -347,7 +349,7 @@ class ReplicaFetcherThreadTest { verify(partition, times(4)).truncateTo(truncateToCapture.capture(), anyBoolean()) assertTrue(truncateToCapture.getAllValues.asScala.contains(129), "Expected to truncate to offset 129 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } // Loop 4 should truncate because of diverging epoch. Offset truncation is not complete // because divergent epoch is not known to follower. 
Last fetched epoch cannot be determined @@ -364,7 +366,7 @@ class ReplicaFetcherThreadTest { verify(partition, times(6)).truncateTo(truncateToCapture.capture(), anyBoolean()) assertTrue(truncateToCapture.getAllValues.asScala.contains(119), "Expected to truncate to offset 119 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } } @Test @@ -521,7 +523,7 @@ class ReplicaFetcherThreadTest { // Lag is initialized to None when the partition fetch // state is created. - assertEquals(None, thread.fetchState(t1p0).flatMap(_.lag)) + assertEquals(None, thread.fetchState(t1p0).flatMap(_.lag.toScala)) // Prepare the fetch response data. mockNetwork.setFetchPartitionDataForNextResponse(Map( @@ -539,7 +541,7 @@ class ReplicaFetcherThreadTest { assertEquals(1, mockNetwork.fetchCount) // Lag is set to Some(0). - assertEquals(Some(0), thread.fetchState(t1p0).flatMap(_.lag)) + assertEquals(Some(0), thread.fetchState(t1p0).flatMap(_.lag.toScala)) assertEquals(Optional.of(lastFetchedEpoch), thread.fetchState(t1p0).toJava.flatMap(_.lastFetchedEpoch)) } @@ -621,13 +623,14 @@ class ReplicaFetcherThreadTest { val leaderEpoch = 1 val partitionMap = Map( - t1p0 -> PartitionFetchState(Some(topicId1), 150, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty), - t1p1 -> PartitionFetchState(Some(topicId1), 155, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty), - t2p1 -> PartitionFetchState(Some(topicId2), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty)) + t1p0 -> new PartitionFetchState(Optional.of(topicId1), 150, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), + t1p1 -> new PartitionFetchState(Optional.of(topicId1), 155, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), + t2p1 -> new PartitionFetchState(Optional.of(topicId2), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty)) - val ResultWithPartitions(fetchRequestOpt, _) = thread.leader.buildFetch(partitionMap) + val result = thread.leader.buildFetch(partitionMap.asJava) + val fetchRequestOpt = result.result - assertTrue(fetchRequestOpt.isDefined) + assertTrue(fetchRequestOpt.isPresent) val fetchRequestBuilder = fetchRequestOpt.get.fetchRequest val partitionDataMap = partitionMap.map { case (tp, state) => @@ -643,16 +646,17 @@ class ReplicaFetcherThreadTest { responseData.put(tid1p0, new FetchResponseData.PartitionData()) responseData.put(tid1p1, new FetchResponseData.PartitionData()) responseData.put(tid2p1, new FetchResponseData.PartitionData()) - val fetchResponse = FetchResponse.of(Errors.NONE, 0, 123, responseData) + val fetchResponse = FetchResponse.of(Errors.NONE, 0, 123, responseData, List.empty.asJava) leader.fetchSessionHandler.handleResponse(fetchResponse, ApiKeys.FETCH.latestVersion()) // Remove t1p0, change the ID for t2p1, and keep t1p1 the same val newTopicId = Uuid.randomUuid() val partitionMap2 = Map( - t1p1 -> PartitionFetchState(Some(topicId1), 155, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty), - t2p1 -> PartitionFetchState(Some(newTopicId), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = Optional.empty)) - val ResultWithPartitions(fetchRequestOpt2, _) = thread.leader.buildFetch(partitionMap2) + t1p1 -> new 
PartitionFetchState(Optional.of(topicId1), 155, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), + t2p1 -> new PartitionFetchState(Optional.of(newTopicId), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty)) + val result2 = thread.leader.buildFetch(partitionMap2.asJava) + val fetchRequestOpt2 = result2.result // Since t1p1 didn't change, we drop that one val partitionDataMap2 = partitionMap2.drop(1).map { case (tp, state) => @@ -660,7 +664,7 @@ class ReplicaFetcherThreadTest { config.replicaFetchMaxBytes, Optional.of(state.currentLeaderEpoch), Optional.empty())) } - assertTrue(fetchRequestOpt2.isDefined) + assertTrue(fetchRequestOpt2.isPresent) val fetchRequestBuilder2 = fetchRequestOpt2.get.fetchRequest assertEquals(partitionDataMap2.asJava, fetchRequestBuilder2.fetchData()) assertEquals(Collections.singletonList(tid2p1), fetchRequestBuilder2.replaced()) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala index 23ff3b71e97dc..52dd464e5c3e0 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala @@ -24,6 +24,7 @@ import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache import kafka.utils.TestUtils.waitUntilTrue import kafka.utils.{CoreUtils, Logging, TestUtils} +import org.apache.kafka.common import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionChangeRecord, PartitionRecord, RegisterBrokerRecord, TopicRecord} import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.Errors @@ -199,7 +200,6 @@ class ReplicaManagerConcurrencyTest extends Logging { override def createReplicaFetcherManager( metrics: Metrics, time: Time, - threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager ): ReplicaFetcherManager = { Mockito.mock(classOf[ReplicaFetcherManager]) @@ -292,11 +292,13 @@ class ReplicaManagerConcurrencyTest extends Logging { } val future = new CompletableFuture[ProduceResponse.PartitionResponse]() - def produceCallback(results: collection.Map[TopicPartition, ProduceResponse.PartitionResponse]): Unit = { + val topicIdPartition: common.TopicIdPartition = replicaManager.topicIdPartition(topicPartition) + + def produceCallback(results: collection.Map[common.TopicIdPartition, ProduceResponse.PartitionResponse]): Unit = { try { assertEquals(1, results.size) val (topicPartition, result) = results.head - assertEquals(this.topicPartition, topicPartition) + assertEquals(topicIdPartition, topicPartition) assertEquals(Errors.NONE, result.error) future.complete(result) } catch { @@ -309,7 +311,7 @@ class ReplicaManagerConcurrencyTest extends Logging { requiredAcks = (-1).toShort, internalTopicsAllowed = false, origin = AppendOrigin.CLIENT, - entriesPerPartition = collection.Map(topicPartition -> TestUtils.records(records)), + entriesPerPartition = collection.Map(topicIdPartition -> TestUtils.records(records)), responseCallback = produceCallback ) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 748d5eda4ea6f..eb5491812c109 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -28,14 +28,14 @@ import 
kafka.server.epoch.util.MockBlockingSender import kafka.server.metadata.KRaftMetadataCache import kafka.server.share.{DelayedShareFetch, SharePartition} import kafka.utils.TestUtils.waitUntilTrue -import kafka.utils.{Pool, TestUtils} +import kafka.utils.TestUtils import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.{DirectoryId, IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.InvalidPidMappingException import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.{DeleteRecordsResponseData, ShareFetchResponseData} +import org.apache.kafka.common.message.{DeleteRecordsResponseData, FetchResponseData, ShareFetchResponseData} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.metadata.{PartitionChangeRecord, PartitionRecord, RemoveTopicRecord, TopicRecord} import org.apache.kafka.common.metrics.Metrics @@ -55,7 +55,7 @@ import org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig} import org.apache.kafka.image._ import org.apache.kafka.metadata.LeaderConstants.NO_LEADER -import org.apache.kafka.metadata.{LeaderAndIsr, MetadataCache} +import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, PartitionRegistration} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, MetadataVersion, OffsetAndEpoch, RequestLocal, StopPartition} import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} @@ -63,16 +63,20 @@ import org.apache.kafka.server.log.remote.TopicPartitionLog import org.apache.kafka.server.log.remote.storage._ import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.network.BrokerEndPoint +import org.apache.kafka.server.{LogReadResult, PartitionFetchState} import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets} import org.apache.kafka.server.share.SharePartitionKey import org.apache.kafka.server.share.fetch.{DelayedShareFetchGroupKey, DelayedShareFetchKey, ShareFetch} import org.apache.kafka.server.share.metrics.ShareGroupMetrics import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation +import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation.{ADD_PARTITION, GENERIC_ERROR_SUPPORTED} import org.apache.kafka.server.util.timer.MockTimer import org.apache.kafka.server.util.{MockScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.checkpoint.LazyOffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegments, ProducerStateManager, ProducerStateManagerConfig, RemoteStorageFetchInfo, UnifiedLog, VerificationGuard} +import 
org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, FetchDataInfo, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetsListener, LogOffsetSnapshot, LogSegments, ProducerStateManager, ProducerStateManagerConfig, RemoteLogReadResult, RemoteStorageFetchInfo, UnifiedLog, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterAll, AfterEach, BeforeEach, Test} @@ -88,12 +92,12 @@ import java.io.{ByteArrayInputStream, File} import java.net.InetAddress import java.nio.file.{Files, Paths} import java.util -import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong, AtomicReference} -import java.util.concurrent.{Callable, CompletableFuture, ConcurrentHashMap, CountDownLatch, TimeUnit} -import java.util.function.BiConsumer +import java.util.concurrent.atomic.{AtomicLong, AtomicReference} +import java.util.concurrent.{Callable, CompletableFuture, ConcurrentHashMap, CountDownLatch, Future, TimeUnit} +import java.util.function.{BiConsumer, Consumer} import java.util.stream.IntStream import java.util.{Collections, Optional, OptionalLong, Properties} -import scala.collection.{Map, Seq, mutable} +import scala.collection.{mutable, Map, Seq} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.{RichOption, RichOptional} @@ -107,9 +111,12 @@ object ReplicaManagerTest { class ReplicaManagerTest { private val topic = "test-topic" - private val topicId = Uuid.randomUuid() + private val topic2 = "test-topic2" + private val topicId = Uuid.fromString("YK2ed2GaTH2JpgzUaJ8tgg") + private val topicId2 = Uuid.randomUuid() private val topicIds = scala.Predef.Map("test-topic" -> topicId) - private val topicNames = scala.Predef.Map(topicId -> "test-topic") + private val topicNames = topicIds.map(_.swap) + private val topicPartition = new TopicPartition(topic, 0) private val transactionalId = "txn" private val time = new MockTime private val metrics = new Metrics @@ -122,13 +129,12 @@ class ReplicaManagerTest { private var mockRemoteLogManager: RemoteLogManager = _ private var addPartitionsToTxnManager: AddPartitionsToTxnManager = _ private var brokerTopicStats: BrokerTopicStats = _ + private val metadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) private val quotaExceededThrottleTime = 1000 private val quotaAvailableThrottleTime = 0 // Constants defined for readability - private val zkVersion = 0 - private val correlationId = 0 - private val controllerEpoch = 0 + private val partitionEpoch = 0 private val brokerEpoch = 0L // These metrics are static and once we remove them after each test, they won't be created and verified anymore @@ -153,7 +159,23 @@ class ReplicaManagerTest { // Anytime we try to verify, just automatically run the callback as though the transaction was verified. 
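+    // The append callback handed to addOrVerifyTransaction is now the Java-side
+    // AddPartitionsToTxnManager.AppendCallback, so the stub signals a verified transaction by calling
+    // callback.complete(util.Map.of()) (an empty error map) instead of applying a Scala function to Map.empty.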
when(addPartitionsToTxnManager.addOrVerifyTransaction(any(), any(), any(), any(), any(), any())).thenAnswer { invocationOnMock => val callback = invocationOnMock.getArgument(4, classOf[AddPartitionsToTxnManager.AppendCallback]) - callback(Map.empty[TopicPartition, Errors].toMap) + callback.complete(util.Map.of()) + } + // make sure metadataCache can map between topic name and id + setupMetadataCacheWithTopicIds(topicIds, metadataCache) + } + + private def setupMetadataCacheWithTopicIds(topicIds: Map[String, Uuid], metadataCache:MetadataCache): Unit = { + val topicNames = topicIds.map(_.swap) + topicNames.foreach { + case (id, name) => + when(metadataCache.getTopicName(id)).thenReturn(Optional.of(name)) + when(metadataCache.getTopicId(name)).thenReturn(id) + } + when(metadataCache.topicIdsToNames()).thenReturn(topicNames.asJava) + + topicIds.foreach { case (topicName, topicId) => + when(metadataCache.getTopicId(topicName)).thenReturn(topicId) } } @@ -169,7 +191,7 @@ class ReplicaManagerTest { @Test def testHighWaterMarkDirectoryMapping(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -185,8 +207,8 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) rm.checkpointHighWatermarks() - config.logDirs.map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) - .foreach(checkpointFile => assertTrue(Files.exists(checkpointFile), + config.logDirs.stream().map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) + .forEach(checkpointFile => assertTrue(Files.exists(checkpointFile), s"checkpoint file does not exist at $checkpointFile")) } finally { rm.shutdown(checkpointHW = false) @@ -198,7 +220,7 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(1) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -214,8 +236,8 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) rm.checkpointHighWatermarks() - config.logDirs.map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) - .foreach(checkpointFile => assertTrue(Files.exists(checkpointFile), + config.logDirs.stream().map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) + .forEach(checkpointFile => assertTrue(Files.exists(checkpointFile), s"checkpoint file does not exist at $checkpointFile")) } finally { rm.shutdown(checkpointHW = false) @@ -224,7 +246,7 @@ class ReplicaManagerTest { @Test def testIllegalRequiredAcks(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -234,10 +256,9 @@ class ReplicaManagerTest { quotaManagers = quotaManager, metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - 
alterPartitionManager = alterPartitionManager, - threadNamePrefix = Option(this.getClass.getName)) + alterPartitionManager = alterPartitionManager) try { - def callback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { + def callback(responseStatus: Map[TopicIdPartition, PartitionResponse]): Unit = { assert(responseStatus.values.head.error == Errors.INVALID_REQUIRED_ACKS) } rm.appendRecords( @@ -245,7 +266,7 @@ class ReplicaManagerTest { requiredAcks = 3, internalTopicsAllowed = false, origin = AppendOrigin.CLIENT, - entriesPerPartition = Map(new TopicPartition("test1", 0) -> MemoryRecords.withRecords(Compression.NONE, + entriesPerPartition = Map(new TopicIdPartition(Uuid.randomUuid(), 0, "test1") -> MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("first message".getBytes))), responseCallback = callback) } finally { @@ -275,8 +296,7 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) - val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) - val metadataCache: MetadataCache = mock(classOf[MetadataCache]) + val logManager = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(new Properties())) mockGetAliveBrokerFunctions(metadataCache, Seq(new Node(0, "host0", 0))) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) val rm = new ReplicaManager( @@ -291,38 +311,26 @@ class ReplicaManagerTest { alterPartitionManager = alterPartitionManager) try { - val partition = rm.createPartition(new TopicPartition(topic, 0)) - partition.createLogIfNotExists(isNew = false, isFutureReplica = false, - new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) + val delta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), topicName = topic, topicId = topicIds(topic)) + val image = imageFromTopics(delta.apply()) + rm.applyDelta(delta, image) + val partition = rm.getPartitionOrException(topicPartition) - rm.becomeLeaderOrFollower(0, new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(Seq[Integer](0).asJava) - .setPartitionEpoch(0) - .setReplicas(Seq[Integer](0).asJava) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, Uuid.randomUuid()), - Set(new Node(0, "host1", 0)).asJava).build(), (_, _) => ()) - appendRecords(rm, new TopicPartition(topic, 0), + appendRecords(rm, topicPartition, MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("first message".getBytes()), new SimpleRecord("second message".getBytes()))) - logManager.maybeUpdatePreferredLogDir(new TopicPartition(topic, 0), dir2.getAbsolutePath) + logManager.maybeUpdatePreferredLogDir(topicPartition, dir2.getAbsolutePath) partition.createLogIfNotExists(isNew = true, isFutureReplica = true, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // this method should use hw of future log to create log dir fetcher. 
Otherwise, it causes offset mismatch error rm.maybeAddLogDirFetchers(Set(partition), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), _ => None) - rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.fetchState(new TopicPartition(topic, 0)).foreach(s => assertEquals(0L, s.fetchOffset))) + rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.fetchState(topicPartition).foreach(s => assertEquals(0L, s.fetchOffset))) // make sure alter log dir thread has processed the data rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.doWork()) assertEquals(Set.empty, rm.replicaAlterLogDirsManager.failedPartitions.partitions()) // the future log becomes the current log, so the partition state should get removed - rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => assertEquals(None, t.fetchState(new TopicPartition(topic, 0)))) + rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => assertEquals(None, t.fetchState(topicPartition))) } finally { rm.shutdown(checkpointHW = false) } @@ -336,13 +344,11 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) - val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) + val logManager = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(new Properties())) val spyLogManager = spy(logManager) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, Seq(new Node(0, "host0", 0))) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) - val tp0 = new TopicPartition(topic, 0) - val uuid = Uuid.randomUuid() val rm = new ReplicaManager( metrics = metrics, config = config, @@ -355,28 +361,13 @@ class ReplicaManagerTest { alterPartitionManager = alterPartitionManager) try { - val partition = rm.createPartition(tp0) - partition.createLogIfNotExists(isNew = false, isFutureReplica = false, - new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), Option.apply(uuid)) - - val response = rm.becomeLeaderOrFollower(0, new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(Seq[Integer](0).asJava) - .setPartitionEpoch(0) - .setReplicas(Seq[Integer](0).asJava) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, uuid), - Set(new Node(0, "host1", 0)).asJava).build(), (_, _) => ()) - // expect the errorCounts only has 1 entry with Errors.NONE - val errorCounts = response.errorCounts() - assertEquals(1, response.errorCounts().size()) - assertNotNull(errorCounts.get(Errors.NONE)) - spyLogManager.maybeUpdatePreferredLogDir(tp0, dir2.getAbsolutePath) + val delta = topicsCreateDelta(startId = 0, isStartIdLeader = true, + partitions = List(0), topicName = topic, topicId = topicId) + val image = imageFromTopics(delta.apply()) + rm.applyDelta(delta, image) + val partition = rm.getPartitionOrException(topicPartition) + + spyLogManager.maybeUpdatePreferredLogDir(topicPartition, dir2.getAbsolutePath) if (futureLogCreated) { // create future log before maybeAddLogDirFetchers invoked @@ -384,12 +375,12 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) } else { val mockLog = 
mock(classOf[UnifiedLog]) - when(spyLogManager.getLog(tp0, isFuture = true)).thenReturn(Option.apply(mockLog)) - when(mockLog.topicId).thenReturn(Optional.of(uuid)) + when(spyLogManager.getLog(topicPartition, isFuture = true)).thenReturn(Option.apply(mockLog)) + when(mockLog.topicId).thenReturn(Optional.of(topicId)) when(mockLog.parentDir).thenReturn(dir2.getAbsolutePath) } - val topicIdMap: Map[String, Option[Uuid]] = Map(topic -> Option.apply(uuid)) + val topicIdMap: Map[String, Option[Uuid]] = Map(topic -> Option.apply(topicId)) rm.maybeAddLogDirFetchers(Set(partition), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), topicIdMap) if (futureLogCreated) { // since the futureLog is already created, we don't have to abort and pause the cleaning @@ -409,9 +400,8 @@ class ReplicaManagerTest { props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) val logProps = new Properties() - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(logProps)) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(logProps)) val aliveBrokers = Seq(new Node(0, "host0", 0), new Node(1, "host1", 1)) - val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, aliveBrokers) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) val rm = new ReplicaManager( @@ -427,26 +417,15 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava - val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) - val partition = rm.createPartition(new TopicPartition(topic, 0)) + val topicPartition = new TopicPartition(topic, 0) + val partition = rm.createPartition(topicPartition) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. 
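+      // Leadership is now established by applying a KRaft metadata delta: the createLeaderDelta and
+      // imageFromTopics test helpers build the delta and image, and ReplicaManager.applyDelta replaces
+      // the becomeLeaderOrFollower(LeaderAndIsrRequest) call removed below.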
- val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val delta = createLeaderDelta(topicId, topicPartition, brokerList.get(0), brokerList, brokerList) + val leaderMetadataImage = imageFromTopics(delta.apply()) + rm.applyDelta(delta, leaderMetadataImage) rm.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -456,20 +435,9 @@ class ReplicaManagerTest { } // Make this replica the follower - val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(1) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - rm.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ()) + val delta1 = createLeaderDelta(topicId, topicPartition, brokerList.get(1), brokerList, brokerList, 1) + val followerMetadataImage = imageFromTopics(delta1.apply()) + rm.applyDelta(delta1, followerMetadataImage) assertTrue(appendResult.hasFired) } finally { @@ -493,8 +461,7 @@ class ReplicaManagerTest { quotaManagers = quotaManager, metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - alterPartitionManager = alterPartitionManager, - threadNamePrefix = Option(this.getClass.getName)) + alterPartitionManager = alterPartitionManager) // shutdown ReplicaManager so that metrics are removed rm.shutdown(checkpointHW = false) @@ -515,37 +482,21 @@ class ReplicaManagerTest { } } - @Test - def testFencedErrorCausedByBecomeLeader(): Unit = { - testFencedErrorCausedByBecomeLeader(0) - testFencedErrorCausedByBecomeLeader(1) - testFencedErrorCausedByBecomeLeader(10) - } - - private[this] def testFencedErrorCausedByBecomeLeader(loopEpochChange: Int): Unit = { + @ParameterizedTest + @ValueSource(ints = Array(0, 1, 10)) + def testFencedErrorCausedByBecomeLeader(loopEpochChange: Int): Unit = { + val localId = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { - val brokerList = Seq[Integer](0, 1).asJava val topicPartition = new TopicPartition(topic, 0) replicaManager.createPartition(topicPartition) .createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) - def leaderAndIsrRequest(epoch: Int): LeaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(epoch) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0), (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, 
topicName = topic, topicId = topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) + val partition = replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) assertEquals(1, replicaManager.logManager.liveLogDirs.filterNot(_ == partition.log.get.dir.getParentFile).size) @@ -557,7 +508,12 @@ class ReplicaManagerTest { // make sure the future log is created replicaManager.futureLocalLogOrException(topicPartition) assertEquals(1, replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.size) - (1 to loopEpochChange).foreach(epoch => replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(epoch), (_, _) => ())) + (1 to loopEpochChange).foreach( + epoch => { + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = epoch) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + } + ) // wait for the ReplicaAlterLogDirsThread to complete TestUtils.waitUntilTrue(() => { replicaManager.replicaAlterLogDirsManager.shutdownIdleFetcherThreads() @@ -588,25 +544,16 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava - val partition = replicaManager.createPartition(new TopicPartition(topic, 0)) + val tp = new TopicPartition(topic, 0) + val partition = replicaManager.createPartition(tp) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - Collections.singletonMap(topic, Uuid.randomUuid()), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val delta = createLeaderDelta(topicId, tp, 0, brokerList, brokerList) + val leaderMetadataImage = imageFromTopics(delta.apply()) + + replicaManager.applyDelta(delta, leaderMetadataImage) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -647,35 +594,28 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava + val tp0 = new TopicPartition(topic, 0) + val tp1 = new TopicPartition(topic, 1) // Create a couple partition for the topic. - val partition0 = replicaManager.createPartition(new TopicPartition(topic, 0)) + val partition0 = replicaManager.createPartition(tp0) partition0.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) - val partition1 = replicaManager.createPartition(new TopicPartition(topic, 1)) + val partition1 = replicaManager.createPartition(tp1) partition1.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader for the partitions. 
- Seq(0, 1).foreach { partition => - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - Collections.singletonMap(topic, Uuid.randomUuid()), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava, - LeaderAndIsrRequest.Type.UNKNOWN - ).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - replicaManager.getPartitionOrException(new TopicPartition(topic, partition)) - .localLogOrException + Seq(tp0, tp1).foreach { tp => + val delta = createLeaderDelta( + topicId = topicId, + partition = tp, + leaderId = 0, + replicas = brokerList, + isr = brokerList + ) + replicaManager.applyDelta(delta, imageFromTopics(delta.apply())) + replicaManager.getPartitionOrException(tp) } def appendRecord(pid: Long, sequence: Int, partition: Int): Unit = { @@ -689,7 +629,7 @@ class ReplicaManagerTest { def replicaManagerMetricValue(): Int = { KafkaYammerMetrics.defaultRegistry().allMetrics().asScala.filter { case (metricName, _) => - metricName.getName == "ProducerIdCount" && metricName.getType == replicaManager.getClass.getSimpleName + metricName.getName == "ProducerIdCount" && metricName.getType == "ReplicaManager" }.head._2.asInstanceOf[Gauge[Int]].value } @@ -729,6 +669,7 @@ class ReplicaManagerTest { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) val topicPartition = new TopicPartition(topic, 0) + setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) def assertLateTransactionCount(expectedCount: Option[Int]): Unit = { assertEquals(expectedCount, yammerGaugeValue[Int]("PartitionsWithLateTransactionsCount")) @@ -743,20 +684,14 @@ class ReplicaManagerTest { // Make this replica the leader. val brokerList = Seq[Integer](0, 1, 2).asJava - val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val leaderDelta = createLeaderDelta( + topicId = topicId, + partition = topicPartition, + leaderId = 0, + replicas = brokerList, + isr = brokerList, + ) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) // Start a transaction val producerId = 234L @@ -794,6 +729,7 @@ class ReplicaManagerTest { def testReadCommittedFetchLimitedAtLSO(): Unit = { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) + setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { val brokerList = Seq[Integer](0, 1).asJava @@ -803,20 +739,10 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. 
- val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val delta = createLeaderDelta(topicId, new TopicPartition(topic, 0), 0, brokerList, brokerList) + val leaderMetadataImage = imageFromTopics(delta.apply()) + + replicaManager.applyDelta(delta, leaderMetadataImage) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -916,6 +842,7 @@ class ReplicaManagerTest { def testDelayedFetchIncludesAbortedTransactions(): Unit = { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) + setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { val brokerList = Seq[Integer](0, 1).asJava @@ -924,20 +851,9 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val delta = topicsCreateDelta(brokerList.get(0), isStartIdLeader = true, partitions = List(0), List.empty, topic, topicIds(topic)) + val leaderMetadataImage = imageFromTopics(delta.apply()) + replicaManager.applyDelta(delta, leaderMetadataImage) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -1004,25 +920,16 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1, 2).asJava - val partition = rm.createPartition(new TopicPartition(topic, 0)) + val tp = new TopicPartition(topic, 0) + val partition = rm.createPartition(tp) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. 
- val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1), new Node(2, "host2", 2)).asJava).build() - rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp, leaderId = 0, replicas = brokerList, isr = brokerList) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + rm.applyDelta(leaderDelta, leaderMetadataImage) + rm.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -1060,6 +967,7 @@ class ReplicaManagerTest { @Test def testFollowerStateNotUpdatedIfLogReadFails(): Unit = { + val localId = 0 val maxFetchBytes = 1024 * 1024 val aliveBrokersIds = Seq(0, 1) val leaderEpoch = 5 @@ -1068,25 +976,11 @@ class ReplicaManagerTest { try { val tp = new TopicPartition(topic, 0) val tidp = new TopicIdPartition(topicId, tp) - val replicas = aliveBrokersIds.toList.map(Int.box).asJava // Broker 0 becomes leader of the partition - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(0) - .setReplicas(replicas) - .setIsNew(true) - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(leaderAndIsrPartitionState).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - assertEquals(Errors.NONE, leaderAndIsrResponse.error) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) // Follower replica state is initialized, but initial state is not known assertTrue(replicaManager.onlinePartition(tp).isDefined) @@ -1159,6 +1053,7 @@ class ReplicaManagerTest { @Test def testFetchMessagesWithInconsistentTopicId(): Unit = { + val localId = 0 val maxFetchBytes = 1024 * 1024 val aliveBrokersIds = Seq(0, 1) val leaderEpoch = 5 @@ -1167,25 +1062,11 @@ class ReplicaManagerTest { try { val tp = new TopicPartition(topic, 0) val tidp = new TopicIdPartition(topicId, tp) - val replicas = aliveBrokersIds.toList.map(Int.box).asJava // Broker 0 becomes leader of the partition - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(0) - .setReplicas(replicas) - .setIsNew(true) - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(leaderAndIsrPartitionState).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - assertEquals(Errors.NONE, leaderAndIsrResponse.error) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, 
topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) assertEquals(Some(topicId), replicaManager.getPartitionOrException(tp).topicId) @@ -1225,54 +1106,6 @@ class ReplicaManagerTest { val fetch2 = successfulFetch.headOption.filter(_._1 == zeroTidp).map(_._2) assertTrue(fetch2.isDefined) assertEquals(Errors.NONE, fetch2.get.error) - - // Next create a topic without a topic ID written in the log. - val tp2 = new TopicPartition("noIdTopic", 0) - val tidp2 = new TopicIdPartition(Uuid.randomUuid(), tp2) - - // Broker 0 becomes leader of the partition - val leaderAndIsrPartitionState2 = new LeaderAndIsrRequest.PartitionState() - .setTopicName("noIdTopic") - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(0) - .setReplicas(replicas) - .setIsNew(true) - val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(leaderAndIsrPartitionState2).asJava, - Collections.emptyMap(), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - val leaderAndIsrResponse2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest2, (_, _) => ()) - assertEquals(Errors.NONE, leaderAndIsrResponse2.error) - - assertEquals(None, replicaManager.getPartitionOrException(tp2).topicId) - - // Fetch messages simulating the request containing a topic ID. We should not have an error. - fetchPartitions( - replicaManager, - replicaId = 1, - fetchInfos = Seq(tidp2 -> validFetchPartitionData), - responseCallback = callback - ) - val fetch3 = successfulFetch.headOption.filter(_._1 == tidp2).map(_._2) - assertTrue(fetch3.isDefined) - assertEquals(Errors.NONE, fetch3.get.error) - - // Fetch messages simulating the request not containing a topic ID. We should not have an error. 
- val zeroTidp2 = new TopicIdPartition(Uuid.ZERO_UUID, tidp2.topicPartition) - fetchPartitions( - replicaManager, - replicaId = 1, - fetchInfos = Seq(zeroTidp2 -> validFetchPartitionData), - responseCallback = callback - ) - val fetch4 = successfulFetch.headOption.filter(_._1 == zeroTidp2).map(_._2) - assertTrue(fetch4.isDefined) - assertEquals(Errors.NONE, fetch4.get.error) - } finally { replicaManager.shutdown(checkpointHW = false) } @@ -1287,10 +1120,10 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2)) try { + val leaderEpoch = 0 // Create 2 partitions, assign replica 0 as the leader for both a different follower (1 and 2) for each val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) - val topicId = Uuid.randomUuid() val tidp0 = new TopicIdPartition(topicId, tp0) val tidp1 = new TopicIdPartition(topicId, tp1) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) @@ -1298,34 +1131,14 @@ class ReplicaManagerTest { replicaManager.createPartition(tp1).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava val partition1Replicas = Seq[Integer](0, 2).asJava - val topicIds = Map(tp0.topic -> topicId, tp1.topic -> topicId).asJava - val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(leaderEpoch) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true), - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp1.topic) - .setPartitionIndex(tp1.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(partition1Replicas) - .setPartitionEpoch(0) - .setReplicas(partition1Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + + val leaderDelta0 = createLeaderDelta(topicIds(topic), tp0, 0, partition0Replicas, partition0Replicas) + val leaderMetadataImage0 = imageFromTopics(leaderDelta0.apply()) + replicaManager.applyDelta(leaderDelta0, leaderMetadataImage0) + + val leaderDelta1 = createLeaderDelta(topicIds(topic), tp1, 0, partition1Replicas, partition1Replicas) + val leaderMetadataImage1 = imageFromTopics(leaderDelta1.apply()) + replicaManager.applyDelta(leaderDelta1, leaderMetadataImage1) // Append a couple of messages. for (i <- 1 to 2) { @@ -1382,66 +1195,51 @@ class ReplicaManagerTest { } } - @Test - def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(): Unit = { - verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(new Properties, expectTruncation = false) - } - /** * If a partition becomes a follower and the leader is unchanged it should check for truncation * if the epoch has increased by more than one (which suggests it has missed an update). For * IBP version 2.7 onwards, we don't require this since we can truncate at any time based * on diverging epochs returned in fetch responses. + * This test assumes IBP >= 2.7 behavior, so `expectTruncation` is set to false and truncation is not expected. 
*/ - private def verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps: Properties, - expectTruncation: Boolean): Unit = { - val topicPartition = 0 - val topicId = Uuid.randomUuid() + @Test + def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(): Unit = { + val extraProps = new Properties val followerBrokerId = 0 val leaderBrokerId = 1 - val controllerId = 0 - val controllerEpoch = 0 var leaderEpoch = 1 val leaderEpochIncrement = 2 - val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) val countDownLatch = new CountDownLatch(1) val offsetFromLeader = 5 - // Prepare the mocked components for the test val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), - topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = expectTruncation, localLogOffset = Optional.of(10), offsetFromLeader = offsetFromLeader, extraProps = extraProps, topicId = Optional.of(topicId)) + topicPartition.partition(), leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, + expectTruncation = false, localLogOffset = Optional.of(10), offsetFromLeader = offsetFromLeader, extraProps = extraProps, topicId = Optional.of(topicId)) try { // Initialize partition state to follower, with leader = 1, leaderEpoch = 1 - val tp = new TopicPartition(topic, topicPartition) - val partition = replicaManager.createPartition(tp) + val partition = replicaManager.createPartition(topicPartition) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - partition.makeFollower( - leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds), - offsetCheckpoints, - None) + val followerDelta = topicsCreateDelta(startId = followerBrokerId, isStartIdLeader = false, partitions = List(topicPartition.partition()), List.empty, topic, topicIds(topic), leaderEpoch) + replicaManager.applyDelta(followerDelta, imageFromTopics(followerDelta.apply())) + + // Verify log created and partition is hosted + val localLog = replicaManager.localLog(topicPartition) + assertTrue(localLog.isDefined, "Log should be created for follower after applyDelta") + val hostedPartition = replicaManager.getPartition(topicPartition) + assertTrue(hostedPartition.isInstanceOf[HostedPartition.Online]) // Make local partition a follower - because epoch increased by more than 1, truncation should // trigger even though leader does not change leaderEpoch += leaderEpochIncrement - val leaderAndIsrRequest0 = new LeaderAndIsrRequest.Builder( - controllerId, controllerEpoch, brokerEpoch, - Seq(leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(followerBrokerId, "host1", 0), - new Node(leaderBrokerId, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest0, - (_, followers) => assertEquals(followerBrokerId, followers.head.partitionId)) + val epochJumpDelta = topicsCreateDelta(startId = followerBrokerId, isStartIdLeader = false, partitions = List(topicPartition.partition()), List.empty, topic, topicIds(topic), leaderEpoch) + replicaManager.applyDelta(epochJumpDelta, imageFromTopics(epochJumpDelta.apply())) + assertTrue(countDownLatch.await(1000L, TimeUnit.MILLISECONDS)) - // Truncation should have happened once - if 
(expectTruncation) { - verify(mockLogMgr).truncateTo(Map(tp -> offsetFromLeader), isFuture = false) - } - verify(mockLogMgr).finishedInitializingLog(ArgumentMatchers.eq(tp), any()) + verify(mockLogMgr).finishedInitializingLog(ArgumentMatchers.eq(topicPartition), any()) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -1454,7 +1252,7 @@ class ReplicaManagerTest { val leaderBrokerId = 1 val leaderEpoch = 1 val leaderEpochIncrement = 2 - val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) + val aliveBrokerIds = Array(followerBrokerId, leaderBrokerId) val countDownLatch = new CountDownLatch(1) // Prepare the mocked components for the test @@ -1468,8 +1266,8 @@ class ReplicaManagerTest { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - partition.makeLeader( - leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds), + partition.makeLeader(partitionRegistration(leaderBrokerId, leaderEpoch, aliveBrokerIds, partitionEpoch, aliveBrokerIds), + isNew = false, offsetCheckpoints, None) @@ -1488,7 +1286,6 @@ class ReplicaManagerTest { @Test def testPreferredReplicaAsFollower(): Unit = { val topicPartition = 0 - val topicId = Uuid.randomUuid() val followerBrokerId = 0 val leaderBrokerId = 1 val leaderEpoch = 1 @@ -1501,26 +1298,19 @@ class ReplicaManagerTest { leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Optional.of(topicId)) try { - val brokerList = Seq[Integer](0, 1).asJava val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) // Make this replica the follower - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(1) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) + val followerDelta = createFollowerDelta( + topicId = topicId, + partition = tp0, + followerId = 0, + leaderId = 1, + leaderEpoch = 1, + ) + replicaManager.applyDelta(followerDelta, imageFromTopics(followerDelta.apply())) val metadata: ClientMetadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default") @@ -1542,7 +1332,6 @@ class ReplicaManagerTest { @Test def testPreferredReplicaAsLeader(): Unit = { val topicPartition = 0 - val topicId = Uuid.randomUuid() val followerBrokerId = 0 val leaderBrokerId = 1 val leaderEpoch = 1 @@ -1560,21 +1349,19 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) + val partition = replicaManager.createPartition(tp0) + partition.createLogIfNotExists(isNew = false, isFutureReplica = false, + new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - 
.setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta( + topicId = topicId, + partition = tp0, + leaderId = 0, + replicas = brokerList, + isr = brokerList, + leaderEpoch = 1 + ) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) val metadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default") @@ -1604,7 +1391,6 @@ class ReplicaManagerTest { val leaderNode = new Node(leaderBrokerId, "host1", 0, "rack-a") val followerNode = new Node(followerBrokerId, "host2", 1, "rack-b") val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava - val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) @@ -1617,24 +1403,15 @@ class ReplicaManagerTest { )) // Make this replica the leader and remove follower from ISR. - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder( - 0, - 0, - brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(leaderBrokerId) - .setLeaderEpoch(1) - .setIsr(Seq[Integer](leaderBrokerId).asJava) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(leaderNode, followerNode).asJava).build() - - replicaManager.becomeLeaderOrFollower(2, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta( + topicId = topicId, + partition = tp0, + leaderId = leaderBrokerId, + replicas = brokerList, + isr = util.Arrays.asList(leaderBrokerId), + leaderEpoch = 1 + ) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) appendRecords(replicaManager, tp0, TestUtils.singletonRecords(s"message".getBytes)).onFire { response => assertEquals(Errors.NONE, response.error) @@ -1677,28 +1454,13 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), propsModifier = props => props.put(ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG, classOf[MockReplicaSelector].getName)) try { - val leaderBrokerId = 0 - val followerBrokerId = 1 - val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava - val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) // Make this replica the follower - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(1) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) + val followerDelta = createFollowerDelta(topicId, tp0, 0, 1, 1) + val followerMetadataImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerMetadataImage) val metadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getLocalHost, KafkaPrincipal.ANONYMOUS, "default") @@ -1723,14 +1485,13 @@ class ReplicaManagerTest { @Test def 
testFetchShouldReturnImmediatelyWhenPreferredReadReplicaIsDefined(): Unit = { + val localId = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), propsModifier = props => props.put(ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG, "org.apache.kafka.common.replica.RackAwareReplicaSelector")) try { val leaderBrokerId = 0 val followerBrokerId = 1 - val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava - val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) @@ -1744,20 +1505,9 @@ class ReplicaManagerTest { // Make this replica the leader val leaderEpoch = 1 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) // The leader must record the follower's fetch offset to make it eligible for follower fetch selection val followerFetchData = new PartitionData(topicId, 0L, 0L, Int.MaxValue, Optional.of(Int.box(leaderEpoch)), Optional.empty[Integer]) @@ -1804,27 +1554,15 @@ class ReplicaManagerTest { topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Optional.of(topicId)) try { - - val brokerList = Seq[Integer](0, 1).asJava - val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) + val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) // Make this replica the follower - val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(false)).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ()) + val followerDelta = createFollowerDelta(topicId, tp0, 1, 0, 1) + val followerImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerImage) val simpleRecords = Seq(new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)) val appendResult = appendRecords(replicaManager, tp0, @@ -1910,21 +1648,10 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val 
becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) + + val followerDelta = createFollowerDelta(topicId, tp0, 0, 1) + val followerImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerImage) // Fetch from follower, with non-empty ClientMetadata (FetchRequest v11+) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") @@ -1946,6 +1673,7 @@ class ReplicaManagerTest { @Test def testFetchRequestRateMetrics(): Unit = { + val localId = 0 val mockTimer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1)) @@ -1954,22 +1682,10 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) def assertMetricCount(expected: Int): Unit = { assertEquals(expected, replicaManager.brokerTopicStats.allTopicsStats.totalFetchRequestRate.count) @@ -2003,22 +1719,10 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) val partitionData = 
new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, Optional.empty()) @@ -2026,20 +1730,9 @@ class ReplicaManagerTest { assertFalse(fetchResult.hasFired) // Become a follower and ensure that the delayed fetch returns immediately - val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(2) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) + val followerDelta = createFollowerDelta(topicId, tp0, followerId = 0, leaderId = 1, leaderEpoch = 2) + val followerMetadataImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerMetadataImage) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchResult.assertFired.error) } finally { replicaManager.shutdown(checkpointHW = false) @@ -2058,20 +1751,9 @@ class ReplicaManagerTest { replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava - val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = 1, replicas = partition0Replicas, isr = partition0Replicas) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") val partitionData = new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, @@ -2086,20 +1768,9 @@ class ReplicaManagerTest { assertFalse(fetchResult.hasFired) // Become a follower and ensure that the delayed fetch returns immediately - val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(2) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) + val followerDelta = createFollowerDelta(topicId, tp0, followerId = 0, leaderId = 1, leaderEpoch = 2) + val followerMetadataImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerMetadataImage) assertEquals(Errors.FENCED_LEADER_EPOCH, fetchResult.assertFired.error) } finally { replicaManager.shutdown(checkpointHW = false) @@ -2108,6 +1779,7 @@ class ReplicaManagerTest { @Test def testFetchFromLeaderAlwaysAllowed(): Unit = { + val localId = 0 val replicaManager = 
setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1)) try { @@ -2115,22 +1787,10 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true)).asJava, - topicIds.asJava, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") var partitionData = new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, @@ -2155,16 +1815,16 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 0 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) - val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0, tp1)) + val brokerList = Seq[Integer](0, 1).asJava try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta0 = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val leaderDelta1 = createLeaderDelta(topicId, tp1, leaderId = 1, replicas = brokerList, isr = brokerList) + val image0 = imageFromTopics(leaderDelta0.apply()) + replicaManager.applyDelta(leaderDelta0, image0) - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val image1 = imageFromTopics(leaderDelta1.apply()) + replicaManager.applyDelta(leaderDelta1, image1) // If we supply no transactional ID and idempotent records, we do not verify. 
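+      // Only the transactional append later in this test is expected to reach
+      // AddPartitionsToTxnManager.addOrVerifyTransaction; the idempotent append below should bypass verification.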
val idempotentRecords = MemoryRecords.withIdempotentRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -2184,7 +1844,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), any[AddPartitionsToTxnManager.AppendCallback](), any() ) @@ -2203,12 +1863,13 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) + val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -2221,7 +1882,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) @@ -2230,7 +1891,7 @@ class ReplicaManagerTest { // Confirm we did not write to the log and instead returned error. val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> Errors.INVALID_TXN_STATE).toMap) + callback.complete(util.Map.of(tp0, Errors.INVALID_TXN_STATE)) assertEquals(Errors.INVALID_TXN_STATE, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) @@ -2241,14 +1902,14 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback2.capture(), any() ) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue - callback2(Map.empty[TopicPartition, Errors].toMap) + callback2.complete(util.Map.of()) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } finally { @@ -2268,15 +1929,16 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 6 + val sequence = 0 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) val scheduler = new MockScheduler(time) + val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0), scheduler = scheduler) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val 
leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -2284,13 +1946,13 @@ class ReplicaManagerTest { // We should add these partitions to the manager to verify. val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, origin = AppendOrigin.CLIENT, - transactionalId = transactionalId, transactionSupportedOperation = addPartition) + transactionalId = transactionalId, transactionSupportedOperation = ADD_PARTITION) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) @@ -2299,7 +1961,7 @@ class ReplicaManagerTest { // Confirm we did not write to the log and instead returned error. var callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> error).toMap) + callback.complete(util.Map.of(tp0, error)) if (error != Errors.CONCURRENT_TRANSACTIONS) { // NOT_COORDINATOR is converted to NOT_ENOUGH_REPLICAS @@ -2316,12 +1978,12 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) callback = appendCallback.getValue - callback(Map.empty[TopicPartition, Errors].toMap) + callback.complete(util.Map.of()) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } @@ -2335,16 +1997,17 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 6 + val sequence = 0 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) + val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) - // Start with sequence 6 + // Start with sequence 0 val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) @@ -2355,7 +2018,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) @@ -2364,11 +2027,11 @@ class ReplicaManagerTest { // Confirm we did not write to the log and instead returned error. 
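+      // Simulate the verification response by completing the captured callback with a per-partition error map.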
val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> Errors.INVALID_PRODUCER_ID_MAPPING).toMap) + callback.complete(util.Map.of(tp0, Errors.INVALID_PRODUCER_ID_MAPPING)) assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) - // Try to append a higher sequence (7) after the first one failed with a retriable error. + // Try to append a higher sequence (1) after the first one failed with a retriable error. val transactionalRecords2 = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence + 1, new SimpleRecord("message".getBytes)) @@ -2378,7 +2041,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback2.capture(), any() ) @@ -2386,7 +2049,7 @@ class ReplicaManagerTest { // Verification should succeed, but we expect to fail with OutOfOrderSequence and for the VerificationGuard to remain. val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue - callback2(Map.empty[TopicPartition, Errors].toMap) + callback2.complete(util.Map.of()) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) assertEquals(Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, result2.assertFired.error) } finally { @@ -2394,8 +2057,79 @@ class ReplicaManagerTest { } } + @Test + def testTransactionVerificationRejectsLowerProducerEpoch(): Unit = { + val tp0 = new TopicPartition(topic, 0) + val producerId = 24L + val producerEpoch = 5.toShort + val lowerProducerEpoch= 4.toShort + val sequence = 6 + val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) + val brokerList = Seq[Integer](0, 1).asJava + + val replicaManager = + setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) + + try { + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + + // first append with epoch 5 + val transactionalRecords = MemoryRecords.withTransactionalRecords( + Compression.NONE, + producerId, + producerEpoch, + sequence, + new SimpleRecord("message".getBytes) + ) + + handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) + + val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(producerId), + ArgumentMatchers.eq(producerEpoch), + ArgumentMatchers.eq(util.List.of(tp0)), + appendCallback.capture(), + any() + ) + + val verificationGuard = getVerificationGuard(replicaManager, tp0, producerId) + assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) + + // simulate successful verification + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue + callback.complete(util.Map.of()) + + assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) + assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) + + // append lower epoch 4 + val transactionalRecords2 = 
MemoryRecords.withTransactionalRecords( + Compression.NONE, + producerId, + lowerProducerEpoch, + sequence + 1, + new SimpleRecord("message".getBytes) + ) + + val result2 = handleProduceAppend(replicaManager, tp0, transactionalRecords2, transactionalId = transactionalId) + + // no extra call to the txn‑manager should have been made + verifyNoMoreInteractions(addPartitionsToTxnManager) + + // broker returns the fencing error + assertEquals(Errors.INVALID_PRODUCER_EPOCH, result2.assertFired.error) + } finally { + replicaManager.shutdown(checkpointHW = false) + } + } + @Test def testTransactionVerificationGuardOnMultiplePartitions(): Unit = { + val localId = 0 val mockTimer = new MockTimer(time) val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) @@ -2404,14 +2138,11 @@ class ReplicaManagerTest { val sequence = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer) + setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) - - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord(s"message $sequence".getBytes)) @@ -2428,6 +2159,7 @@ class ReplicaManagerTest { @Test def testExceptionWhenUnverifiedTransactionHasMultipleProducerIds(): Unit = { + val localId = 1 val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) val transactionalId = "txn1" @@ -2440,13 +2172,9 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0, tp1)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) - - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) // Append some transactional records with different producer IDs val transactionalRecords = mutable.Map[TopicPartition, MemoryRecords]() @@ -2469,7 +2197,7 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 6 + val sequence = 0 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) @@ -2505,8 +2233,9 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp), config = config) try { - val becomeLeaderRequest = makeLeaderAndIsrRequest(topicIds(tp.topic), 
tp, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) + val delta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), List.empty, topic, topicIds(topic)) + val leaderMetadataImage = imageFromTopics(delta.apply()) + replicaManager.applyDelta(delta, leaderMetadataImage) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord(s"message $sequence".getBytes)) @@ -2546,12 +2275,13 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) + val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -2564,7 +2294,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) @@ -2581,7 +2311,7 @@ class ReplicaManagerTest { // Confirm we did not write to the log and instead returned error. val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> Errors.INVALID_TXN_STATE).toMap) + callback.complete(util.Map.of(tp0, Errors.INVALID_TXN_STATE)) assertEquals(Errors.INVALID_TXN_STATE, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) @@ -2606,6 +2336,7 @@ class ReplicaManagerTest { ) ) def testVerificationErrorConversionsTV2(error: Errors): Unit = { + val localId = 1 val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort @@ -2614,29 +2345,28 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) // Start verification and return the coordinator related errors. val expectedMessage = s"Unable to verify the partition has been added to the transaction. 
Underlying error: ${error.toString}" - val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId, transactionSupportedOperation = addPartition) + val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId, transactionSupportedOperation = ADD_PARTITION) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) // Confirm we did not write to the log and instead returned the converted error with the correct error message. val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> error).toMap) + callback.complete(util.Map.of(tp0, error)) assertEquals(Errors.NOT_ENOUGH_REPLICAS, result.assertFired.error) assertEquals(expectedMessage, result.assertFired.errorMessage) } finally { @@ -2656,6 +2386,7 @@ class ReplicaManagerTest { ) ) def testVerificationErrorConversionsTV1(error: Errors): Unit = { + val localId = 1 val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort @@ -2664,9 +2395,8 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - replicaManager.becomeLeaderOrFollower(1, - makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), - (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) @@ -2679,14 +2409,14 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) // Confirm we did not write to the log and instead returned the converted error with the correct error message. 
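+      // Completing the callback with a coordinator-side error is expected to surface to the producer as NOT_ENOUGH_REPLICAS.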
val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback(Map(tp0 -> error).toMap) + callback.complete(util.Map.of(tp0, error)) assertEquals(Errors.NOT_ENOUGH_REPLICAS, result.assertFired.error) assertEquals(expectedMessage, result.assertFired.errorMessage) } finally { @@ -2710,7 +2440,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(Seq(tp0)), + ArgumentMatchers.eq(util.List.of(tp0)), appendCallback.capture(), any() ) @@ -2722,11 +2452,11 @@ class ReplicaManagerTest { private def sendProducerAppend( replicaManager: ReplicaManager, - topicPartition: TopicPartition, + topicPartition: TopicIdPartition, numOfRecords: Int ): AtomicReference[PartitionResponse] = { val produceResult = new AtomicReference[PartitionResponse]() - def callback(response: Map[TopicPartition, PartitionResponse]): Unit = { + def callback(response: Map[TopicIdPartition, PartitionResponse]): Unit = { produceResult.set(response(topicPartition)) } @@ -2749,11 +2479,6 @@ class ReplicaManagerTest { produceResult } - /** - * This method assumes that the test using created ReplicaManager calls - * ReplicaManager.becomeLeaderOrFollower() once with LeaderAndIsrRequest containing - * 'leaderEpochInLeaderAndIsr' leader epoch for partition 'topicPartition'. - */ private def prepareReplicaManagerAndLogManager(timer: MockTimer, topicPartition: Int, leaderEpochInLeaderAndIsr: Int, @@ -2771,7 +2496,7 @@ class ReplicaManagerTest { props.asScala ++= extraProps.asScala val config = KafkaConfig.fromProps(props) val logConfig = new LogConfig(new Properties) - val logDir = new File(new File(config.logDirs.head), s"$topic-$topicPartition") + val logDir = new File(new File(config.logDirs.get(0)), s"$topic-$topicPartition") Files.createDirectories(logDir.toPath) val mockScheduler = new MockScheduler(time) val mockBrokerTopicStats = new BrokerTopicStats @@ -2832,19 +2557,19 @@ class ReplicaManagerTest { // Expect to call LogManager.truncateTo exactly once val topicPartitionObj = new TopicPartition(topic, topicPartition) val mockLogMgr: LogManager = mock(classOf[LogManager]) - when(mockLogMgr.liveLogDirs).thenReturn(config.logDirs.map(new File(_).getAbsoluteFile)) + when(mockLogMgr.liveLogDirs).thenReturn(config.logDirs.asScala.map(new File(_).getAbsoluteFile)) when(mockLogMgr.getOrCreateLog(ArgumentMatchers.eq(topicPartitionObj), ArgumentMatchers.eq(false), ArgumentMatchers.eq(false), any(), any())).thenReturn(mockLog) when(mockLogMgr.getLog(topicPartitionObj, isFuture = false)).thenReturn(Some(mockLog)) when(mockLogMgr.getLog(topicPartitionObj, isFuture = true)).thenReturn(None) - val allLogs = new Pool[TopicPartition, UnifiedLog]() + val allLogs = new ConcurrentHashMap[TopicPartition, UnifiedLog]() allLogs.put(topicPartitionObj, mockLog) - when(mockLogMgr.allLogs).thenReturn(allLogs.values) + when(mockLogMgr.allLogs).thenReturn(allLogs.values.asScala) when(mockLogMgr.isLogDirOnline(anyString)).thenReturn(true) + when(mockLogMgr.directoryId(anyString)).thenReturn(None) val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId)) - val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, aliveBrokers) when(metadataCache.getPartitionReplicaEndpoints( any[TopicPartition], any[ListenerName])). 
@@ -2889,15 +2614,13 @@ class ReplicaManagerTest { delayedDeleteRecordsPurgatoryParam = Some(mockDeleteRecordsPurgatory), delayedRemoteFetchPurgatoryParam = Some(mockRemoteFetchPurgatory), delayedRemoteListOffsetsPurgatoryParam = Some(mockRemoteListOffsetsPurgatory), - delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory), - threadNamePrefix = Option(this.getClass.getName)) { + delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory)) { override protected def createReplicaFetcherManager(metrics: Metrics, time: Time, - threadNamePrefix: Option[String], replicationQuotaManager: ReplicationQuotaManager): ReplicaFetcherManager = { val rm = this - new ReplicaFetcherManager(this.config, rm, metrics, time, threadNamePrefix, replicationQuotaManager, () => this.metadataCache.metadataVersion(), () => 1) { + new ReplicaFetcherManager(this.config, rm, metrics, time, replicationQuotaManager, () => this.metadataCache.metadataVersion(), () => 1) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { val logContext = new LogContext(s"[ReplicaFetcher replicaId=${rm.config.brokerId}, leaderId=${sourceBroker.id}, " + @@ -2930,21 +2653,21 @@ class ReplicaManagerTest { (replicaManager, mockLogMgr) } - private def leaderAndIsrPartitionState(topicPartition: TopicPartition, - leaderEpoch: Int, - leaderBrokerId: Int, - aliveBrokerIds: Seq[Integer], - isNew: Boolean = false): LeaderAndIsrRequest.PartitionState = { - new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(topicPartition.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(leaderBrokerId) + private def partitionRegistration(leader: Int, + leaderEpoch: Int, + isr: Array[Int], + partitionEpoch: Int, + replicas: Array[Int]): PartitionRegistration = { + new PartitionRegistration.Builder() + .setLeader(leader) + .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(aliveBrokerIds.asJava) - .setPartitionEpoch(zkVersion) - .setReplicas(aliveBrokerIds.asJava) - .setIsNew(isNew) + .setIsr(isr) + .setPartitionEpoch(partitionEpoch) + .setReplicas(replicas) + .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .build() + } private class CallbackResult[T] { @@ -2978,8 +2701,9 @@ class ReplicaManagerTest { origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { - val response = responses.get(partition) + val topicIdPartition = new TopicIdPartition(topicId, partition) + def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { + val response = responses.get(topicIdPartition) assertTrue(response.isDefined) result.fire(response.get) } @@ -2989,7 +2713,7 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, origin = origin, - entriesPerPartition = Map(partition -> records), + entriesPerPartition = Map(new TopicIdPartition(topicId, partition) -> records), responseCallback = appendCallback, ) @@ -3000,11 +2724,11 @@ class ReplicaManagerTest { entriesToAppend: Map[TopicPartition, MemoryRecords], transactionalId: String, requiredAcks: Short = -1, - transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported - ): CallbackResult[Map[TopicPartition, PartitionResponse]] = { - val result = new CallbackResult[Map[TopicPartition, 
PartitionResponse]]() - def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { - responses.foreach( response => assertTrue(responses.contains(response._1))) + transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED + ): CallbackResult[Map[TopicIdPartition, PartitionResponse]] = { + val result = new CallbackResult[Map[TopicIdPartition, PartitionResponse]]() + def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { + responses.foreach( response => assertTrue(responses.get(response._1).isDefined)) result.fire(responses) } @@ -3013,7 +2737,7 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, transactionalId = transactionalId, - entriesPerPartition = entriesToAppend, + entriesPerPartition = entriesToAppend.map { case(tp, memoryRecords) => replicaManager.topicIdPartition(tp) -> memoryRecords }, responseCallback = appendCallback, transactionSupportedOperation = transactionSupportedOperation ) @@ -3027,12 +2751,13 @@ class ReplicaManagerTest { origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1, transactionalId: String, - transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported + transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED ): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { - val response = responses.get(partition) + val topicIdPartition = new TopicIdPartition(topicIds.get(partition.topic()).getOrElse(Uuid.ZERO_UUID), partition) + def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { + val response = responses.get(topicIdPartition) assertTrue(response.isDefined) result.fire(response.get) } @@ -3043,7 +2768,9 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, transactionalId = transactionalId, - entriesPerPartition = entriesPerPartition, + entriesPerPartition = entriesPerPartition.map { + case (topicPartition, records) => replicaManager.topicIdPartition(topicPartition) -> records + }, responseCallback = appendCallback, transactionSupportedOperation = transactionSupportedOperation ) @@ -3057,7 +2784,7 @@ class ReplicaManagerTest { producerId: Long, producerEpoch: Short, baseSequence: Int = 0, - transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported + transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED ): CallbackResult[Either[Errors, VerificationGuard]] = { val result = new CallbackResult[Either[Errors, VerificationGuard]]() def postVerificationCallback(errorAndGuard: (Errors, VerificationGuard)): Unit = { @@ -3203,9 +2930,7 @@ class ReplicaManagerTest { transactionalTopicPartitions: List[TopicPartition], config: KafkaConfig = config, scheduler: Scheduler = new MockScheduler(time)): ReplicaManager = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) - val metadataCache = mock(classOf[MetadataCache]) - + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val replicaManager = new ReplicaManager( metrics = metrics, config = config, @@ -3232,7 +2957,6 @@ class ReplicaManagerTest { propsModifier: Properties => Unit = _ => {}, mockReplicaFetcherManager: Option[ReplicaFetcherManager] = None, mockReplicaAlterLogDirsManager: Option[ReplicaAlterLogDirsManager] = None, - isShuttingDown: 
AtomicBoolean = new AtomicBoolean(false), enableRemoteStorage: Boolean = false, shouldMockLog: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, @@ -3253,10 +2977,11 @@ class ReplicaManagerTest { if (enableRemoteStorage && defaultTopicRemoteLogStorageEnable) { logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") } - val mockLog = setupMockLog(path1) + val logConfig = new LogConfig(logProps) + val mockLogFn = (topicPartition: TopicPartition, topicId: Option[Uuid]) => setupMockLog(path1, logConfig, enableRemoteStorage, topicPartition, topicId) if (setupLogDirMetaProperties) { // add meta.properties file in each dir - config.logDirs.foreach(dir => { + config.logDirs.stream().forEach(dir => { val metaProps = new MetaProperties.Builder(). setVersion(MetaPropertiesVersion.V0). setClusterId("clusterId"). @@ -3267,10 +2992,7 @@ class ReplicaManagerTest { new File(new File(dir), MetaPropertiesEnsemble.META_PROPERTIES_NAME).getAbsolutePath, false) }) } - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(logProps), log = if (shouldMockLog) Some(mockLog) else None, remoteStorageSystemEnable = enableRemoteStorage) - val logConfig = new LogConfig(logProps) - when(mockLog.config).thenReturn(logConfig) - when(mockLog.remoteLogEnabled()).thenReturn(enableRemoteStorage) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), logConfig, logFn = if (shouldMockLog) Some(mockLogFn) else None, remoteStorageSystemEnable = enableRemoteStorage) val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId)) brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.isRemoteStorageSystemEnabled) @@ -3317,14 +3039,12 @@ class ReplicaManagerTest { logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, brokerTopicStats = brokerTopicStats, - isShuttingDown = isShuttingDown, delayedProducePurgatoryParam = Some(mockProducePurgatory), delayedFetchPurgatoryParam = Some(mockFetchPurgatory), delayedDeleteRecordsPurgatoryParam = Some(mockDeleteRecordsPurgatory), delayedRemoteFetchPurgatoryParam = Some(mockDelayedRemoteFetchPurgatory), delayedRemoteListOffsetsPurgatoryParam = Some(mockDelayedRemoteListOffsetsPurgatory), delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory), - threadNamePrefix = Option(this.getClass.getName), addPartitionsToTxnManager = Some(addPartitionsToTxnManager), directoryEventHandler = directoryEventHandler, remoteLogManager = if (enableRemoteStorage) { @@ -3337,7 +3057,6 @@ class ReplicaManagerTest { override protected def createReplicaFetcherManager( metrics: Metrics, time: Time, - threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager ): ReplicaFetcherManager = { mockReplicaFetcherManager.getOrElse { @@ -3345,21 +3064,19 @@ class ReplicaManagerTest { super.createReplicaFetcherManager( metrics, time, - threadNamePrefix, quotaManager ) val config = this.config val metadataCache = this.metadataCache - new ReplicaFetcherManager(config, this, metrics, time, threadNamePrefix, quotaManager, () => metadataCache.metadataVersion(), () => 1) { + new ReplicaFetcherManager(config, this, metrics, time, quotaManager, () => metadataCache.metadataVersion(), () => 1) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { - val prefix = threadNamePrefix.map(tp => s"$tp:").getOrElse("") - val threadName = 
s"${prefix}ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" + val threadName = s"ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" val tp = new TopicPartition(topic, 0) val leader = new MockLeaderEndPoint() { - override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { - Map(tp -> new FetchData().setErrorCode(Errors.OFFSET_MOVED_TO_TIERED_STORAGE.code)) - } + override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + Map(tp -> new FetchResponseData.PartitionData().setErrorCode(Errors.OFFSET_MOVED_TO_TIERED_STORAGE.code)) + }.asJava } leader.setLeaderState(tp, PartitionState(leaderEpoch = 0)) leader.setReplicaPartitionStateCallback(_ => PartitionState(leaderEpoch = 0)) @@ -3382,7 +3099,6 @@ class ReplicaManagerTest { super.createReplicaFetcherManager( metrics, time, - threadNamePrefix, quotaManager ) } @@ -3405,11 +3121,8 @@ class ReplicaManagerTest { @Test def testOldLeaderLosesMetricsWhenReassignPartitions(): Unit = { - val controllerEpoch = 0 val leaderEpoch = 0 val leaderEpochIncrement = 1 - val correlationId = 0 - val controllerId = 0 val mockTopicStats1: BrokerTopicStats = mock(classOf[BrokerTopicStats]) val (rm0, rm1) = prepareDifferentReplicaManagers(mock(classOf[BrokerTopicStats]), mockTopicStats1) @@ -3422,64 +3135,34 @@ class ReplicaManagerTest { val partition1Replicas = Seq[Integer](1, 0).asJava val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava - val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(controllerId, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true), - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp1.topic) - .setPartitionIndex(tp1.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(1) - .setLeaderEpoch(leaderEpoch) - .setIsr(partition1Replicas) - .setPartitionEpoch(0) - .setReplicas(partition1Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() - - rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) - rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) + val delta1 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch) + delta1.replay(new PartitionRecord() + .setPartitionId(tp1.partition) + .setTopicId(topicIds.get(topic)) + .setIsr(partition1Replicas) + .setReplicas(partition1Replicas) + .setLeader(partition1Replicas.get(0)) + .setLeaderEpoch(leaderEpoch) + .setPartitionEpoch(0) + ) + val leaderMetadataImage1 = imageFromTopics(delta1.apply()) + rm0.applyDelta(delta1, leaderMetadataImage1) + rm1.applyDelta(delta1, leaderMetadataImage1) // make broker 0 the leader of partition 1 so broker 1 loses its leadership position - val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder( controllerId, controllerEpoch, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(0) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - 
.setIsNew(true), - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp1.topic) - .setPartitionIndex(tp1.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(0) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setIsr(partition1Replicas) - .setPartitionEpoch(0) - .setReplicas(partition1Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() - - rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) - rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) + val delta2 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch + leaderEpochIncrement) + delta2.replay(new PartitionRecord() + .setPartitionId(tp1.partition) + .setTopicId(topicIds.get(topic)) + .setIsr(partition1Replicas) + .setReplicas(partition1Replicas) + .setLeader(partition1Replicas.get(1)) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setPartitionEpoch(0) + ) + val leaderMetadataImage2 = imageFromTopics(delta2.apply()) + rm0.applyDelta(delta2, leaderMetadataImage2) + rm1.applyDelta(delta2, leaderMetadataImage2) } finally { Utils.tryAll(util.Arrays.asList[Callable[Void]]( () => { @@ -3499,11 +3182,8 @@ class ReplicaManagerTest { @Test def testOldFollowerLosesMetricsWhenReassignPartitions(): Unit = { - val controllerEpoch = 0 val leaderEpoch = 0 val leaderEpochIncrement = 1 - val correlationId = 0 - val controllerId = 0 val mockTopicStats1: BrokerTopicStats = mock(classOf[BrokerTopicStats]) val (rm0, rm1) = prepareDifferentReplicaManagers(mock(classOf[BrokerTopicStats]), mockTopicStats1) @@ -3516,65 +3196,34 @@ class ReplicaManagerTest { val partition1Replicas = Seq[Integer](1, 0).asJava val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava - val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(controllerId, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(1) - .setLeaderEpoch(leaderEpoch) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true), - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp1.topic) - .setPartitionIndex(tp1.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(1) - .setLeaderEpoch(leaderEpoch) - .setIsr(partition1Replicas) - .setPartitionEpoch(0) - .setReplicas(partition1Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() - - rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) - rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) + val delta = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch) + delta.replay(new PartitionRecord() + .setPartitionId(tp1.partition) + .setTopicId(topicIds.get(topic)) + .setIsr(partition1Replicas) + .setReplicas(partition1Replicas) + .setLeader(partition1Replicas.get(0)) + .setLeaderEpoch(leaderEpoch) + .setPartitionEpoch(0) + ) + val leaderMetadataImage = imageFromTopics(delta.apply()) + rm0.applyDelta(delta, leaderMetadataImage) + rm1.applyDelta(delta, leaderMetadataImage) // make broker 0 the leader of partition 1 so broker 1 loses its leadership position - val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(controllerId, 
- controllerEpoch, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(0) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true), - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp1.topic) - .setPartitionIndex(tp1.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(0) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setIsr(partition1Replicas) - .setPartitionEpoch(0) - .setReplicas(partition1Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() - - rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) - rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) + val delta2 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(1), partition0Replicas, partition0Replicas, leaderEpoch + leaderEpochIncrement) + delta2.replay(new PartitionRecord() + .setPartitionId(tp1.partition) + .setTopicId(topicIds.get(topic)) + .setIsr(partition1Replicas) + .setReplicas(partition1Replicas) + .setLeader(partition1Replicas.get(1)) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setPartitionEpoch(0) + ) + val leaderMetadataImage2 = imageFromTopics(delta2.apply()) + rm0.applyDelta(delta2, leaderMetadataImage2) + rm1.applyDelta(delta2, leaderMetadataImage2) } finally { Utils.tryAll(util.Arrays.asList[Callable[Void]]( () => { @@ -3590,7 +3239,6 @@ class ReplicaManagerTest { // verify that broker 1 did remove its metrics when no longer being the leader of partition 1 verify(mockTopicStats1).removeOldLeaderMetrics(topic) - verify(mockTopicStats1).removeOldFollowerMetrics(topic) } private def prepareDifferentReplicaManagers(brokerTopicStats1: BrokerTopicStats, @@ -3604,8 +3252,8 @@ class ReplicaManagerTest { val config0 = KafkaConfig.fromProps(props0) val config1 = KafkaConfig.fromProps(props1) - val mockLogMgr0 = TestUtils.createLogManager(config0.logDirs.map(new File(_))) - val mockLogMgr1 = TestUtils.createLogManager(config1.logDirs.map(new File(_))) + val mockLogMgr0 = TestUtils.createLogManager(config0.logDirs.asScala.map(new File(_))) + val mockLogMgr1 = TestUtils.createLogManager(config1.logDirs.asScala.map(new File(_))) val metadataCache0: MetadataCache = mock(classOf[MetadataCache]) val metadataCache1: MetadataCache = mock(classOf[MetadataCache]) @@ -3646,51 +3294,53 @@ class ReplicaManagerTest { @ValueSource(booleans = Array(true, false)) def testOffsetOutOfRangeExceptionWhenReadFromLog(isFromFollower: Boolean): Unit = { val replicaId = if (isFromFollower) 1 else -1 + val fetchMaxBytes = 150 + val partitionMaxBytes = 100 val tp0 = new TopicPartition(topic, 0) + val tp02 = new TopicPartition(topic2, 0) val tidp0 = new TopicIdPartition(topicId, tp0) + val tidp02 = new TopicIdPartition(topicId2, tp02) // create a replicaManager with remoteLog enabled val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteFetchQuotaExceeded = Some(false)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + 
replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp02).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId2)) val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava + val topicIds = Map(tp0.topic -> topicId, tp02.topic -> topicId2).asJava val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(leaderEpoch) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - - val params = new FetchParams(replicaId, 1, 1000, 0, 100, FetchIsolation.LOG_END, Optional.empty) - // when reading log, it'll throw OffsetOutOfRangeException, which will be handled separately - val result = replicaManager.readFromLog(params, Seq(tidp0 -> new PartitionData(topicId, 1, 0, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch))), UNBOUNDED_QUOTA, false) - - if (isFromFollower) { - // expect OFFSET_MOVED_TO_TIERED_STORAGE error returned if it's from follower, since the data is already available in remote log - assertEquals(Errors.OFFSET_MOVED_TO_TIERED_STORAGE, result.head._2.error) - } else { - assertEquals(Errors.NONE, result.head._2.error) - } - assertEquals(startOffset, result.head._2.leaderLogStartOffset) - assertEquals(endOffset, result.head._2.leaderLogEndOffset) - assertEquals(highHW, result.head._2.highWatermark) - if (isFromFollower) { - assertFalse(result.head._2.info.delayedRemoteStorageFetch.isPresent) - } else { - // for consumer fetch, we should return a delayedRemoteStorageFetch to wait for remote fetch - assertTrue(result.head._2.info.delayedRemoteStorageFetch.isPresent) + val delta = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas) + val delta2 = createLeaderDelta(topicIds.get(topic2), tp02, partition0Replicas.get(0), partition0Replicas, partition0Replicas) + val leaderMetadataImage = imageFromTopics(delta.apply()) + val leaderMetadataImage2 = imageFromTopics(delta2.apply()) + replicaManager.applyDelta(delta, leaderMetadataImage) + replicaManager.applyDelta(delta2, leaderMetadataImage2) + + val params = new FetchParams(replicaId, 1, 100, 0, fetchMaxBytes, FetchIsolation.LOG_END, Optional.empty) + // when reading logs from 2 partitions, they'll throw OffsetOutOfRangeException, which will be handled separately + val results = replicaManager.readFromLog(params, Seq( + tidp0 -> new PartitionData(topicId, 1, 0, partitionMaxBytes, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)), + tidp02 -> new PartitionData(topicId2, 1, 0, partitionMaxBytes, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch))), UNBOUNDED_QUOTA, false) + + results.foreach { case (tidp, partitionData) => + assertEquals(startOffset, partitionData.leaderLogStartOffset) + assertEquals(endOffset, partitionData.leaderLogEndOffset) + assertEquals(highHW, partitionData.highWatermark) + if (isFromFollower) { + // expect OFFSET_MOVED_TO_TIERED_STORAGE error returned if it's from follower, since 
the data is already available in remote log + assertEquals(Errors.OFFSET_MOVED_TO_TIERED_STORAGE, partitionData.error) + assertFalse(partitionData.info.delayedRemoteStorageFetch.isPresent) + } else { + assertEquals(Errors.NONE, partitionData.error) + // for consumer fetch, we should return a delayedRemoteStorageFetch to wait for remote fetch + assertTrue(partitionData.info.delayedRemoteStorageFetch.isPresent) + // verify the 1st partition will set the fetchMaxBytes to partitionMaxBytes, + // and the 2nd one will set to the remaining (fetchMaxBytes - partitionMaxBytes) to meet the "fetch.max.bytes" config. + if (tidp.topic == topic) + assertEquals(partitionMaxBytes, partitionData.info.delayedRemoteStorageFetch.get().fetchMaxBytes) + else + assertEquals(fetchMaxBytes - partitionMaxBytes, partitionData.info.delayedRemoteStorageFetch.get().fetchMaxBytes) + } } } finally { replicaManager.shutdown(checkpointHW = false) @@ -3700,6 +3350,7 @@ class ReplicaManagerTest { @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testOffsetOutOfRangeExceptionWhenFetchMessages(isFromFollower: Boolean): Unit = { + val brokerList = Seq[Integer](0, 1).asJava val replicaId = if (isFromFollower) 1 else -1 val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) @@ -3707,26 +3358,12 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog= true, remoteFetchQuotaExceeded = Some(false)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) val topicIds = Map(tp0.topic -> topicId).asJava val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(leaderEpoch) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val delta = createLeaderDelta(topicIds.get(topic), tp0, brokerList.get(0), brokerList, brokerList) + val leaderMetadataImage = imageFromTopics(delta.apply()) + replicaManager.applyDelta(delta, leaderMetadataImage) val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) val fetchOffset = 1 @@ -3751,7 +3388,7 @@ class ReplicaManagerTest { } else { verify(mockRemoteLogManager).asyncRead(remoteStorageFetchInfoArg.capture(), any()) val remoteStorageFetchInfo = remoteStorageFetchInfoArg.getValue - assertEquals(tp0, remoteStorageFetchInfo.topicPartition) + assertEquals(tp0, remoteStorageFetchInfo.topicIdPartition.topicPartition) assertEquals(fetchOffset, remoteStorageFetchInfo.fetchInfo.fetchOffset) assertEquals(topicId, remoteStorageFetchInfo.fetchInfo.topicId) assertEquals(startOffset, remoteStorageFetchInfo.fetchInfo.logStartOffset) @@ -3799,26 +3436,12 @@ class ReplicaManagerTest { val replicaManager = 
setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(spyRLM)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(leaderEpoch) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) val fetchOffset = 1 @@ -3912,26 +3535,12 @@ class ReplicaManagerTest { try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(leaderEpoch) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) val mockLog = replicaManager.getPartitionOrException(tp0).log.get when(mockLog.endOffsetForEpoch(anyInt())).thenReturn(Optional.of(new OffsetAndEpoch(1, 1))) @@ -3983,6 +3592,109 @@ class ReplicaManagerTest { } } + @Test + def testMultipleRemoteFetchesInOneFetchRequest(): Unit = { + val replicaId = -1 + val tp0 = new TopicPartition(topic, 0) + val tp1 = new TopicPartition(topic, 1) + val tidp0 = new TopicIdPartition(topicId, tp0) + val tidp1 = new TopicIdPartition(topicId, tp1) + + val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteFetchQuotaExceeded = Some(false)) + + try { + val 
offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp1).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + + val leaderEpoch = 0 + val leaderDelta0 = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) + val leaderDelta1 = createLeaderDelta(topicId, tp1, leaderId = 0, leaderEpoch = leaderEpoch) + val leaderMetadataImage0 = imageFromTopics(leaderDelta0.apply()) + val leaderMetadataImage1 = imageFromTopics(leaderDelta1.apply()) + replicaManager.applyDelta(leaderDelta0, leaderMetadataImage0) + replicaManager.applyDelta(leaderDelta1, leaderMetadataImage1) + + val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) + val fetchOffsetTp0 = 1 + val fetchOffsetTp1 = 2 + + val responseSeq = new AtomicReference[Seq[(TopicIdPartition, FetchPartitionData)]]() + val responseLatch = new CountDownLatch(1) + + def fetchCallback(responseStatus: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { + responseSeq.set(responseStatus) + responseLatch.countDown() + } + + val callbacks: util.Set[Consumer[RemoteLogReadResult]] = new util.HashSet[Consumer[RemoteLogReadResult]]() + when(mockRemoteLogManager.asyncRead(any(), any())).thenAnswer(ans => { + callbacks.add(ans.getArgument(1, classOf[Consumer[RemoteLogReadResult]])) + mock(classOf[Future[Void]]) + }) + + // Start the fetch request for both partitions - this should trigger remote fetches since + // the default mocked log behavior throws OffsetOutOfRangeException + replicaManager.fetchMessages(params, Seq( + tidp0 -> new PartitionData(topicId, fetchOffsetTp0, startOffset, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)), + tidp1 -> new PartitionData(topicId, fetchOffsetTp1, startOffset, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)) + ), UNBOUNDED_QUOTA, fetchCallback) + + // Verify that exactly two asyncRead calls were made (one for each partition) + val remoteStorageFetchInfoArg: ArgumentCaptor[RemoteStorageFetchInfo] = ArgumentCaptor.forClass(classOf[RemoteStorageFetchInfo]) + verify(mockRemoteLogManager, times(2)).asyncRead(remoteStorageFetchInfoArg.capture(), any()) + + // Verify that remote fetch operations were properly set up for both partitions + assertTrue(replicaManager.delayedRemoteFetchPurgatory.watched == 2, "DelayedRemoteFetch purgatory should have operations") + + // Verify both partitions were captured in the remote fetch requests + val capturedFetchInfos = remoteStorageFetchInfoArg.getAllValues.asScala + assertEquals(2, capturedFetchInfos.size, "Should have 2 remote storage fetch info calls") + + val capturedTopicPartitions = capturedFetchInfos.map(_.topicIdPartition.topicPartition).toSet + assertTrue(capturedTopicPartitions.contains(tp0), "Should contain " + tp0) + assertTrue(capturedTopicPartitions.contains(tp1), "Should contain " + tp1) + + // Verify the fetch info details are correct for both partitions + capturedFetchInfos.foreach { fetchInfo => + assertEquals(topicId, fetchInfo.fetchInfo.topicId) + assertEquals(startOffset, fetchInfo.fetchInfo.logStartOffset) + assertEquals(leaderEpoch, fetchInfo.fetchInfo.currentLeaderEpoch.get()) + if (fetchInfo.topicIdPartition.topicPartition == tp0) { + assertEquals(fetchOffsetTp0, fetchInfo.fetchInfo.fetchOffset) + } else { 
+ assertEquals(fetchOffsetTp1, fetchInfo.fetchInfo.fetchOffset) + } + } + + // Complete the 2 asyncRead tasks + callbacks.forEach(callback => callback.accept(buildRemoteReadResult(Errors.NONE))) + + // Wait for the fetch callback to complete and verify responseSeq content + assertTrue(responseLatch.await(5, TimeUnit.SECONDS), "Fetch callback should complete") + + val responseData = responseSeq.get() + assertNotNull(responseData, "Response sequence should not be null") + assertEquals(2, responseData.size, "Response should contain data for both partitions") + + // Verify that response contains both tidp0 and tidp1 and have no errors + val responseTopicIdPartitions = responseData.map(_._1).toSet + assertTrue(responseTopicIdPartitions.contains(tidp0), "Response should contain " + tidp0) + assertTrue(responseTopicIdPartitions.contains(tidp1), "Response should contain " + tidp1) + responseData.foreach { case (_, fetchPartitionData) => + assertEquals(Errors.NONE, fetchPartitionData.error) + } + } finally { + replicaManager.shutdown(checkpointHW = false) + } + } + + private def buildRemoteReadResult(error: Errors): RemoteLogReadResult = { + new RemoteLogReadResult( + Optional.of(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY)), + if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) + } + private def yammerMetricValue(name: String): Any = { val allMetrics = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala val (_, metric) = allMetrics.find { case (n, _) => n.getMBeanName.endsWith(name) } @@ -3997,8 +3709,6 @@ class ReplicaManagerTest { @Test def testSuccessfulBuildRemoteLogAuxStateMetrics(): Unit = { - val tp0 = new TopicPartition(topic, 0) - val remoteLogManager = mock(classOf[RemoteLogManager]) val remoteLogSegmentMetadata = mock(classOf[RemoteLogSegmentMetadata]) when(remoteLogManager.fetchRemoteLogSegmentMetadata(any(), anyInt(), anyLong())).thenReturn( @@ -4007,43 +3717,29 @@ class ReplicaManagerTest { val storageManager = mock(classOf[RemoteStorageManager]) when(storageManager.fetchIndex(any(), any())).thenReturn(new ByteArrayInputStream("0".getBytes())) when(remoteLogManager.storageManager()).thenReturn(storageManager) + when(remoteLogManager.isPartitionReady(any())).thenReturn(true) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { - val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + replicaManager.createPartition(topicPartition).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() // Verify the metrics for build remote log state and for failures is zero 
before replicas start to fetch - assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) - assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).failedBuildRemoteLogAuxStateRate.count) + assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count) + assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).failedBuildRemoteLogAuxStateRate.count) // Verify aggregate metrics assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, topicPartition, leaderId = 1, replicas = partition0Replicas, isr = partition0Replicas) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing - waitUntilTrue(() => brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0, - "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) - assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).failedBuildRemoteLogAuxStateRate.count) + waitUntilTrue(() => brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count > 0, + "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count) + assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).failedBuildRemoteLogAuxStateRate.count) // Verify aggregate metrics waitUntilTrue(() => brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count > 0, "Should have all topic buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) @@ -4065,24 +3761,7 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) // Verify the metrics for build remote log state and for failures is zero before replicas start to fetch assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -4091,10 
+3770,13 @@ class ReplicaManagerTest { assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val brokerList = Seq[Integer](0, 1).asJava + val delta = createLeaderDelta(topicId, new TopicPartition(topic, 0), brokerList.get(1), brokerList, brokerList) + val leaderMetadataImage = imageFromTopics(delta.apply()) + replicaManager.applyDelta(delta, leaderMetadataImage) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing - // We expect failedBuildRemoteLogAuxStateRate to increase because there is no remoteLogSegmentMetadata + // We expect failedBuildRemoteLogAuxStateRate to increase because the RemoteLogManager is not ready for the tp0 // when attempting to build log aux state TestUtils.waitUntilTrue(() => brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0, "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -4124,24 +3806,8 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId).asJava - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp0.topic) - .setPartitionIndex(tp0.partition) - .setControllerEpoch(0) - .setLeader(1) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() // Verify the metrics for build remote log state and for failures is zero before replicas start to fetch assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -4150,7 +3816,9 @@ class ReplicaManagerTest { assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = partition0Replicas, isr = partition0Replicas) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderMetadataImage) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing // We expect failedBuildRemoteLogAuxStateRate to increase because fetchRemoteLogSegmentMetadata returns RemoteStorageException @@ -4168,14 +3836,14 @@ class ReplicaManagerTest { } } - private def setupMockLog(path: String): UnifiedLog = { + private def setupMockLog(path: String, 
logConfig: LogConfig, enableRemoteStorage: Boolean, topicPartition: TopicPartition, topicId: Option[Uuid]): UnifiedLog = { val mockLog = mock(classOf[UnifiedLog]) - val partitionDir = new File(path, s"$topic-0") + val partitionDir = new File(path, s"$topicPartition") partitionDir.mkdir() when(mockLog.dir).thenReturn(partitionDir) when(mockLog.parentDir).thenReturn(path) - when(mockLog.topicId).thenReturn(Optional.of(topicId)) - when(mockLog.topicPartition).thenReturn(new TopicPartition(topic, 0)) + when(mockLog.topicId).thenReturn(topicId.toJava) + when(mockLog.topicPartition).thenReturn(topicPartition) when(mockLog.highWatermark).thenReturn(highHW) when(mockLog.updateHighWatermark(anyLong())).thenReturn(0L) when(mockLog.logEndOffsetMetadata).thenReturn(new LogOffsetMetadata(10)) @@ -4189,6 +3857,8 @@ class ReplicaManagerTest { when(mockLog.latestEpoch).thenReturn(Optional.of(0)) val producerStateManager = mock(classOf[ProducerStateManager]) when(mockLog.producerStateManager).thenReturn(producerStateManager) + when(mockLog.config).thenReturn(logConfig) + when(mockLog.remoteLogEnabled()).thenReturn(enableRemoteStorage) mockLog } @@ -4199,7 +3869,7 @@ class ReplicaManagerTest { def createReplicaManager(): ReplicaManager = { val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) new ReplicaManager( metrics = metrics, config = config, @@ -4231,30 +3901,14 @@ class ReplicaManagerTest { def testPartitionMetadataFile(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { - val brokerList = Seq[Integer](0, 1).asJava - val topicPartition = new TopicPartition(topic, 0) - val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) - val topicNames = topicIds.asScala.map(_.swap).asJava - - def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest = - new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(epoch) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - - val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ()) - assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) + val leaderDelta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), + topicName = topic, topicId = topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) + + assertTrue(replicaManager.getPartition(topicPartition).isInstanceOf[HostedPartition.Online]) assertFalse(replicaManager.localLog(topicPartition).isEmpty) - val id = topicIds.get(topicPartition.topic()) + val id = topicIds(topicPartition.topic) val log = replicaManager.localLog(topicPartition).get assertTrue(log.partitionMetadataFile.get.exists()) val partitionMetadata = log.partitionMetadataFile.get.read() @@ -4271,84 +3925,60 @@ class ReplicaManagerTest { def testInconsistentIdReturnsError(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { - val brokerList = Seq[Integer](0, 1).asJava - val 
topicPartition = new TopicPartition(topic, 0) - val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) - val topicNames = topicIds.asScala.map(_.swap).asJava - - val invalidTopicIds = Collections.singletonMap(topic, Uuid.randomUuid()) - val invalidTopicNames = invalidTopicIds.asScala.map(_.swap).asJava - - def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest = - new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(epoch) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - - val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ()) - assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) - - val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, topicIds), (_, _) => ()) - assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition)) + val invalidTopicId = Uuid.randomUuid() + + val initialDelta = topicsCreateDelta(0, isStartIdLeader = true, + partitions = List(0), topicName = topic, topicId = topicIds(topic)) + val initialImage = imageFromTopics(initialDelta.apply()) + replicaManager.applyDelta(initialDelta, initialImage) + + val updateDelta = topicsCreateDelta(0, isStartIdLeader = true, + partitions = List(0), topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) + val updateImage = imageFromTopics(updateDelta.apply()) + replicaManager.applyDelta(updateDelta, updateImage) // Send request with inconsistent ID. 
- val response3 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, invalidTopicIds), (_, _) => ()) - assertEquals(Errors.INCONSISTENT_TOPIC_ID, response3.partitionErrors(invalidTopicNames).get(topicPartition)) + val inconsistentDelta1 = topicsCreateDelta(0, isStartIdLeader = true, + partitions = List(0), topicName = topic, topicId = invalidTopicId, leaderEpoch = 1) + val inconsistentImage1 = imageFromTopics(inconsistentDelta1.apply()) + val exception1 = assertThrows(classOf[IllegalStateException], () => { + replicaManager.applyDelta(inconsistentDelta1, inconsistentImage1) + }) + assertEquals(s"Topic ${topic}-0 exists, but its ID is ${topicId}, not ${invalidTopicId} as expected", exception1.getMessage) + + val inconsistentDelta2 = topicsCreateDelta(0, isStartIdLeader = true, + partitions = List(0), topicName = topic, topicId = invalidTopicId, leaderEpoch = 2) + val inconsistentImage2 = imageFromTopics(inconsistentDelta2.apply()) + val exception2 = assertThrows(classOf[IllegalStateException], () => { + replicaManager.applyDelta(inconsistentDelta2, inconsistentImage2) + }) + assertEquals(s"Topic ${topic}-0 exists, but its ID is ${topicId}, not ${invalidTopicId} as expected", exception2.getMessage) - val response4 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(2, invalidTopicIds), (_, _) => ()) - assertEquals(Errors.INCONSISTENT_TOPIC_ID, response4.partitionErrors(invalidTopicNames).get(topicPartition)) } finally { replicaManager.shutdown(checkpointHW = false) } } @Test - def testPartitionMetadataFileNotCreated(): Unit = { + def testPartitionMetadataFileCreated(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { val brokerList = Seq[Integer](0, 1).asJava val topicPartition = new TopicPartition(topic, 0) - val topicPartitionFake = new TopicPartition("fakeTopic", 0) - val topicIds = Map(topic -> Uuid.ZERO_UUID, "foo" -> Uuid.randomUuid()).asJava - val topicNames = topicIds.asScala.map(_.swap).asJava - - def leaderAndIsrRequest(epoch: Int, name: String): LeaderAndIsrRequest = - new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(new LeaderAndIsrRequest.PartitionState() - .setTopicName(name) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(epoch) - .setIsr(brokerList) - .setPartitionEpoch(0) - .setReplicas(brokerList) - .setIsNew(true)).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - - // There is no file if the topic does not have an associated topic ID. - val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, "fakeTopic"), (_, _) => ()) - assertTrue(replicaManager.localLog(topicPartitionFake).isDefined) - val log = replicaManager.localLog(topicPartitionFake).get - assertFalse(log.partitionMetadataFile.get.exists()) - assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) - - // There is no file if the topic has the default UUID. - val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topic), (_, _) => ()) + + val leaderDelta = createLeaderDelta( + topicId = Uuid.ZERO_UUID, + partition = topicPartition, + leaderId = 0, + replicas = brokerList, + isr = brokerList, + ) + + // The file exists if the topic has the default UUID. 
+ replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) assertTrue(replicaManager.localLog(topicPartition).isDefined) - val log2 = replicaManager.localLog(topicPartition).get - assertFalse(log2.partitionMetadataFile.get.exists()) - assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition)) + val log = replicaManager.localLog(topicPartition).get + assertTrue(log.partitionMetadataFile.get.exists()) } finally { replicaManager.shutdown(checkpointHW = false) @@ -4369,21 +3999,22 @@ class ReplicaManagerTest { // Delete the data directory to trigger a storage exception Utils.delete(dataDir) - val request = makeLeaderAndIsrRequest( - topicId = Uuid.randomUuid(), - topicPartition = topicPartition, - replicas = Seq(0, 1), - leaderAndIsr = new LeaderAndIsr(if (becomeLeader) 0 else 1, List(0, 1).map(Int.box).asJava) + val leaderDelta = createLeaderDelta( + topicId = topicId, + partition = topicPartition, + leaderId = if (becomeLeader) 0 else 1, + replicas = util.Arrays.asList(0 , 1), + isr = util.Arrays.asList(0, 1), ) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) - replicaManager.becomeLeaderOrFollower(0, request, (_, _) => ()) val hostedPartition = replicaManager.getPartition(topicPartition) assertEquals( classOf[HostedPartition.Offline], hostedPartition.getClass ) assertEquals( - request.topicIds().get(topicPartition.topic()), + topicId, hostedPartition.asInstanceOf[HostedPartition.Offline].partition.flatMap(p => p.topicId).get ) } finally { @@ -4391,43 +4022,6 @@ class ReplicaManagerTest { } } - private def makeLeaderAndIsrRequest( - topicId: Uuid, - topicPartition: TopicPartition, - replicas: Seq[Int], - leaderAndIsr: LeaderAndIsr, - isNew: Boolean = true, - brokerEpoch: Int = 0, - controllerId: Int = 0, - controllerEpoch: Int = 0 - ): LeaderAndIsrRequest = { - val partitionState = new LeaderAndIsrRequest.PartitionState() - .setTopicName(topicPartition.topic) - .setPartitionIndex(topicPartition.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(leaderAndIsr.leader) - .setLeaderEpoch(leaderAndIsr.leaderEpoch) - .setIsr(leaderAndIsr.isr) - .setPartitionEpoch(leaderAndIsr.partitionEpoch) - .setReplicas(replicas.map(Int.box).asJava) - .setIsNew(isNew) - - def mkNode(replicaId: Int): Node = { - new Node(replicaId, s"host-$replicaId", 9092) - } - - val nodes = Set(mkNode(controllerId)) ++ replicas.map(mkNode).toSet - - new LeaderAndIsrRequest.Builder( - controllerId, - controllerEpoch, - brokerEpoch, - Seq(partitionState).asJava, - Map(topicPartition.topic -> topicId).asJava, - nodes.asJava - ).build() - } - @Test def testActiveProducerState(): Unit = { val brokerId = 0 @@ -4443,35 +4037,87 @@ class ReplicaManagerTest { val oofProducerState = replicaManager.activeProducerState(oofPartition) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, Errors.forCode(oofProducerState.errorCode)) - // This API is supported by both leaders and followers - val barPartition = new TopicPartition("bar", 0) - val barLeaderAndIsrRequest = makeLeaderAndIsrRequest( - topicId = Uuid.randomUuid(), - topicPartition = barPartition, - replicas = Seq(brokerId), - leaderAndIsr = new LeaderAndIsr(brokerId, List(brokerId).map(Int.box).asJava) - ) - replicaManager.becomeLeaderOrFollower(0, barLeaderAndIsrRequest, (_, _) => ()) + val barTopicId = Uuid.randomUuid() + + val leaderDelta = createLeaderDelta(barTopicId, barPartition, brokerId) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, 
leaderMetadataImage) + val barProducerState = replicaManager.activeProducerState(barPartition) assertEquals(Errors.NONE, Errors.forCode(barProducerState.errorCode)) - val otherBrokerId = 1 val bazPartition = new TopicPartition("baz", 0) - val bazLeaderAndIsrRequest = makeLeaderAndIsrRequest( - topicId = Uuid.randomUuid(), - topicPartition = bazPartition, - replicas = Seq(brokerId, otherBrokerId), - leaderAndIsr = new LeaderAndIsr(otherBrokerId, List(brokerId, otherBrokerId).map(Int.box).asJava) - ) - replicaManager.becomeLeaderOrFollower(0, bazLeaderAndIsrRequest, (_, _) => ()) + val bazTopicId = Uuid.randomUuid() + val otherBrokerId = 1 + + val followerDelta = createFollowerDelta(bazTopicId, bazPartition, brokerId, otherBrokerId) + val followerMetadataImage = imageFromTopics(followerDelta.apply()) + replicaManager.applyDelta(followerDelta, followerMetadataImage) + val bazProducerState = replicaManager.activeProducerState(bazPartition) assertEquals(Errors.NONE, Errors.forCode(bazProducerState.errorCode)) + } finally { replicaManager.shutdown(checkpointHW = false) } } + private def createLeaderDelta( + topicId: Uuid, + partition: TopicPartition, + leaderId: Integer, + replicas: util.List[Integer] = null, + isr: util.List[Integer] = null, + leaderEpoch: Int = 0): TopicsDelta = { + val delta = new TopicsDelta(TopicsImage.EMPTY) + val effectiveReplicas = Option(replicas).getOrElse(java.util.List.of(leaderId)) + val effectiveIsr = Option(isr).getOrElse(java.util.List.of(leaderId)) + + delta.replay(new TopicRecord() + .setName(partition.topic) + .setTopicId(topicId) + ) + + delta.replay(new PartitionRecord() + .setPartitionId(partition.partition) + .setTopicId(topicId) + .setReplicas(effectiveReplicas) + .setIsr(effectiveIsr) + .setLeader(leaderId) + .setLeaderEpoch(leaderEpoch) + .setPartitionEpoch(0) + ) + + delta + } + + private def createFollowerDelta( + topicId: Uuid, + partition: TopicPartition, + followerId: Int, + leaderId: Int, + leaderEpoch: Int = 0): TopicsDelta = { + val delta = new TopicsDelta(TopicsImage.EMPTY) + + delta.replay(new TopicRecord() + .setName(partition.topic) + .setTopicId(topicId) + ) + + delta.replay(new PartitionRecord() + .setPartitionId(partition.partition) + .setTopicId(topicId) + .setReplicas(util.Arrays.asList(followerId, leaderId)) + .setIsr(util.Arrays.asList(followerId, leaderId)) + .setLeader(leaderId) + .setLeaderEpoch(leaderEpoch) + .setPartitionEpoch(0) + ) + + delta + } + val FOO_UUID = Uuid.fromString("fFJBx0OmQG-UqeaT6YaSwA") val BAR_UUID = Uuid.fromString("vApAP6y7Qx23VOfKBzbOBQ") @@ -4511,14 +4157,16 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, "foo") val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + val aliveBrokerIds = Array(1, 2) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) try { val directoryIds = replicaManager.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) - val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = directoryIds) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = directoryIds) val (partition: Partition, isNewWhenCreatedForFirstTime: Boolean) = replicaManager.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - 
partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), + isNew = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) @@ -4563,7 +4211,7 @@ class ReplicaManagerTest { // Test applying delta as leader val directoryIds = replicaManager.logManager.directoryIdsSet.toList // Make the local replica the leader - val leaderTopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = directoryIds) + val leaderTopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = directoryIds) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) @@ -4573,7 +4221,7 @@ class ReplicaManagerTest { assertEquals(directoryIds.head, logDirIdHostingPartition0) // Test applying delta as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = directoryIds) + val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = directoryIds) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4601,7 +4249,7 @@ class ReplicaManagerTest { try { // Make the local replica the leader - val leaderTopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) + val leaderTopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id val topicIdPartition0 = new TopicIdPartition(topicId, topicPartition0) @@ -4609,7 +4257,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) // Make the local replica the as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) + val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4655,7 +4303,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) // Make the local replica the as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = List(DirectoryId.LOST, DirectoryId.LOST)) + val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = List(DirectoryId.LOST, DirectoryId.LOST)) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4694,8 +4342,7 @@ class ReplicaManagerTest { // Make the local replica the leader val leaderTopicsDelta = topicsCreateDelta(localId, true) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) - val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id - val topicIdPartition = new TopicIdPartition(topicId, topicPartition) + val topicIdPartition = new TopicIdPartition(FOO_UUID, topicPartition) 
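A minimal sketch of the delta-based pattern these tests now use in place of the old LeaderAndIsrRequest/becomeLeaderOrFollower round-trip, assuming the createLeaderDelta and imageFromTopics helpers defined in this test class and an already-constructed replicaManager; exampleTopicId and examplePartition are illustrative names only, and leaderId 0 is assumed to match the local broker id as in the surrounding tests:

  val exampleTopicId = Uuid.randomUuid()
  val examplePartition = new TopicPartition("foo", 0)
  val exampleDelta = createLeaderDelta(exampleTopicId, examplePartition, leaderId = 0)
  val exampleImage = imageFromTopics(exampleDelta.apply())
  replicaManager.applyDelta(exampleDelta, exampleImage)
  // After applyDelta the partition is hosted online and led locally, so assertions are made
  // against replicaManager.getPartition rather than a LeaderAndIsr response.
  assertTrue(replicaManager.getPartition(examplePartition).isInstanceOf[HostedPartition.Online])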
replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) @@ -4713,7 +4360,7 @@ class ReplicaManagerTest { } // Send a produce request and advance the highwatermark - val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) fetchPartitionAsFollower( replicaManager, topicIdPartition, @@ -4728,7 +4375,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) // Append on a follower should fail - val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) + val followerResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error) // Check the state of that partition and fetcher @@ -4778,18 +4425,19 @@ class ReplicaManagerTest { assertEquals(Some(new BrokerEndPoint(otherId, otherEndpoint.host(), otherEndpoint.port())), fetcher.map(_.leader.brokerEndPoint())) // Append on a follower should fail - val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) + val followerResponse = sendProducerAppend(replicaManager, + new TopicIdPartition(followerMetadataImage.topics().topicsByName().get("foo").id, topicPartition), + numOfRecords) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error) // Change the local replica to leader val leaderTopicsDelta = topicsChangeDelta(followerMetadataImage.topics(), localId, true) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) - val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id - val topicIdPartition = new TopicIdPartition(topicId, topicPartition) replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) + val topicIdPartition = new TopicIdPartition(leaderMetadataImage.topics().topicsByName().get("foo").id, topicPartition) // Send a produce request and advance the highwatermark - val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) fetchPartitionAsFollower( replicaManager, topicIdPartition, @@ -5055,7 +4703,8 @@ class ReplicaManagerTest { val localId = 1 val otherId = localId + 1 val numOfRecords = 3 - val topicPartition = new TopicPartition("foo", 0) + val topicIdPartition = new TopicIdPartition(FOO_UUID, 0, "foo") + val topicPartition = topicIdPartition.topicPartition() val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, enableRemoteStorage = enableRemoteStorage) try { @@ -5078,7 +4727,7 @@ class ReplicaManagerTest { assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition)) // Send a produce request - val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) // Change the local replica to follower val followerTopicsDelta = topicsChangeDelta(leaderMetadataImage.topics(), localId, false) @@ -5420,12 +5069,10 @@ class ReplicaManagerTest { val foo2 = new TopicPartition("foo", 2) val mockReplicaFetcherManager = mock(classOf[ReplicaFetcherManager]) - val isShuttingDown = new AtomicBoolean(false) val replicaManager = setupReplicaManagerWithMockedPurgatories( timer = new MockTimer(time), brokerId = localId, mockReplicaFetcherManager = Some(mockReplicaFetcherManager), - isShuttingDown = 
isShuttingDown, enableRemoteStorage = enableRemoteStorage ) @@ -5503,10 +5150,6 @@ class ReplicaManagerTest { reset(mockReplicaFetcherManager) - // The broker transitions to SHUTTING_DOWN state. This should not have - // any impact in KRaft mode. - isShuttingDown.set(true) - // The replica begins the controlled shutdown. replicaManager.beginControlledShutdown() @@ -5582,22 +5225,15 @@ class ReplicaManagerTest { assertFalse(replicaManager.maybeAddListener(tp, listener)) // Broker 0 becomes leader of the partition - val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(0) - .setReplicas(replicas) - .setIsNew(true) - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq(leaderAndIsrPartitionState).asJava, - Collections.singletonMap(topic, topicId), - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) - assertEquals(Errors.NONE, leaderAndIsrResponse.error) + val leaderDelta = createLeaderDelta( + topicId = topicId, + partition = tp, + leaderId = 0, + replicas = replicas, + isr = replicas, + leaderEpoch = leaderEpoch + ) + replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) // Registering it should succeed now. assertTrue(replicaManager.maybeAddListener(tp, listener)) @@ -5651,29 +5287,30 @@ class ReplicaManagerTest { } } - private def topicsCreateDelta(startId: Int, isStartIdLeader: Boolean, partition:Int = 0, directoryIds: List[Uuid] = List.empty): TopicsDelta = { + private def topicsCreateDelta(startId: Int, isStartIdLeader: Boolean, partitions:List[Int] = List(0), directoryIds: List[Uuid] = List.empty, topicName: String = "foo", topicId: Uuid = FOO_UUID, leaderEpoch: Int = 0): TopicsDelta = { val leader = if (isStartIdLeader) startId else startId + 1 val delta = new TopicsDelta(TopicsImage.EMPTY) - delta.replay(new TopicRecord().setName("foo").setTopicId(FOO_UUID)) - val record = partitionRecord(startId, leader, partition) - if (directoryIds.nonEmpty) { - record.setDirectories(directoryIds.asJava) + delta.replay(new TopicRecord().setName(topicName).setTopicId(topicId)) + + partitions.foreach { partition => + val record = partitionRecord(startId, leader, partition, topicId, leaderEpoch) + if (directoryIds.nonEmpty) { + record.setDirectories(directoryIds.asJava) + } + delta.replay(record) } - delta.replay(record) delta } - private def partitionRecord(startId: Int, leader: Int, partition: Int = 0) = { + private def partitionRecord(startId: Int, leader: Int, partition: Int = 0, topicId: Uuid = FOO_UUID, leaderEpoch: Int = 0) = { new PartitionRecord() .setPartitionId(partition) - .setTopicId(FOO_UUID) + .setTopicId(topicId) .setReplicas(util.Arrays.asList(startId, startId + 1)) .setIsr(util.Arrays.asList(startId, startId + 1)) - .setRemovingReplicas(Collections.emptyList()) - .setAddingReplicas(Collections.emptyList()) .setLeader(leader) - .setLeaderEpoch(0) + .setLeaderEpoch(leaderEpoch) .setPartitionEpoch(0) } @@ -5726,8 +5363,74 @@ class ReplicaManagerTest { assertEquals(expectedTopicId, fetchState.get.topicId) } + @Test + def testReplicaAlterLogDirsMultipleReassignmentDoesNotBlockLogCleaner(): Unit = { + val localId = 0 + val tp = new TopicPartition(topic, 0) + val tpId = new TopicIdPartition(topicId, tp) + + val props = 
TestUtils.createBrokerConfig(localId) + val path1 = TestUtils.tempRelativeDir("data").getAbsolutePath + val path2 = TestUtils.tempRelativeDir("data2").getAbsolutePath + val path3 = TestUtils.tempRelativeDir("data3").getAbsolutePath + props.put("log.dirs", Seq(path1, path2, path3).mkString(",")) + val config = KafkaConfig.fromProps(props) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), cleanerConfig = new CleanerConfig(true)) + mockLogMgr.startup(Set()) + val replicaManager = new ReplicaManager( + metrics = metrics, + config = config, + time = time, + scheduler = new MockScheduler(time), + logManager = mockLogMgr, + quotaManagers = quotaManager, + metadataCache = metadataCache, + logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), + alterPartitionManager = alterPartitionManager, + addPartitionsToTxnManager = Some(addPartitionsToTxnManager)) + + try { + val spiedPartition = spy(Partition(tpId, time, replicaManager)) + replicaManager.addOnlinePartition(tp, spiedPartition) + + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) + + // Move the replica to the second log directory. + val partition = replicaManager.getPartitionOrException(tp) + val firstLogDir = partition.log.get.dir.getParentFile + val newReplicaFolder = replicaManager.logManager.liveLogDirs.filterNot(_ == firstLogDir).head + replicaManager.alterReplicaLogDirs(Map(tp -> newReplicaFolder.getAbsolutePath)) + + // Prevent promotion of future replica + doReturn(false).when(spiedPartition).maybeReplaceCurrentWithFutureReplica() + + // Make sure the future log is created with the correct topic ID. + val futureLog = replicaManager.futureLocalLogOrException(tp) + assertEquals(Optional.of(topicId), futureLog.topicId) + + // Move the replica to the third log directory + val finalReplicaFolder = replicaManager.logManager.liveLogDirs.filterNot(it => it == firstLogDir || it == newReplicaFolder).head + replicaManager.alterReplicaLogDirs(Map(tp -> finalReplicaFolder.getAbsolutePath)) + + reset(spiedPartition) + + TestUtils.waitUntilTrue(() => { + replicaManager.replicaAlterLogDirsManager.shutdownIdleFetcherThreads() + replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.isEmpty + }, s"ReplicaAlterLogDirsThread should be gone", waitTimeMs = 60_000) + + verify(replicaManager.logManager.cleaner, times(2)).resumeCleaning(Set(tp).asJava) + } finally { + replicaManager.shutdown(checkpointHW = false) + mockLogMgr.shutdown() + } + } + @Test def testReplicaAlterLogDirs(): Unit = { + val localId = 0 val tp = new TopicPartition(topic, 0) val mockReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) @@ -5744,13 +5447,9 @@ class ReplicaManagerTest { topicId = None ) - val leaderAndIsrRequest = makeLeaderAndIsrRequest( - topicId = topicId, - topicPartition = tp, - replicas = Seq(0, 1), - leaderAndIsr = new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava), - ) - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) + val leaderImage = imageFromTopics(leaderDelta.apply()) + replicaManager.applyDelta(leaderDelta, leaderImage) // Move the replica to the second log directory. 
val partition = replicaManager.getPartitionOrException(tp) @@ -5792,7 +5491,7 @@ class ReplicaManagerTest { try { val responses = replicaManager.describeLogDirs(Set(new TopicPartition(topic, topicPartition))) assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.foreach { response => + responses.forEach { response => assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) @@ -5824,7 +5523,7 @@ class ReplicaManagerTest { try { val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.foreach { response => + responses.forEach { response => assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) @@ -5837,7 +5536,7 @@ class ReplicaManagerTest { @Test def testCheckpointHwOnShutdown(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val spyRm = spy(new ReplicaManager( metrics = metrics, config = config, @@ -6019,13 +5718,15 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + val aliveBrokerIds = Array(1, 2) val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) val directoryIds = rm.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), + isNew = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) @@ -6046,15 +5747,17 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + val aliveBrokerIds = Array(1, 2) val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) val directoryIds = rm.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), - new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), - None) + partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), + isNew = false, + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { assert(responseStatus.values.head.errorCode == Errors.NONE.code) 
@@ -6071,7 +5774,7 @@ class ReplicaManagerTest { @Test def testDelayedShareFetchPurgatoryOperationExpiration(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -6109,13 +5812,14 @@ class ReplicaManagerTest { mock(classOf[BiConsumer[SharePartitionKey, Throwable]]), sharePartitions, mock(classOf[ShareGroupMetrics]), - time)) + time, + 500)) val delayedShareFetchWatchKeys : util.List[DelayedShareFetchKey] = new util.ArrayList[DelayedShareFetchKey] topicPartitions.forEach((topicIdPartition: TopicIdPartition) => delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId, topicIdPartition.partition))) // You cannot acquire records for sp1, so request will be stored in purgatory waiting for timeout. - when(sp1.maybeAcquireFetchLock).thenReturn(false) + when(sp1.maybeAcquireFetchLock(any())).thenReturn(false) rm.addDelayedShareFetchRequest(delayedShareFetch = delayedShareFetch, delayedShareFetchKeys = delayedShareFetchWatchKeys) verify(delayedShareFetch, times(0)).forceComplete() @@ -6178,8 +5882,8 @@ class ReplicaManagerTest { internalTopicsAllowed = true, origin = AppendOrigin.CLIENT, entriesPerPartition = Map( - foo.topicPartition -> records, - bar.topicPartition -> records + foo -> records, + bar -> records ), requestLocal = RequestLocal.noCaching ) @@ -6187,13 +5891,13 @@ class ReplicaManagerTest { assertNotNull(result) assertEquals(2, result.size) - val fooResult = result(foo.topicPartition) + val fooResult = result(foo) assertEquals(Errors.NONE, fooResult.error) assertEquals(0, fooResult.info.logStartOffset) assertEquals(0, fooResult.info.firstOffset) assertEquals(0, fooResult.info.lastOffset) - val barResult = result(bar.topicPartition) + val barResult = result(bar) assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, barResult.error) assertEquals(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, barResult.info) } finally { @@ -6216,26 +5920,12 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints = offsetCheckpoints, None) + replicaManager.createPartition(tp).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints = offsetCheckpoints, Some(topicId)) val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp.topic -> topicId).asJava val leaderEpoch = 0 - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, - Seq( - new LeaderAndIsrRequest.PartitionState() - .setTopicName(tp.topic) - .setPartitionIndex(tp.partition) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(partition0Replicas) - .setPartitionEpoch(0) - .setReplicas(partition0Replicas) - .setIsNew(true) - ).asJava, - topicIds, - Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() - replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + val leaderDelta = createLeaderDelta(topicId, tp, leaderId = 0, leaderEpoch = leaderEpoch, replicas = partition0Replicas, isr = partition0Replicas) + val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) + 
replicaManager.applyDelta(leaderDelta, leaderMetadataImage) val params = new FetchParams(-1, 1, 1000, 0, 100, FetchIsolation.HIGH_WATERMARK, Optional.empty) replicaManager.readFromLog( diff --git a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala index ae1e0a1f5e871..42a5e8accc9c3 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala @@ -37,9 +37,7 @@ import org.apache.kafka.server.common.{Feature, MetadataVersion} import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.AfterEach -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, Test} import scala.jdk.CollectionConverters._ import scala.util.Using @@ -67,15 +65,13 @@ class ReplicationQuotasTest extends QuorumTestHarness { super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldBootstrapTwoBrokersWithLeaderThrottle(quorum: String): Unit = { + @Test + def shouldBootstrapTwoBrokersWithLeaderThrottle(): Unit = { shouldMatchQuotaReplicatingThroughAnAsymmetricTopology(true) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldBootstrapTwoBrokersWithFollowerThrottle(quorum: String): Unit = { + @Test + def shouldBootstrapTwoBrokersWithFollowerThrottle(): Unit = { shouldMatchQuotaReplicatingThroughAnAsymmetricTopology(false) } @@ -194,9 +190,8 @@ class ReplicationQuotasTest extends QuorumTestHarness { def tp(partition: Int): TopicPartition = new TopicPartition(topic, partition) - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldThrottleOldSegments(quorum: String): Unit = { + @Test + def shouldThrottleOldSegments(): Unit = { /** * Simple test which ensures throttled replication works when the dataset spans many segments */ diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 6f16f1b7a73ff..e188889556b7c 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -43,14 +43,12 @@ import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, A import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import java.net.InetAddress import java.util import java.util.concurrent.{Executors, Future, TimeUnit} -import java.util.{Collections, Optional, Properties} +import java.util.{Optional, Properties} import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ @@ -133,32 +131,74 @@ class RequestQuotaTest extends BaseRequestTest { finally super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testResponseThrottleTime(quorum: String): Unit = { + @Test + def testResponseThrottleTime(): Unit = { for (apiKey <- clientActions ++ clusterActionsWithThrottleForBroker) submitTest(apiKey, () => checkRequestThrottleTime(apiKey)) 
waitAndCheckResults() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testResponseThrottleTimeWhenBothProduceAndRequestQuotasViolated(quorum: String): Unit = { + @Test + def testResponseThrottleTimeWhenBothProduceAndRequestQuotasViolated(): Unit = { submitTest(ApiKeys.PRODUCE, () => checkSmallQuotaProducerRequestThrottleTime()) waitAndCheckResults() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testResponseThrottleTimeWhenBothFetchAndRequestQuotasViolated(quorum: String): Unit = { + @Test + def testResponseThrottleTimeWhenBothFetchAndRequestQuotasViolated(): Unit = { submitTest(ApiKeys.FETCH, () => checkSmallQuotaConsumerRequestThrottleTime()) waitAndCheckResults() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnthrottledClient(quorum: String): Unit = { + @Test + def testResponseThrottleTimeWhenBothShareFetchAndRequestQuotasViolated(): Unit = { + submitTest(ApiKeys.SHARE_FETCH, () => checkSmallQuotaShareFetchRequestThrottleTime()) + waitAndCheckResults() + } + + + @Test + def testShareFetchUsesSameFetchSensor(): Unit = { + // This test verifies that ShareFetch and Fetch use the same FETCH quota sensor per KIP-932 + val testClientId = "same-sensor-test-client" + + val quotaProps = new Properties() + quotaProps.put(QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, "1") // Very small quota + quotaProps.put(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, "0.01") // Very small request quota + changeClientIdConfig(Sanitizer.sanitize(testClientId), quotaProps) + + TestUtils.retry(20000) { + val consumeQuotaManager = brokers.head.dataPlaneRequestProcessor.quotas.fetch + assertEquals(Quota.upperBound(1), consumeQuotaManager.quota("some-user", testClientId), + s"Consumer quota override not set") + } + + // First, make a Fetch request and verify it uses FETCH quota + val fetchClient = Client(testClientId, ApiKeys.FETCH) + val fetchThrottled = fetchClient.runUntil(_.throttleTimeMs > 0) + assertTrue(fetchThrottled, "Fetch should be throttled") + + // Check quota types to verify which one is being used + val fetchThrottleTimeAfterFetch = throttleTimeMetricValueForQuotaType(testClientId, QuotaType.FETCH) + + // Now make a ShareFetch request and verify it ALSO uses FETCH quota sensor + val shareFetchClient = Client(testClientId, ApiKeys.SHARE_FETCH) + val shareFetchThrottled = shareFetchClient.runUntil(_.throttleTimeMs > 0) + assertTrue(shareFetchThrottled, "ShareFetch should be throttled") + + // Check quota types after ShareFetch + val fetchThrottleTimeAfterShareFetch = throttleTimeMetricValueForQuotaType(testClientId, QuotaType.FETCH) + + // Verify both requests use FETCH quota (not REQUEST quota) + assertTrue(!fetchThrottleTimeAfterFetch.isNaN && fetchThrottleTimeAfterFetch > 0, + s"Fetch should use FETCH quota sensor: $fetchThrottleTimeAfterFetch") + assertTrue(!fetchThrottleTimeAfterShareFetch.isNaN && fetchThrottleTimeAfterShareFetch > 0, + s"ShareFetch should use FETCH quota sensor: $fetchThrottleTimeAfterShareFetch") + } + + @Test + def testUnthrottledClient(): Unit = { for (apiKey <- clientActions) { submitTest(apiKey, () => checkUnthrottledClient(apiKey)) } @@ -166,9 +206,8 @@ class RequestQuotaTest extends BaseRequestTest { waitAndCheckResults() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testExemptRequestTime(quorum: String): Unit = { + @Test + def testExemptRequestTime(): Unit = { // Exclude `DESCRIBE_QUORUM`, maybe it shouldn't be a cluster action val actions = clusterActions -- 
clusterActionsWithThrottleForBroker -- RequestQuotaTest.Envelope -- RequestQuotaTest.ShareGroupState - ApiKeys.DESCRIBE_QUORUM for (apiKey <- actions) { @@ -178,9 +217,8 @@ class RequestQuotaTest extends BaseRequestTest { waitAndCheckResults() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testUnauthorizedThrottle(quorum: String): Unit = { + @Test + def testUnauthorizedThrottle(): Unit = { RequestQuotaTest.principal = RequestQuotaTest.UnauthorizedPrincipal val apiKeys = ApiKeys.brokerApis @@ -242,8 +280,8 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.PRODUCE => requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setName(tp.topic()).setPartitionData(Collections.singletonList( + util.List.of(new ProduceRequestData.TopicProduceData() + .setTopicId(getTopicIds().get(tp.topic()).get).setPartitionData(util.List.of( new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition()) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) @@ -257,30 +295,30 @@ class RequestQuotaTest extends BaseRequestTest { FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, 0, 0, partitionMap) case ApiKeys.METADATA => - new MetadataRequest.Builder(List(topic).asJava, true) + new MetadataRequest.Builder(util.List.of(topic), true) case ApiKeys.LIST_OFFSETS => val topic = new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(List(new ListOffsetsPartition() + .setPartitions(util.List.of(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(0L) - .setCurrentLeaderEpoch(15)).asJava) + .setCurrentLeaderEpoch(15))) ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(List(topic).asJava) + .setTargetTimes(util.List.of(topic)) case ApiKeys.OFFSET_COMMIT => - new OffsetCommitRequest.Builder( + OffsetCommitRequest.Builder.forTopicNames( new OffsetCommitRequestData() .setGroupId("test-group") .setGenerationIdOrMemberEpoch(1) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setTopics( - Collections.singletonList( + util.List.of( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName(topic) .setPartitions( - Collections.singletonList( + util.List.of( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) @@ -292,13 +330,25 @@ class RequestQuotaTest extends BaseRequestTest { ) ) case ApiKeys.OFFSET_FETCH => - new OffsetFetchRequest.Builder(Map("test-group"-> List(tp).asJava).asJava, false, false) + OffsetFetchRequest.Builder.forTopicNames( + new OffsetFetchRequestData() + .setGroups(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("test-group") + .setTopics(util.List.of( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(tp.topic) + .setPartitionIndexes(util.List.of[Integer](tp.partition)) + )) + )), + false + ) case ApiKeys.FIND_COORDINATOR => new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) - .setCoordinatorKeys(Collections.singletonList("test-group"))) + .setCoordinatorKeys(util.List.of("test-group"))) case ApiKeys.JOIN_GROUP => new JoinGroupRequest.Builder( @@ -310,7 +360,7 @@ class RequestQuotaTest extends BaseRequestTest { .setProtocolType("consumer") .setProtocols( new 
JoinGroupRequestProtocolCollection( - Collections.singletonList(new JoinGroupRequestData.JoinGroupRequestProtocol() + util.List.of(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("consumer-range") .setMetadata("test".getBytes())).iterator() ) @@ -329,7 +379,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.LEAVE_GROUP => new LeaveGroupRequest.Builder( "test-leave-group", - Collections.singletonList( + util.List.of( new MemberIdentity() .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID)) ) @@ -340,11 +390,11 @@ class RequestQuotaTest extends BaseRequestTest { .setGroupId("test-sync-group") .setGenerationId(1) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) - .setAssignments(Collections.emptyList()) + .setAssignments(util.List.of) ) case ApiKeys.DESCRIBE_GROUPS => - new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(List("test-group").asJava)) + new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(util.List.of("test-group"))) case ApiKeys.LIST_GROUPS => new ListGroupsRequest.Builder(new ListGroupsRequestData()) @@ -361,23 +411,23 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.CREATE_TOPICS => new CreateTopicsRequest.Builder( new CreateTopicsRequestData().setTopics( - new CreatableTopicCollection(Collections.singleton( + new CreatableTopicCollection(util.Set.of( new CreatableTopic().setName("topic-2").setNumPartitions(1). setReplicationFactor(1.toShort)).iterator()))) case ApiKeys.DELETE_TOPICS => new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopicNames(Collections.singletonList("topic-2")) + .setTopicNames(util.List.of("topic-2")) .setTimeoutMs(5000)) case ApiKeys.DELETE_RECORDS => new DeleteRecordsRequest.Builder( new DeleteRecordsRequestData() .setTimeoutMs(5000) - .setTopics(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsTopic() + .setTopics(util.List.of(new DeleteRecordsRequestData.DeleteRecordsTopic() .setName(tp.topic()) - .setPartitions(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsPartition() + .setPartitions(util.List.of(new DeleteRecordsRequestData.DeleteRecordsPartition() .setPartitionIndex(tp.partition()) .setOffset(0L)))))) @@ -391,14 +441,14 @@ class RequestQuotaTest extends BaseRequestTest { val epochs = new OffsetForLeaderTopicCollection() epochs.add(new OffsetForLeaderTopic() .setTopic(tp.topic()) - .setPartitions(List(new OffsetForLeaderPartition() + .setPartitions(util.List.of(new OffsetForLeaderPartition() .setPartition(tp.partition()) .setLeaderEpoch(0) - .setCurrentLeaderEpoch(15)).asJava)) + .setCurrentLeaderEpoch(15)))) OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs) case ApiKeys.ADD_PARTITIONS_TO_TXN => - AddPartitionsToTxnRequest.Builder.forClient("test-transactional-id", 1, 0, List(tp).asJava) + AddPartitionsToTxnRequest.Builder.forClient("test-transactional-id", 1, 0, util.List.of(tp)) case ApiKeys.ADD_OFFSETS_TO_TXN => new AddOffsetsToTxnRequest.Builder(new AddOffsetsToTxnRequestData() @@ -426,7 +476,7 @@ class RequestQuotaTest extends BaseRequestTest { "test-txn-group", 2, 0, - Map.empty[TopicPartition, TxnOffsetCommitRequest.CommittedOffset].asJava, + util.Map.of[TopicPartition, TxnOffsetCommitRequest.CommittedOffset], true ) @@ -434,7 +484,7 @@ class RequestQuotaTest extends BaseRequestTest { new DescribeAclsRequest.Builder(AclBindingFilter.ANY) case ApiKeys.CREATE_ACLS => - new CreateAclsRequest.Builder(new CreateAclsRequestData().setCreations(Collections.singletonList( + new 
CreateAclsRequest.Builder(new CreateAclsRequestData().setCreations(util.List.of( new CreateAclsRequestData.AclCreation() .setResourceType(AdminResourceType.TOPIC.code) .setResourceName("mytopic") @@ -444,7 +494,7 @@ class RequestQuotaTest extends BaseRequestTest { .setOperation(AclOperation.WRITE.code) .setPermissionType(AclPermissionType.DENY.code)))) case ApiKeys.DELETE_ACLS => - new DeleteAclsRequest.Builder(new DeleteAclsRequestData().setFilters(Collections.singletonList( + new DeleteAclsRequest.Builder(new DeleteAclsRequestData().setFilters(util.List.of( new DeleteAclsRequestData.DeleteAclsFilter() .setResourceTypeFilter(AdminResourceType.TOPIC.code) .setResourceNameFilter(null) @@ -455,14 +505,14 @@ class RequestQuotaTest extends BaseRequestTest { .setPermissionType(AclPermissionType.DENY.code)))) case ApiKeys.DESCRIBE_CONFIGS => new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() - .setResources(Collections.singletonList(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic)))) case ApiKeys.ALTER_CONFIGS => new AlterConfigsRequest.Builder( - Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), - new AlterConfigsRequest.Config(Collections.singleton( + util.Map.of(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), + new AlterConfigsRequest.Config(util.Set.of( new AlterConfigsRequest.ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "1000000") ))), true) @@ -471,7 +521,7 @@ class RequestQuotaTest extends BaseRequestTest { .setPath(logDir) dir.topics.add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic() .setName(tp.topic) - .setPartitions(Collections.singletonList(tp.partition))) + .setPartitions(util.List.of(tp.partition))) val data = new AlterReplicaLogDirsRequestData() data.dirs.add(dir) new AlterReplicaLogDirsRequest.Builder(data) @@ -480,7 +530,7 @@ class RequestQuotaTest extends BaseRequestTest { val data = new DescribeLogDirsRequestData() data.topics.add(new DescribeLogDirsRequestData.DescribableLogDirTopic() .setTopic(tp.topic) - .setPartitions(Collections.singletonList(tp.partition))) + .setPartitions(util.List.of(tp.partition))) new DescribeLogDirsRequest.Builder(data) case ApiKeys.CREATE_PARTITIONS => @@ -493,7 +543,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.CREATE_DELEGATION_TOKEN => new CreateDelegationTokenRequest.Builder( new CreateDelegationTokenRequestData() - .setRenewers(Collections.singletonList(new CreateDelegationTokenRequestData.CreatableRenewers() + .setRenewers(util.List.of(new CreateDelegationTokenRequestData.CreatableRenewers() .setPrincipalType("User") .setPrincipalName("test"))) .setMaxLifetimeMs(1000) @@ -506,7 +556,7 @@ class RequestQuotaTest extends BaseRequestTest { .setExpiryTimePeriodMs(1000L)) case ApiKeys.DESCRIBE_DELEGATION_TOKEN => - new DescribeDelegationTokenRequest.Builder(Collections.singletonList(SecurityUtils.parseKafkaPrincipal("User:test"))) + new DescribeDelegationTokenRequest.Builder(util.List.of(SecurityUtils.parseKafkaPrincipal("User:test"))) case ApiKeys.RENEW_DELEGATION_TOKEN => new RenewDelegationTokenRequest.Builder( @@ -516,12 +566,12 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DELETE_GROUPS => new DeleteGroupsRequest.Builder(new DeleteGroupsRequestData() - .setGroupsNames(Collections.singletonList("test-group"))) + .setGroupsNames(util.List.of("test-group"))) case 
ApiKeys.ELECT_LEADERS => new ElectLeadersRequest.Builder( ElectionType.PREFERRED, - Collections.singletonList(new TopicPartition("my_topic", 0)), + util.List.of(new TopicPartition("my_topic", 0)), 0 ) @@ -544,9 +594,9 @@ class RequestQuotaTest extends BaseRequestTest { new OffsetDeleteRequestData() .setGroupId("test-group") .setTopics(new OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection( - Collections.singletonList(new OffsetDeleteRequestData.OffsetDeleteRequestTopic() + util.List.of(new OffsetDeleteRequestData.OffsetDeleteRequestTopic() .setName("test-topic") - .setPartitions(Collections.singletonList( + .setPartitions(util.List.of( new OffsetDeleteRequestData.OffsetDeleteRequestPartition() .setPartitionIndex(0)))).iterator()))) @@ -554,7 +604,7 @@ class RequestQuotaTest extends BaseRequestTest { new DescribeClientQuotasRequest.Builder(ClientQuotaFilter.all()) case ApiKeys.ALTER_CLIENT_QUOTAS => - new AlterClientQuotasRequest.Builder(List.empty.asJava, false) + new AlterClientQuotasRequest.Builder(util.List.of, false) case ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS => new DescribeUserScramCredentialsRequest.Builder(new DescribeUserScramCredentialsRequestData()) @@ -570,7 +620,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.END_QUORUM_EPOCH => new EndQuorumEpochRequest.Builder(EndQuorumEpochRequest.singletonRequest( - tp, 10, 5, Collections.singletonList(3))) + tp, 10, 5, util.List.of(3))) case ApiKeys.DESCRIBE_QUORUM => new DescribeQuorumRequest.Builder(DescribeQuorumRequest.singletonRequest( @@ -589,7 +639,7 @@ class RequestQuotaTest extends BaseRequestTest { "client-id", 0 ) - val embedRequestData = new AlterClientQuotasRequest.Builder(List.empty.asJava, false).build() + val embedRequestData = new AlterClientQuotasRequest.Builder(util.List.of, false).build() .serializeWithHeader(requestHeader) new EnvelopeRequest.Builder(embedRequestData, new Array[Byte](0), InetAddress.getByName("192.168.1.1").getAddress) @@ -599,9 +649,9 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_PRODUCERS => new DescribeProducersRequest.Builder(new DescribeProducersRequestData() - .setTopics(List(new DescribeProducersRequestData.TopicRequest() + .setTopics(util.List.of(new DescribeProducersRequestData.TopicRequest() .setName("test-topic") - .setPartitionIndexes(List(1, 2, 3).map(Int.box).asJava)).asJava)) + .setPartitionIndexes(util.List.of[Integer](1, 2, 3))))) case ApiKeys.BROKER_REGISTRATION => new BrokerRegistrationRequest.Builder(new BrokerRegistrationRequestData()) @@ -614,7 +664,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TRANSACTIONS => new DescribeTransactionsRequest.Builder(new DescribeTransactionsRequestData() - .setTransactionalIds(List("test-transactional-id").asJava)) + .setTransactionalIds(util.List.of("test-transactional-id"))) case ApiKeys.LIST_TRANSACTIONS => new ListTransactionsRequest.Builder(new ListTransactionsRequestData()) @@ -637,23 +687,39 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) - case ApiKeys.LIST_CLIENT_METRICS_RESOURCES => - new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()) + case ApiKeys.LIST_CONFIG_RESOURCES => + new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()) case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) case 
ApiKeys.SHARE_GROUP_HEARTBEAT => - new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) + new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData()) case ApiKeys.SHARE_GROUP_DESCRIBE => - new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) + new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData()) case ApiKeys.SHARE_FETCH => - new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) + new ShareFetchRequest.Builder( + new ShareFetchRequestData() + .setGroupId("test-share-group") + .setMemberId(Uuid.randomUuid().toString) + .setShareSessionEpoch(0) + .setMaxWaitMs(0) + .setMinBytes(1) + .setMaxBytes(1000000) + .setTopics(new ShareFetchRequestData.FetchTopicCollection( + util.List.of(new ShareFetchRequestData.FetchTopic() + .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) + .setPartitions( + new ShareFetchRequestData.FetchPartitionCollection( + util.List.of(new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(tp.partition) + ).iterator)) + ).iterator))) case ApiKeys.SHARE_ACKNOWLEDGE => - new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) + new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData()) case ApiKeys.ADD_RAFT_VOTER => new AddRaftVoterRequest.Builder(new AddRaftVoterRequestData()) @@ -665,19 +731,19 @@ class RequestQuotaTest extends BaseRequestTest { new UpdateRaftVoterRequest.Builder(new UpdateRaftVoterRequestData()) case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => - new InitializeShareGroupStateRequest.Builder(new InitializeShareGroupStateRequestData(), true) + new InitializeShareGroupStateRequest.Builder(new InitializeShareGroupStateRequestData()) case ApiKeys.READ_SHARE_GROUP_STATE => - new ReadShareGroupStateRequest.Builder(new ReadShareGroupStateRequestData(), true) + new ReadShareGroupStateRequest.Builder(new ReadShareGroupStateRequestData()) case ApiKeys.WRITE_SHARE_GROUP_STATE => - new WriteShareGroupStateRequest.Builder(new WriteShareGroupStateRequestData(), true) + new WriteShareGroupStateRequest.Builder(new WriteShareGroupStateRequestData()) case ApiKeys.DELETE_SHARE_GROUP_STATE => - new DeleteShareGroupStateRequest.Builder(new DeleteShareGroupStateRequestData(), true) + new DeleteShareGroupStateRequest.Builder(new DeleteShareGroupStateRequestData()) case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => - new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData(), true) + new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData()) case ApiKeys.STREAMS_GROUP_HEARTBEAT => new StreamsGroupHeartbeatRequest.Builder(new StreamsGroupHeartbeatRequestData(), true) @@ -686,13 +752,13 @@ class RequestQuotaTest extends BaseRequestTest { new StreamsGroupDescribeRequest.Builder(new StreamsGroupDescribeRequestData(), true) case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => - new DescribeShareGroupOffsetsRequest.Builder(new DescribeShareGroupOffsetsRequestData(), true) + new DescribeShareGroupOffsetsRequest.Builder(new DescribeShareGroupOffsetsRequestData()) case ApiKeys.ALTER_SHARE_GROUP_OFFSETS => - new AlterShareGroupOffsetsRequest.Builder(new AlterShareGroupOffsetsRequestData(), true) + new AlterShareGroupOffsetsRequest.Builder(new AlterShareGroupOffsetsRequestData()) case ApiKeys.DELETE_SHARE_GROUP_OFFSETS => - new DeleteShareGroupOffsetsRequest.Builder(new DeleteShareGroupOffsetsRequestData(), true) + new DeleteShareGroupOffsetsRequest.Builder(new 
DeleteShareGroupOffsetsRequestData()) case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) @@ -785,6 +851,21 @@ class RequestQuotaTest extends BaseRequestTest { s"Throttle time metrics for request quota updated: $smallQuotaConsumerClient") } + private def checkSmallQuotaShareFetchRequestThrottleTime(): Unit = { + // Request until throttled using client-id with default small consumer quota + // This test verifies ShareFetch is throttled similarly to Fetch (KIP-932) + val smallQuotaShareFetchClient = Client(smallQuotaConsumerClientId, ApiKeys.SHARE_FETCH) + val throttled = smallQuotaShareFetchClient.runUntil(_.throttleTimeMs > 0) + + assertTrue(throttled, s"ShareFetch response not throttled: $smallQuotaShareFetchClient") + // KIP-932: ShareFetch should use the same quota and sensors as Fetch + // Since the implementation uses the same quota mechanisms, we verify throttling occurs + assertTrue(throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.FETCH) > 0, + s"ShareFetch should be throttled using FETCH quota sensors: $smallQuotaShareFetchClient") + assertTrue(throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.REQUEST).isNaN, + s"Throttle time metrics for request quota updated: $smallQuotaShareFetchClient") + } + private def checkUnthrottledClient(apiKey: ApiKeys): Unit = { // Test that request from client with large quota is not throttled @@ -825,13 +906,16 @@ object RequestQuotaTest { class KraftTestAuthorizer extends StandardAuthorizer { override def authorize(requestContext: AuthorizableRequestContext, actions: util.List[Action]): util.List[AuthorizationResult] = { - actions.asScala.map { _ => - if (requestContext.principal != UnauthorizedPrincipal) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED - }.asJava + val results = new util.ArrayList[AuthorizationResult]() + actions.forEach(_ => { + val result = if (requestContext.principal != UnauthorizedPrincipal) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED + results.add(result) + }) + results } } - class TestPrincipalBuilder extends KafkaPrincipalBuilder with KafkaPrincipalSerde { + class TestPrincipalBuilder extends KafkaPrincipalBuilder { override def build(context: AuthenticationContext): KafkaPrincipal = { principal } diff --git a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala index 711598306f437..580b4a71f09b6 100644 --- a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala @@ -22,11 +22,11 @@ import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.test.api.{ClusterTest, Type} import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions._ import java.net.Socket import java.util.Collections -import scala.jdk.CollectionConverters._ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersionsRequestTest(cluster) { @@ -35,7 +35,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestBeforeSaslHandshakeRequest(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, 
cluster.clientListener()) + val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) try { val apiVersionsResponse = IntegrationTestUtils.sendAndReceive[ApiVersionsResponse]( new ApiVersionsRequest.Builder().build(0), socket) @@ -56,7 +56,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestAfterSaslHandshakeRequest(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) + val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) try { sendSaslHandshakeRequestValidateResponse(socket) val response = IntegrationTestUtils.sendAndReceive[ApiVersionsResponse]( @@ -72,7 +72,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestWithUnsupportedVersion(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) + val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) try { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0) val apiVersionsResponse = sendUnsupportedApiVersionRequest(apiVersionsRequest) diff --git a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala index a6b71d912b387..ec1679dd6324e 100644 --- a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala +++ b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala @@ -21,7 +21,6 @@ import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} import java.io.File import java.util.concurrent.CancellationException import kafka.integration.KafkaServerTestHarness -import kafka.log.LogManager import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.security.auth.SecurityProtocol @@ -29,11 +28,12 @@ import org.apache.kafka.common.serialization.{IntegerDeserializer, IntegerSerial import org.apache.kafka.common.utils.Exit import org.apache.kafka.metadata.BrokerState import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} -import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} +import org.apache.kafka.storage.internals.log.LogManager +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo, Timeout} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.function.Executable import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{MethodSource, ValueSource} +import org.junit.jupiter.params.provider.MethodSource import java.time.Duration import java.util.Properties @@ -104,8 +104,8 @@ class ServerShutdownTest extends KafkaServerTestHarness { // do a clean shutdown and check that offset checkpoint file exists shutdownBroker() - for (logDir <- config.logDirs) { - val OffsetCheckpointFile = new File(logDir, LogManager.RecoveryPointCheckpointFile) + for (logDir <- config.logDirs.asScala) { + val OffsetCheckpointFile = new File(logDir, LogManager.RECOVERY_POINT_CHECKPOINT_FILE) assertTrue(OffsetCheckpointFile.exists) assertTrue(OffsetCheckpointFile.length() > 0) } @@ -134,21 +134,19 @@ class ServerShutdownTest extends KafkaServerTestHarness { producer.close() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def 
testCleanShutdownAfterFailedStartup(quorum: String): Unit = { + @Test + def testCleanShutdownAfterFailedStartup(): Unit = { propsToChangeUponRestart.setProperty(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG, "1000") shutdownBroker() shutdownKRaftController() verifyCleanShutdownAfterFailedStartup[CancellationException] } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testNoCleanShutdownAfterFailedStartupDueToCorruptLogs(quorum: String): Unit = { + @Test + def testNoCleanShutdownAfterFailedStartupDueToCorruptLogs(): Unit = { createTopic(topic) shutdownBroker() - config.logDirs.foreach { dirName => + config.logDirs.forEach { dirName => val partitionDir = new File(dirName, s"$topic-0") partitionDir.listFiles.foreach(f => TestUtils.appendNonsenseToFile(f, TestUtils.random.nextInt(1024) + 1)) } @@ -174,9 +172,8 @@ class ServerShutdownTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testShutdownWithKRaftControllerUnavailable(quorum: String): Unit = { + @Test + def testShutdownWithKRaftControllerUnavailable(): Unit = { shutdownKRaftController() killBroker(0, Duration.ofSeconds(1)) CoreUtils.delete(broker.config.logDirs) @@ -220,9 +217,8 @@ class ServerShutdownTest extends KafkaServerTestHarness { .count(isNonDaemonKafkaThread)) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testConsecutiveShutdown(quorum: String): Unit = { + @Test + def testConsecutiveShutdown(): Unit = { shutdownBroker() brokers.head.shutdown() } diff --git a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala index 9fc3165bd7b5c..27938114517a9 100644 --- a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala @@ -17,64 +17,73 @@ package kafka.server import kafka.utils.TestUtils -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, ClusterTests, Type} +import org.apache.kafka.clients.admin.DescribeShareGroupsOptions +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, ClusterTests, Type} import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords -import org.apache.kafka.common.message.{ShareAcknowledgeRequestData, ShareAcknowledgeResponseData, ShareFetchRequestData, ShareFetchResponseData} +import org.apache.kafka.common.message.{FindCoordinatorRequestData, ShareAcknowledgeRequestData, ShareAcknowledgeResponseData, ShareFetchRequestData, ShareFetchResponseData, ShareGroupHeartbeatRequestData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.common.requests.{ShareAcknowledgeRequest, ShareAcknowledgeResponse, ShareFetchRequest, ShareFetchResponse, ShareRequestMetadata} +import org.apache.kafka.common.requests.{FindCoordinatorRequest, FindCoordinatorResponse, ShareAcknowledgeRequest, ShareAcknowledgeResponse, ShareFetchRequest, ShareFetchResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, ShareRequestMetadata} import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.common.Feature +import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.junit.jupiter.api.{AfterEach, Timeout} 
+import java.net.Socket import java.util -import java.util.Collections -import scala.jdk.CollectionConverters._ @Timeout(1200) @ClusterTestDefaults(types = Array(Type.KRAFT), brokers = 1, serverProperties = Array( new ClusterConfigProperty(key = "group.share.persister.class.name", value = "") )) -class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster){ +class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { private final val MAX_WAIT_MS = 5000 + private final val GROUP_ID = "group" + private final val TOPIC = "topic" + private final val PARTITION = 0 + private final val MEMBER_ID = Uuid.randomUuid() @AfterEach def tearDown(): Unit = { - closeProducer + closeProducer() + closeSockets() } @ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + features = Array( + new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) ) ) def testShareFetchRequestIsInAccessibleWhenConfigsDisabled(): Unit = { - val groupId: String = "group" val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - val send: Seq[TopicIdPartition] = Seq( + val send = util.List.of( new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 0)), new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 1)) ) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val socket: Socket = connectAny() + + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) assertEquals(Errors.UNSUPPORTED_VERSION.code, shareFetchResponse.data.errorCode) assertEquals(0, shareFetchResponse.data.acquisitionLockTimeoutMs) } @ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + features = Array( + new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) ) ) def testShareAcknowledgeRequestIsInAccessibleWhenConfigsDisabled(): Unit = { - val groupId: String = "group" val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, Map.empty) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + val socket: Socket = connectAny() + + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, util.Map.of) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) assertEquals(Errors.UNSUPPORTED_VERSION.code, shareAcknowledgeResponse.data.errorCode) } @@ -83,8 +92,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ), @@ -92,44 +99,45 @@ class 
ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ), brokers = 2 ), ) ) def testShareFetchRequestToNonLeaderReplica(): Unit = { - val groupId: String = "group" - val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - - val topic = "topic" - val partition = 0 + val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) // Create a single-partition topic and find a broker which is not the leader - val partitionToLeader = createTopicAndReturnLeaders(topic) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - val topicNames = topicIds.asScala.map(_.swap).asJava + val partitionToLeader = createTopicAndReturnLeaders(TOPIC) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val topicNames = { + val map = new java.util.LinkedHashMap[Uuid, String]() + topicIds.forEach((k, v) => map.put(v, k)) // swap key and value + map + } val leader = partitionToLeader(topicIdPartition) val nonReplicaOpt = getBrokers.find(_.config.brokerId != leader) assertTrue(nonReplicaOpt.isDefined) - val nonReplicaId = nonReplicaOpt.get.config.brokerId + val nonReplicaId = nonReplicaOpt.get.config.brokerId + + val send = util.List.of(topicIdPartition) + + val socket: Socket = connect(nonReplicaId) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 1)) // Send the share fetch request to the non-replica and verify the error code - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest, nonReplicaId) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) assertEquals(30000, shareFetchResponse.data.acquisitionLockTimeoutMs) - val partitionData = shareFetchResponse.responseData(topicNames).get(topicIdPartition) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, partitionData.errorCode) assertEquals(leader, partitionData.currentLeader().leaderId()) @@ -139,66 +147,61 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new 
ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ) ) ) def testShareFetchRequestSuccess(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val send = util.List.of(topicIdPartition) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + val socket: Socket = connectAny() + + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, 
shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } @@ -206,43 +209,38 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ) ) ) def testShareFetchRequestSuccessMultiplePartitions(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 0)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 1)) + val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 2)) - val topic = "topic" + val send = util.List.of(topicIdPartition1, topicIdPartition2, topicIdPartition3) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, 0)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, 1)) - val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(topic, 2)) + val socket: Socket = connectAny() - 
val send: Seq[TopicIdPartition] = Seq(topicIdPartition1, topicIdPartition2, topicIdPartition3) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partitions - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic partitions created above @@ -251,23 +249,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo produceData(topicIdPartition3, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) + val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. var responses = Seq[ShareFetchResponseData.PartitionData]() TestUtils.waitUntilTrue(() => { - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() + val partitionsCount = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() if (partitionsCount > 0) { - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - shareFetchResponseData.responses().get(0).partitions().asScala.foreach(partitionData => { + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + shareFetchResponseData.responses().stream().findFirst().get().partitions().forEach(partitionData => { if (!partitionData.acquiredRecords().isEmpty) { responses = responses :+ partitionData } @@ -280,19 +278,19 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) val expectedPartitionData2 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + 
.setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) val expectedPartitionData3 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) responses.foreach(partitionData => { partitionData.partitionIndex() match { @@ -307,8 +305,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ), @@ -316,52 +312,51 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ), brokers = 3 ), ) ) def testShareFetchRequestSuccessMultiplePartitionsMultipleBrokers(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() - - val topic = "topic" - - val partitionToLeaders = createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, 0)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, 1)) - val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(topic, 2)) + val partitionToLeaders = createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 0)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 1)) + val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 2)) val leader1 = partitionToLeaders(topicIdPartition1) val leader2 = partitionToLeaders(topicIdPartition2) val leader3 = partitionToLeaders(topicIdPartition3) - val send1: Seq[TopicIdPartition] = Seq(topicIdPartition1) - val send2: Seq[TopicIdPartition] = Seq(topicIdPartition2) - val send3: Seq[TopicIdPartition] = Seq(topicIdPartition3) + val send1 = util.List.of(topicIdPartition1) + val send2 = util.List.of(topicIdPartition2) + val send3 = 
util.List.of(topicIdPartition3) - val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + + val socket1: Socket = connect(leader1) + val socket2: Socket = connect(leader2) + val socket3: Socket = connect(leader3) + + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partitions // Create different share fetch requests for different partitions as they may have leaders on separate brokers - var shareFetchRequest1 = createShareFetchRequest(groupId, metadata, send1, Seq.empty, acknowledgementsMap) - var shareFetchRequest2 = createShareFetchRequest(groupId, metadata, send2, Seq.empty, acknowledgementsMap) - var shareFetchRequest3 = createShareFetchRequest(groupId, metadata, send3, Seq.empty, acknowledgementsMap) + var shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata, send1, util.List.of, acknowledgementsMap) + var shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata, send2, util.List.of, acknowledgementsMap) + var shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata, send3, util.List.of, acknowledgementsMap) - var shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) - var shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) - var shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3, destination = leader3) + var shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) + var shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) + var shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) initProducer() // Producing 10 records to the topic partitions created above @@ -371,54 +366,54 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above // Create different share fetch requests for different partitions as they may have leaders on separate brokers - shareFetchRequest1 = createShareFetchRequest(groupId, metadata, send1, Seq.empty, acknowledgementsMap) - shareFetchRequest2 = createShareFetchRequest(groupId, metadata, send2, Seq.empty, acknowledgementsMap) - shareFetchRequest3 = createShareFetchRequest(groupId, metadata, send3, Seq.empty, acknowledgementsMap) + shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata, send1, util.List.of, acknowledgementsMap) + shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata, send2, util.List.of, acknowledgementsMap) + shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata, send3, util.List.of, acknowledgementsMap) - shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) - shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) - shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3, destination = leader3) + 
shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) + shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) + shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) val shareFetchResponseData1 = shareFetchResponse1.data() assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) assertEquals(30000, shareFetchResponseData1.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData1.responses().size()) - assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) - val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) + assertEquals(topicId, shareFetchResponseData1.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData1.responses().stream().findFirst().get().partitions().size()) + val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) val shareFetchResponseData2 = shareFetchResponse2.data() assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) assertEquals(30000, shareFetchResponseData2.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData2.responses().size()) - assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) - val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) + assertEquals(topicId, shareFetchResponseData2.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData2.responses().stream().findFirst().get().partitions().size()) + val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) val shareFetchResponseData3 = shareFetchResponse3.data() assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) assertEquals(1, shareFetchResponseData3.responses().size()) - assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData3.responses().get(0).partitions().size()) - val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) + assertEquals(topicId, shareFetchResponseData3.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData3.responses().stream().findFirst().get().partitions().size()) + val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) val expectedPartitionData1 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) val expectedPartitionData2 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) val expectedPartitionData3 = new 
ShareFetchResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) compareFetchResponsePartitions(expectedPartitionData1, partitionData1) compareFetchResponsePartitions(expectedPartitionData2, partitionData2) @@ -429,42 +424,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeRequestSuccessAccept(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize share partitions - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -472,49 +461,49 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, 
Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, 
shareAcknowledgeResponseData.responses().get(0).topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 more records to the topic @@ -522,24 +511,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -547,8 +536,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", 
value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000") @@ -556,35 +543,31 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), new ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000") ) ), ) ) def testShareFetchRequestPiggybackedAccept(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send, 15000) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket, 15000) initProducer() // Producing 10 records to the topic created above @@ -592,25 +575,25 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch: Int = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, 
shareFetchResponseData.errorCode) assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -618,28 +601,28 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 
0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic @@ -647,24 +630,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(20), Collections.singletonList(29), Collections.singletonList(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been acknowledged before and 10 to 19 are currently acquired + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(20), util.List.of(29), util.List.of(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been acknowledged before and 10 to 19 are currently acquired - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -672,42 +655,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new 
ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeRequestSuccessRelease(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partiion - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -715,71 +692,71 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + 
assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = 
createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -787,42 +764,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestPiggybackedRelease(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val 
topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -830,64 +801,64 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above produceData(topicIdPartition, 10) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) 
.setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(List(0L, 10L).asJava, List(9L, 19L).asJava, List(2, 1).asJava)) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0L, 10L), util.List.of(9L, 19L), util.List.of(2, 1))) - val acquiredRecords : util.List[AcquiredRecords] = new util.ArrayList[AcquiredRecords]() + val acquiredRecords = new util.ArrayList[AcquiredRecords]() var releaseAcknowledgementSent = false TestUtils.waitUntilTrue(() => { shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) if (releaseAcknowledgementSent) { // For fourth share fetch request onwards - acknowledgementsMapForFetch = Map.empty + acknowledgementsMapForFetch = util.Map.of } else { // Send a third Share Fetch request with piggybacked acknowledgements - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records + .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records releaseAcknowledgementSent = true } - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - val responseSize = shareFetchResponseData.responses().get(0).partitions().size() + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + val responseSize = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() if (responseSize > 0) { - acquiredRecords.addAll(shareFetchResponseData.responses().get(0).partitions().get(0).acquiredRecords()) + acquiredRecords.addAll(shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0).acquiredRecords()) } // There should be 2 acquired record batches finally - // 1. batch containing 0-9 offsets which were initially acknowledged as RELEASED. 
@@ -906,42 +877,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeRequestSuccessReject(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -949,49 +914,49 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = 
IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(3.toByte))).asJava) // Reject the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + .setAcknowledgeTypes(util.List.of(3.toByte)))) // Reject the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) 
.setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 more records to the topic @@ -999,24 +964,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been rejected + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been rejected - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -1024,42 +989,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key 
= "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestPiggybackedReject(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1067,25 +1026,25 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) 
.setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1093,28 +1052,28 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(3.toByte))).asJava) // Reject the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + .setAcknowledgeTypes(util.List.of(3.toByte)))) // Reject the records + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been rejected + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been rejected - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic @@ -1122,24 +1081,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements 
were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(20), Collections.singletonList(29), Collections.singletonList(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been rejected before and 10 to 19 are currently acquired + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(20), util.List.of(29), util.List.of(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been rejected before and 10 to 19 are currently acquired - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -1147,8 +1106,6 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.delivery.count.limit", value = "2") // Setting max delivery count config to 2 @@ -1156,35 +1113,31 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = 
"share.coordinator.state.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), new ClusterConfigProperty(key = "group.share.delivery.count.limit", value = "2") // Setting max delivery count config to 2 ) ), ) ) def testShareAcknowledgeRequestMaxDeliveryAttemptExhausted(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the shar partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1192,94 +1145,94 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + 
.setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records - var shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - var shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records + var shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + var shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) var shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) var expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) - var acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + var acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = 
shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForAcknowledge = Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records again - shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForAcknowledge = util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records again + shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) - acknowledgePartitionData = 
shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 new records to the topic @@ -1287,24 +1240,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only new records from offset 10 to 19 will be fetched, records at offsets 0 to 9 have been archived because delivery count limit has been exceeded + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only new records from offset 10 to 19 will be fetched, records at offsets 0 to 9 have been archived because delivery count limit has been exceeded - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -1312,119 +1265,108 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new 
ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestSuccessfulSharingBetweenMultipleConsumers(): Unit = { - val groupId: String = "group" - - val memberId = Uuid.randomUuid() val memberId1 = Uuid.randomUuid() val memberId2 = Uuid.randomUuid() val memberId3 = Uuid.randomUuid() - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val send = util.List.of(topicIdPartition) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + val socket1: Socket = connectAny() + val socket2: Socket = connectAny() + val socket3: Socket = connectAny() + + createOffsetsTopic() + shareHeartbeat(memberId1, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + shareHeartbeat(memberId2, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + shareHeartbeat(memberId3, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Sending a dummy share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(memberId1, GROUP_ID, send, socket1) initProducer() - // Producing 10000 records to the topic created above - produceData(topicIdPartition, 10000) + // Producing 1 record to the topic created above + produceData(topicIdPartition, 1) - // Sending 3 share Fetch Requests with same groupId to the same topicPartition but with different memberIds, - // mocking the behaviour of multiple share consumers from the same share group + // Sending a share Fetch Request val metadata1: ShareRequestMetadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, send, Seq.empty, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) - - val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, send, Seq.empty, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) - - val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, send, Seq.empty, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) - - val shareFetchResponse1 = 
connectAndReceive[ShareFetchResponse](shareFetchRequest1) - val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) - val shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3) - - + val acknowledgementsMap1 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata1, send, util.List.of, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) + val shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) val shareFetchResponseData1 = shareFetchResponse1.data() - assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) - assertEquals(1, shareFetchResponseData1.responses().size()) - assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) + val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) - val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) + // Producing 1 record to the topic created above + produceData(topicIdPartition, 1) + // Sending another share Fetch Request with same groupId to the same topicPartition but with different memberId, + // mocking the behaviour of multiple share consumers from the same share group + val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap2 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata2, send, util.List.of, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) + val shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) val shareFetchResponseData2 = shareFetchResponse2.data() - assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) - assertEquals(1, shareFetchResponseData2.responses().size()) - assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) + val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) - val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) + // Producing 1 record to the topic created above + produceData(topicIdPartition, 1) + // Sending another share Fetch Request with same groupId to the same topicPartition but with different memberId, + // mocking the behaviour of multiple share consumers from the same share group + val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap3 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata3, send, util.List.of, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) + val shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) val shareFetchResponseData3 = shareFetchResponse3.data() - assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) - assertEquals(1, shareFetchResponseData3.responses().size()) - assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) - assertEquals(1, 
shareFetchResponseData3.responses().get(0).partitions().size()) + val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) + + // Each consumer should have received 1 record and any record should only be consumed by 1 consumer + assertEquals(partitionData1.acquiredRecords().get(0).firstOffset(), partitionData1.acquiredRecords().get(0).lastOffset()) + assertEquals(partitionData1.acquiredRecords().get(0).firstOffset(), 0) + + assertEquals(partitionData2.acquiredRecords().get(0).firstOffset(), partitionData2.acquiredRecords().get(0).lastOffset()) + assertEquals(partitionData2.acquiredRecords().get(0).firstOffset(), 1) - val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) + assertEquals(partitionData3.acquiredRecords().get(0).firstOffset(), partitionData3.acquiredRecords().get(0).lastOffset()) + assertEquals(partitionData3.acquiredRecords().get(0).firstOffset(), 2) - // There should be no common records between the 3 consumers as they are part of the same group - assertTrue(partitionData1.acquiredRecords().get(0).lastOffset() < partitionData2.acquiredRecords().get(0).firstOffset()) - assertTrue(partitionData2.acquiredRecords().get(0).lastOffset() < partitionData3.acquiredRecords().get(0).firstOffset()) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) @@ -1438,20 +1380,26 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo val memberId2 = Uuid.randomUuid() val memberId3 = Uuid.randomUuid() - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket1: Socket = connectAny() + val socket2: Socket = connectAny() + val socket3: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(memberId1, groupId1, 
util.Map.of[String, Int](TOPIC, 3)) + shareHeartbeat(memberId2, groupId2, util.Map.of[String, Int](TOPIC, 3)) + shareHeartbeat(memberId3, groupId3, util.Map.of[String, Int](TOPIC, 3)) - // Sending 3 dummy share Fetch Requests with to inititlaize the share partitions for each share group\ - sendFirstShareFetchRequest(memberId1, groupId1, send) - sendFirstShareFetchRequest(memberId2, groupId2, send) - sendFirstShareFetchRequest(memberId3, groupId3, send) + // Sending 3 dummy share Fetch Requests to initialize the share partitions for each share group + sendFirstShareFetchRequest(memberId1, groupId1, send, socket1) + sendFirstShareFetchRequest(memberId2, groupId2, send, socket2) + sendFirstShareFetchRequest(memberId3, groupId3, send, socket3) initProducer() // Producing 10 records to the topic created above @@ -1460,92 +1408,86 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending 3 share Fetch Requests with different groupId and different memberIds to the same topicPartition, // mocking the behaviour of 3 different share groups val metadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, send, Seq.empty, acknowledgementsMap1) + val acknowledgementsMap1 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, send, util.List.of, acknowledgementsMap1) val metadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, send, Seq.empty, acknowledgementsMap2) + val acknowledgementsMap2 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, send, util.List.of, acknowledgementsMap2) val metadata3 = new ShareRequestMetadata(memberId3, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, send, Seq.empty, acknowledgementsMap3) + val acknowledgementsMap3 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, send, util.List.of, acknowledgementsMap3) - val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) - val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) - val shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3) + val shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) + val shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) + val shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) val shareFetchResponseData1 = shareFetchResponse1.data() assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) assertEquals(1, 
shareFetchResponseData1.responses().size()) - assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData1.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData1.responses().stream().findFirst().get().partitions().size()) - val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) + val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) val shareFetchResponseData2 = shareFetchResponse2.data() assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) assertEquals(1, shareFetchResponseData2.responses().size()) - assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData2.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData2.responses().stream().findFirst().get().partitions().size()) - val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) + val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) val shareFetchResponseData3 = shareFetchResponse3.data() assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) assertEquals(1, shareFetchResponseData3.responses().size()) - assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData3.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData3.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData3.responses().stream().findFirst().get().partitions().size()) - val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) + val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) // All the consumers should consume all the records since they are part of different groups - assertEquals(partitionData1.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - assertEquals(partitionData2.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - assertEquals(partitionData3.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + assertEquals(partitionData1.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + assertEquals(partitionData2.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + assertEquals(partitionData3.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = 
"group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareSessionCloseWithShareFetch(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1553,25 +1495,25 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, 
shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1579,39 +1521,39 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) 
compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Sending a final fetch request to close the session shareSessionEpoch = ShareRequestMetadata.FINAL_EPOCH - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(19) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) @@ -1623,42 +1565,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareSessionCloseWithShareAcknowledge(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - 
sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1666,25 +1602,25 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1692,52 +1628,52 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = 
connectAndReceive[ShareFetchResponse](shareFetchRequest) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Sending a Share Acknowledge request to close the session shareSessionEpoch = ShareRequestMetadata.FINAL_EPOCH - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(10) - .setLastOffset(19) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(10) + .setLastOffset(19) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) - 
assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) } @@ -1745,52 +1681,43 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchInitialEpochWithAcknowledgements(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() - - val topic = "topic" - val partition = 0 - - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + val send = util.List.of(topicIdPartition) - val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Acknowledgements in the Initial Fetch Request - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, 
Seq.empty, acknowledgementsMap) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val socket: Socket = connectAny() + + val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)))) // Acknowledgements in the Initial Fetch Request + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() // The response will have a top level error code because this is an Initial Fetch request with acknowledgement data present @@ -1802,48 +1729,39 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeInitialRequestError(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 - - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() // Send the share fetch request to fetch the records produced above - val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> - List(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) - val 
shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, + util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)))) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareAcknowledgeResponseData.errorCode) @@ -1853,42 +1771,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestInvalidShareSessionEpoch(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1896,31 +1808,31 @@ class 
ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) // Sending a thord Share Fetch request with invalid share session epoch shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(shareSessionEpoch)) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareFetchResponseData.errorCode) @@ -1930,42 +1842,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = 
"classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeRequestInvalidShareSessionEpoch(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topic = "topic" - val partition = 0 + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -1973,36 +1879,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - 
.setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) // Sending Share Acknowledge request with invalid share session epoch shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(shareSessionEpoch)) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) - val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(util.List.of(1.toByte)))) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareAcknowledgeResponseData.errorCode) @@ -2012,43 +1918,38 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestShareSessionNotFound(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() val 
wrongMemberId = Uuid.randomUuid() - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val send = util.List.of(topicIdPartition) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + val socket: Socket = connectAny() + + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -2056,31 +1957,30 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) - + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) - // Sending a third Share Fetch request with wrong member Id + // Sending a third Share Fetch request with wrong memberId shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(wrongMemberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - shareFetchResponse = 
connectAndReceive[ShareFetchResponse](shareFetchRequest) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.SHARE_SESSION_NOT_FOUND.code, shareFetchResponseData.errorCode) @@ -2090,43 +1990,122 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "group.share.max.share.sessions", value = "2"), + new ClusterConfigProperty(key = "group.share.max.size", value = "2") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "group.share.max.share.sessions", value = "2"), + new ClusterConfigProperty(key = "group.share.max.size", value = "2") + ) + ), + ) + ) + def testShareSessionEvictedOnConnectionDrop(): Unit = { + val memberId1 = Uuid.randomUuid() + val memberId2 = Uuid.randomUuid() + val memberId3 = Uuid.randomUuid() + + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + + val send = util.List.of(topicIdPartition) + + val socket1: Socket = connectAny() + val socket2: Socket = connectAny() + val socket3: Socket = connectAny() + + // member1 sends share fetch request to register its share session. Note it does not close the socket connection after. + TestUtils.waitUntilTrue(() => { + val metadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket1) + val shareFetchResponseData = shareFetchResponse.data() + shareFetchResponseData.errorCode == Errors.NONE.code + }, "Share fetch request failed", 5000) + + // member2 sends share fetch request to register its share session. Note it does not close the socket connection after. 
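+    // Note: with group.share.max.share.sessions set to 2 in this test's configuration, the share session cache is full once member1 and member2 have registered.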
+ TestUtils.waitUntilTrue(() => { + val metadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket2) + val shareFetchResponseData = shareFetchResponse.data() + shareFetchResponseData.errorCode == Errors.NONE.code + }, "Share fetch request failed", 5000) + + // member3 sends share fetch request to register its share session. Since the maximum number of share sessions that could + // exist in the share session cache is 2 (group.share.max.share.sessions), the attempt to register a third + // share session with the ShareSessionCache would throw SHARE_SESSION_LIMIT_REACHED + TestUtils.waitUntilTrue(() => { + val metadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket3) + val shareFetchResponseData = shareFetchResponse.data() + shareFetchResponseData.errorCode == Errors.SHARE_SESSION_LIMIT_REACHED.code + }, "Share fetch request failed", 5000) + + // Now we will close the socket connections for the members, mimicking a client disconnection + closeSockets() + + val socket4: Socket = connectAny() + + // Since one of the socket connections was closed before, the corresponding share session was dropped from the ShareSessionCache + // on the broker. Now, since the cache is not full, new share sessions can be registered + TestUtils.waitUntilTrue(() => { + val metadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket4) + val shareFetchResponseData = shareFetchResponse.data() + shareFetchResponseData.errorCode == Errors.NONE.code + }, "Share fetch request failed", 5000) + } + + @ClusterTests( + Array( + new ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") + ) + ), + new ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), + new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareAcknowledgeRequestShareSessionNotFound(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() val wrongMemberId = Uuid.randomUuid() - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = 
getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above @@ -2134,36 +2113,36 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) - // Sending a Share Acknowledge request with wrong member Id + // Sending a Share Acknowledge request with wrong memberId shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(wrongMemberId, shareSessionEpoch) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) - val shareAcknowledgeRequest = 
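+    // The fetch and acknowledge requests below are sent over the same socket that was used to register the share session above.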
createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) + .setAcknowledgeTypes(util.List.of(1.toByte)))) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.SHARE_SESSION_NOT_FOUND.code, shareAcknowledgeResponseData.errorCode) @@ -2173,44 +2152,40 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ), ) ) def testShareFetchRequestForgetTopicPartitions(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid() - - val topic = "topic1" val partition1 = 0 val partition2 = 1 - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, partition1)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, partition2)) + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, partition1)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, partition2)) + + val send = util.List.of(topicIdPartition1, topicIdPartition2) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition1, topicIdPartition2) + val socket: Socket = connectAny() + + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic partitions created above @@ -2219,23 +2194,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records 
produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - var shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap) + var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. var responses = Seq[ShareFetchResponseData.PartitionData]() TestUtils.waitUntilTrue(() => { - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() + val partitionsCount = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() if (partitionsCount > 0) { - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - shareFetchResponseData.responses().get(0).partitions().asScala.foreach(partitionData => { + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + shareFetchResponseData.responses().stream().findFirst().get().partitions().forEach(partitionData => { if (!partitionData.acquiredRecords().isEmpty) { responses = responses :+ partitionData } @@ -2250,25 +2225,25 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send another share fetch request with forget list populated with topicIdPartition2 shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - val forget: Seq[TopicIdPartition] = Seq(topicIdPartition1) - shareFetchRequest = createShareFetchRequest(groupId, metadata, Seq.empty, forget, acknowledgementsMap) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + val forget = util.List.of(topicIdPartition1) + shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, util.List.of, forget, acknowledgementsMap) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) + assertEquals(1, 
shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) - val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } @@ -2276,66 +2251,61 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ) ) ) def testShareFetchRequestWithMaxRecordsAndBatchSize(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val send = util.List.of(topicIdPartition) - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + val socket: Socket = connectAny() + + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap: Map[TopicIdPartition, 
util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap, maxRecords = 1, batchSize = 1) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap, maxRecords = 1, batchSize = 1) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses.size) - assertEquals(topicId, shareFetchResponseData.responses.get(0).topicId) - assertEquals(1, shareFetchResponseData.responses.get(0).partitions.size) + assertEquals(topicId, shareFetchResponseData.responses.stream().findFirst().get().topicId) + assertEquals(1, shareFetchResponseData.responses.stream().findFirst().get().partitions.size) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(0), util.List.of(1))) - val partitionData = shareFetchResponseData.responses.get(0).partitions.get(0) + val partitionData = shareFetchResponseData.responses.stream().findFirst().get().partitions.get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } @@ -2343,82 +2313,77 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") ) ) ) ) def testShareFetchRequestMultipleBatchesWithMaxRecordsAndBatchSize(): Unit = { - val groupId: String = "group" - val memberId = Uuid.randomUuid - val topic = "topic" - val partition = 0 + createTopicAndReturnLeaders(TOPIC, 
numPartitions = 3) + val topicIds = getTopicIds + val topicId = topicIds.get(TOPIC) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + + val send = util.List.of(topicIdPartition) - createTopicAndReturnLeaders(topic, numPartitions = 3) - val topicIds = getTopicIds.asJava - val topicId = topicIds.get(topic) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val socket: Socket = connectAny() - val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + createOffsetsTopic() + shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId, groupId, send) + sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, acknowledgementsMap, maxRecords = 5, batchSize = 1) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] + val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap, maxRecords = 5, batchSize = 1) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses.size) - assertEquals(topicId, shareFetchResponseData.responses.get(0).topicId) - assertEquals(1, shareFetchResponseData.responses.get(0).partitions.size) + assertEquals(topicId, shareFetchResponseData.responses.stream().findFirst().get().topicId) + assertEquals(1, shareFetchResponseData.responses.stream().findFirst().get().partitions.size) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(partition) + .setPartitionIndex(PARTITION) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0, 1, 2, 3, 4), util.List.of(0, 1, 2, 3, 4), util.List.of(1, 1, 1, 1, 1))) - val partitionData = shareFetchResponseData.responses.get(0).partitions.get(0) + val partitionData = shareFetchResponseData.responses.stream().findFirst().get().partitions.get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } // For initial fetch request, the response may not be available in the first attempt when the share // partition is not initialized yet. Hence, wait for response from all partitions before proceeding. 
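+  // The updated helper polls over a caller-supplied Socket, so every retry of the initial-epoch fetch reuses the same connection.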
- private def sendFirstShareFetchRequest(memberId: Uuid, groupId: String, topicIdPartitions: Seq[TopicIdPartition], lockTimeout: Int = 30000): Unit = { + private def sendFirstShareFetchRequest(memberId: Uuid, groupId: String, topicIdPartitions: util.List[TopicIdPartition], socket: Socket, lockTimeout: Int = 30000): Unit = { val partitions: util.Set[Integer] = new util.HashSet() TestUtils.waitUntilTrue(() => { val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, topicIdPartitions, Seq.empty, Map.empty) - val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, topicIdPartitions, util.List.of, util.Map.of) + val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(lockTimeout, shareFetchResponseData.acquisitionLockTimeoutMs) - shareFetchResponseData.responses().asScala.foreach(response => { + shareFetchResponseData.responses().forEach(response => { if (!response.partitions().isEmpty) { response.partitions().forEach(partitionData => partitions.add(partitionData.partitionIndex)) } @@ -2428,9 +2393,52 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo }, "Share fetch request failed", 5000) } + private def shareHeartbeat(memberId: Uuid, groupId: String, topics: util.Map[String, Int]): Unit = { + val coordResp = connectAndReceive[FindCoordinatorResponse](new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData() + .setKey(groupId) + .setKeyType(0.toByte) + ).build(0) + ) + + val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setMemberId(memberId.toString) + .setGroupId(groupId) + .setMemberEpoch(0) + .setSubscribedTopicNames(new java.util.ArrayList[String](topics.keySet())) + ).build() + + TestUtils.waitUntilTrue(() => { + val resp = connectAndReceive[ShareGroupHeartbeatResponse](shareGroupHeartbeatRequest, coordResp.node().id()) + resp.data().errorCode() == Errors.NONE.code() && assignment(memberId.toString, groupId) + }, "Heartbeat failed") + } + + private def assignment(memberId: String, groupId: String): Boolean = { + val admin = cluster.admin() + + val members = admin + .describeShareGroups(util.List.of(groupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) + .describedGroups() + .get(groupId) + .get() + .members() + + var isAssigned = false + val iter = members.iterator() + while (iter.hasNext && !isAssigned) { + val desc = iter.next() + if (desc.consumerId() == memberId && !desc.assignment().topicPartitions().isEmpty) + isAssigned = true + } + + admin.close() + isAssigned + } + private def expectedAcquiredRecords(firstOffsets: util.List[Long], lastOffsets: util.List[Long], deliveryCounts: util.List[Int]): util.List[AcquiredRecords] = { val acquiredRecordsList: util.List[AcquiredRecords] = new util.ArrayList() - for (i <- firstOffsets.asScala.indices) { + for (i <- 0 until firstOffsets.size()) { acquiredRecordsList.add(new AcquiredRecords() .setFirstOffset(firstOffsets.get(i)) .setLastOffset(lastOffsets.get(i)) @@ -2456,22 +2464,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo private def createShareFetchRequest(groupId: String, metadata: 
ShareRequestMetadata, - send: Seq[TopicIdPartition], - forget: Seq[TopicIdPartition], - acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], + send: util.List[TopicIdPartition], + forget: util.List[TopicIdPartition], + acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], maxWaitMs: Int = MAX_WAIT_MS, minBytes: Int = 0, maxBytes: Int = Int.MaxValue, maxRecords: Int = 500, batchSize: Int = 500): ShareFetchRequest = { - ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxRecords, batchSize, send.asJava, forget.asJava, acknowledgementsMap.asJava) + ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxRecords, batchSize, send, forget, acknowledgementsMap) .build() } - - private def createShareAcknowledgeRequest(groupId: String, + + private def createShareAcknowledgeRequest(groupId: String, metadata: ShareRequestMetadata, - acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]]): ShareAcknowledgeRequest = { - ShareAcknowledgeRequest.Builder.forConsumer(groupId, metadata, acknowledgementsMap.asJava) + acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] + ): ShareAcknowledgeRequest = { + ShareAcknowledgeRequest.Builder.forConsumer(groupId, metadata, acknowledgementsMap) .build() } } diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala index dfd478616da30..408f31db8d15f 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala @@ -29,7 +29,7 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.config.ServerConfigs +import org.apache.kafka.server.common.Feature import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Timeout @@ -43,14 +43,13 @@ import scala.jdk.CollectionConverters._ class ShareGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, value = "true") + features = Array( + new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) ) ) def testShareGroupDescribeIsInAccessibleWhenConfigsDisabled(): Unit = { val shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( - new ShareGroupDescribeRequestData().setGroupIds(List("grp-1", "grp-2").asJava), - true + new ShareGroupDescribeRequestData().setGroupIds(List("grp-1", "grp-2").asJava) ).build(ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) val shareGroupDescribeResponse = connectAndReceive[ShareGroupDescribeResponse](shareGroupDescribeRequest) @@ -71,10 +70,8 @@ class ShareGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic,consumer,share"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = 
"1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), - new ClusterConfigProperty(key = ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG, value = "true"), ) ) def testShareGroupDescribe(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala index ea9308730c3c5..b05a97fe119c2 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala @@ -16,18 +16,20 @@ */ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, Type} import kafka.utils.TestUtils -import kafka.utils.TestUtils.waitForAllPartitionsMetadata import org.apache.kafka.clients.admin.{Admin, NewPartitions} -import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.common.Uuid import org.apache.kafka.common.message.{ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse} import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.common.Feature +import org.apache.kafka.server.IntegrationTestUtils; import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals, assertNotNull, assertNull, assertTrue} import org.junit.jupiter.api.Timeout + import java.util import scala.jdk.CollectionConverters._ @@ -38,11 +40,13 @@ import scala.jdk.CollectionConverters._ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { @ClusterTest( - types = Array(Type.KRAFT) + features = Array( + new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) + ) ) def testShareGroupHeartbeatIsInAccessibleWhenConfigsDisabled(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData(), true + new ShareGroupHeartbeatRequestData() ).build() val shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -51,9 +55,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( - types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -76,8 +78,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the request until receiving a successful response. There is a delay @@ -105,8 +106,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), - true + .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch) ).build() // This is the expected assignment. 
here @@ -132,8 +132,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(-1), - true + .setMemberEpoch(-1) ).build() shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -146,9 +145,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( - types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -171,8 +168,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the request until receiving a successful response. There is a delay @@ -195,8 +191,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the second member request until receiving a successful response. @@ -225,8 +220,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId1) - .setMemberEpoch(1), - true + .setMemberEpoch(1) ).build() // Heartbeats until the partitions are assigned for member 1. @@ -241,8 +235,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId1) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch()), - true + .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch()) ).build() false } @@ -257,8 +250,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId2) - .setMemberEpoch(2), - true + .setMemberEpoch(2) ).build() // Heartbeats until the partitions are assigned for member 2. @@ -288,8 +280,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId1) - .setMemberEpoch(3), - true + .setMemberEpoch(3) ).build() // Heartbeats until the response for no change of assignment occurs for member 1 with same epoch. @@ -308,9 +299,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( - types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -333,8 +322,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the request until receiving a successful response. 
There is a delay @@ -369,8 +357,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1), - true + .setMemberEpoch(1) ).build() TestUtils.waitUntilTrue(() => { @@ -387,8 +374,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberEpoch(-1) - .setMemberId(memberId), - true + .setMemberId(memberId) ).build() // Send the member request until receiving a successful response. @@ -406,8 +392,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberEpoch(0) .setMemberId(memberId) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -423,9 +408,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( - types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -446,8 +429,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar", "baz").asJava), - true + .setSubscribedTopicNames(List("foo", "bar", "baz").asJava) ).build() // Send the request until receiving a successful response. There is a delay // here because the group coordinator is loaded in the background. @@ -487,12 +469,11 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1), - true + .setMemberEpoch(1) ).build() - cluster.waitForTopic("foo", 2) - cluster.waitForTopic("bar", 3) + cluster.waitTopicCreation("foo", 2) + cluster.waitTopicCreation("bar", 3) TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -526,8 +507,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(3), - true + .setMemberEpoch(3) ).build() TestUtils.waitUntilTrue(() => { @@ -540,7 +520,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { // Verify the response. assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) // Increasing the partitions of topic bar which is already being consumed in the share group. 
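// Illustrative sketch (not from this patch): the simplified increasePartitions helper used
// just below now boils down to a single Admin call; the ISR validation against specific
// brokers was dropped, and the test instead observes the new partitions through later
// heartbeat responses. Object name is mine; "admin" and the topic are caller-supplied.
object IncreasePartitionsSketch {
  import org.apache.kafka.clients.admin.{Admin, NewPartitions}
  import scala.jdk.CollectionConverters._

  def increaseTo(admin: Admin, topic: String, totalPartitionCount: Int): Unit =
    admin.createPartitions(Map(topic -> NewPartitions.increaseTo(totalPartitionCount)).asJava)
}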
- increasePartitions(admin, "bar", 6, Seq.empty) + increasePartitions(admin, "bar", 6) expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() .setTopicPartitions(List( @@ -558,8 +538,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(5), - true + .setMemberEpoch(5) ).build() TestUtils.waitUntilTrue(() => { @@ -592,8 +571,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(7), - true + .setMemberEpoch(7) ).build() TestUtils.waitUntilTrue(() => { @@ -610,10 +588,82 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } } + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "group.share.max.size", value = "2") + )) + def testShareGroupMaxSizeConfigExceeded(): Unit = { + val groupId: String = "group" + val memberId1 = Uuid.randomUuid() + val memberId2 = Uuid.randomUuid() + val memberId3 = Uuid.randomUuid() + + val admin = cluster.admin() + + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group by the first member (memberId1). + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Heartbeat request to join the group by the second member (memberId2). + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava) + ).build() + + // Send the request until receiving a successful response + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Heartbeat request to join the group by the third member (memberId3). 
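// Illustrative sketch (not from this patch): the expected outcome of this third join attempt.
// With group.share.max.size = 2 the coordinator rejects the member, so the response's
// errorCode should equal Errors.GROUP_MAX_SIZE_REACHED.code; with JUnit's assertEquals the
// expected value is conventionally passed first:
//   assertEquals(Errors.GROUP_MAX_SIZE_REACHED.code, shareGroupHeartbeatResponse.data.errorCode)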
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId3.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava) + ).build() + + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + // Since the group.share.max.size config is set to 2, a third member cannot join the same group. + assertEquals(shareGroupHeartbeatResponse.data.errorCode, Errors.GROUP_MAX_SIZE_REACHED.code) + + } finally { + admin.close() + } + } + @ClusterTest( types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.heartbeat.interval.ms", value = "500"), @@ -640,8 +690,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the request until receiving a successful response. There is a delay @@ -677,8 +726,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true + .setMemberEpoch(memberEpoch) ).build() TestUtils.waitUntilTrue(() => { @@ -697,8 +745,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(memberId) .setMemberEpoch(memberEpoch) - .setSubscribedTopicNames(List("foo", "bar").asJava), - true + .setSubscribedTopicNames(List("foo", "bar").asJava) ).build() val barId = TestUtils.createTopicWithAdminRaw( @@ -729,7 +776,6 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(memberId) .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), - true ).build() false } @@ -744,8 +790,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true + .setMemberEpoch(memberEpoch) ).build() TestUtils.waitUntilTrue(() => { @@ -764,8 +809,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true + .setMemberEpoch(memberEpoch) ).build() TestUtils.waitUntilTrue(() => { @@ -779,8 +823,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar").asJava), - true + .setSubscribedTopicNames(List("foo", "bar").asJava) ).build() TestUtils.waitUntilTrue(() => { @@ -799,9 +842,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( - types = Array(Type.KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -822,8 +863,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - 
.setSubscribedTopicNames(List("foo").asJava), - true + .setSubscribedTopicNames(List("foo").asJava) ).build() // Send the request until receiving a successful response. There is a delay // here because the group coordinator is loaded in the background. @@ -853,8 +893,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1), - true + .setMemberEpoch(1) ).build() TestUtils.waitUntilTrue(() => { @@ -875,8 +914,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(2), - true + .setMemberEpoch(2) ).build() // Should receive no error and no assignment changes. @@ -895,29 +933,14 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } private def connectAndReceive(request: ShareGroupHeartbeatRequest): ShareGroupHeartbeatResponse = { - IntegrationTestUtils.connectAndReceive[ShareGroupHeartbeatResponse]( - request, - cluster.anyBrokerSocketServer(), - cluster.clientListener() - ) + IntegrationTestUtils.connectAndReceive[ShareGroupHeartbeatResponse](request, cluster.brokerBoundPorts().get(0)) } private def increasePartitions[B <: KafkaBroker](admin: Admin, topic: String, - totalPartitionCount: Int, - brokersToValidate: Seq[B] + totalPartitionCount: Int ): Unit = { val newPartitionSet: Map[String, NewPartitions] = Map.apply(topic -> NewPartitions.increaseTo(totalPartitionCount)) admin.createPartitions(newPartitionSet.asJava) - - if (brokersToValidate.nonEmpty) { - // wait until we've propagated all partitions metadata to all brokers - val allPartitionsMetadata = waitForAllPartitionsMetadata(brokersToValidate, topic, totalPartitionCount) - (0 until totalPartitionCount - 1).foreach(i => { - allPartitionsMetadata.get(new TopicPartition(topic, i)).foreach { partitionMetadata => - assertEquals(totalPartitionCount, partitionMetadata.isr.size) - } - }) - } } } diff --git a/core/src/test/scala/unit/kafka/server/StreamsGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/StreamsGroupDescribeRequestTest.scala new file mode 100644 index 0000000000000..1a08645f49520 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/StreamsGroupDescribeRequestTest.scala @@ -0,0 +1,316 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package kafka.server + +import kafka.utils.TestUtils +import org.apache.kafka.common.message.{StreamsGroupDescribeRequestData, StreamsGroupDescribeResponseData, StreamsGroupHeartbeatRequestData, StreamsGroupHeartbeatResponseData} +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.{StreamsGroupDescribeRequest, StreamsGroupDescribeResponse} +import org.apache.kafka.common.resource.ResourceType +import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.common.test.api._ + +import scala.jdk.CollectionConverters._ +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.security.authorizer.AclEntry +import org.apache.kafka.server.common.Feature +import org.junit.Assert.{assertEquals, assertTrue} + +import java.lang.{Byte => JByte} + +@ClusterTestDefaults( + types = Array(Type.KRAFT), + brokers = 1, + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) +) +class StreamsGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + + @ClusterTest( + features = Array( + new ClusterFeature(feature = Feature.STREAMS_VERSION, version = 0) + ) + ) + def testStreamsGroupDescribeWhenFeatureFlagNotEnabled(): Unit = { + val streamsGroupDescribeRequest = new StreamsGroupDescribeRequest.Builder( + new StreamsGroupDescribeRequestData().setGroupIds(List("grp-mock-1", "grp-mock-2").asJava) + ).build(ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) + + val streamsGroupDescribeResponse = connectAndReceive[StreamsGroupDescribeResponse](streamsGroupDescribeRequest) + val expectedResponse = new StreamsGroupDescribeResponseData() + expectedResponse.groups().add( + new StreamsGroupDescribeResponseData.DescribedGroup() + .setGroupId("grp-mock-1") + .setErrorCode(Errors.UNSUPPORTED_VERSION.code) + ) + expectedResponse.groups().add( + new StreamsGroupDescribeResponseData.DescribedGroup() + .setGroupId("grp-mock-2") + .setErrorCode(Errors.UNSUPPORTED_VERSION.code) + ) + assertEquals(expectedResponse, streamsGroupDescribeResponse.data) + } + + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic,consumer,streams"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testStreamsGroupDescribeGroupsWithNewGroupCoordinator(): Unit = { + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. 
+ createOffsetsTopic() + + val admin = cluster.admin() + val topicName = "foo" + + try { + TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = topicName, + numPartitions = 3 + ) + + TestUtils.waitUntilTrue(() => { + admin.listTopics().names().get().contains(topicName) + }, msg = s"Topic $topicName is not available to the group coordinator") + + val timeoutMs = 5 * 60 * 1000 + val clientId = "client-id" + val clientHost = "/127.0.0.1" + val authorizedOperationsInt = Utils.to32BitField( + AclEntry.supportedOperations(ResourceType.GROUP).asScala + .map(_.code.asInstanceOf[JByte]).asJava) + + var grp1Member1Response: StreamsGroupHeartbeatResponseData = null + var grp1Member2Response: StreamsGroupHeartbeatResponseData = null + var grp2Member1Response: StreamsGroupHeartbeatResponseData = null + var grp2Member2Response: StreamsGroupHeartbeatResponseData = null + + // grp-1 with 2 members + TestUtils.waitUntilTrue(() => { + grp1Member1Response = streamsGroupHeartbeat( + groupId = "grp-1", + memberId = "member-1", + rebalanceTimeoutMs = timeoutMs, + activeTasks = List.empty, + standbyTasks = List.empty, + warmupTasks = List.empty, + topology = new StreamsGroupHeartbeatRequestData.Topology() + .setEpoch(1) + .setSubtopologies(List( + new StreamsGroupHeartbeatRequestData.Subtopology() + .setSubtopologyId("subtopology-1") + .setSourceTopics(List(topicName).asJava) + .setRepartitionSinkTopics(List.empty.asJava) + .setRepartitionSourceTopics(List.empty.asJava) + .setStateChangelogTopics(List.empty.asJava) + ).asJava) + ) + grp1Member2Response = streamsGroupHeartbeat( + groupId = "grp-1", + memberId = "member-2", + rebalanceTimeoutMs = timeoutMs, + activeTasks = List.empty, + standbyTasks = List.empty, + warmupTasks = List.empty, + topology = new StreamsGroupHeartbeatRequestData.Topology() + .setEpoch(1) + .setSubtopologies(List( + new StreamsGroupHeartbeatRequestData.Subtopology() + .setSubtopologyId("subtopology-1") + .setSourceTopics(List(topicName).asJava) + .setRepartitionSinkTopics(List.empty.asJava) + .setRepartitionSourceTopics(List.empty.asJava) + .setStateChangelogTopics(List.empty.asJava) + ).asJava) + ) + + val groupsDescription1 = streamsGroupDescribe( + groupIds = List("grp-1"), + includeAuthorizedOperations = true + ) + grp1Member1Response.errorCode == Errors.NONE.code && grp1Member2Response.errorCode == Errors.NONE.code && + groupsDescription1.size == 1 && groupsDescription1.head.members.size == 2 + }, msg = s"Could not create grp-1 with 2 members successfully") + + // grp-2 with 2 members + TestUtils.waitUntilTrue(() => { + grp2Member1Response = streamsGroupHeartbeat( + groupId = "grp-2", + memberId = "member-3", + rebalanceTimeoutMs = timeoutMs, + activeTasks = List.empty, + standbyTasks = List.empty, + warmupTasks = List.empty, + topology = new StreamsGroupHeartbeatRequestData.Topology() + .setEpoch(1) + .setSubtopologies(List( + new StreamsGroupHeartbeatRequestData.Subtopology() + .setSubtopologyId("subtopology-1") + .setSourceTopics(List(topicName).asJava) + .setRepartitionSinkTopics(List.empty.asJava) + .setRepartitionSourceTopics(List.empty.asJava) + .setStateChangelogTopics(List.empty.asJava) + ).asJava) + ) + grp2Member2Response = streamsGroupHeartbeat( + groupId = "grp-2", + memberId = "member-4", + rebalanceTimeoutMs = timeoutMs, + activeTasks = List.empty, + standbyTasks = List.empty, + warmupTasks = List.empty, + topology = new StreamsGroupHeartbeatRequestData.Topology() + .setEpoch(1) + .setSubtopologies(List( + new StreamsGroupHeartbeatRequestData.Subtopology() + 
.setSubtopologyId("subtopology-1") + .setSourceTopics(List(topicName).asJava) + .setRepartitionSinkTopics(List.empty.asJava) + .setRepartitionSourceTopics(List.empty.asJava) + .setStateChangelogTopics(List.empty.asJava) + ).asJava) + ) + val groupsDescription2 = streamsGroupDescribe( + groupIds = List("grp-2"), + includeAuthorizedOperations = true, + version = ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled).toShort + ) + + grp2Member1Response.errorCode == Errors.NONE.code && grp2Member2Response.errorCode == Errors.NONE.code && + groupsDescription2.size == 1 && groupsDescription2.head.members.size == 2 + }, msg = s"Could not create grp-2 with 2 members successfully") + + // Send follow-up heartbeats until both groups are stable + TestUtils.waitUntilTrue(() => { + grp1Member1Response = streamsGroupHeartbeat( + groupId = "grp-1", + memberId = grp1Member1Response.memberId, + memberEpoch = grp1Member1Response.memberEpoch, + rebalanceTimeoutMs = timeoutMs, + activeTasks = convertTaskIds(grp1Member1Response.activeTasks), + standbyTasks = convertTaskIds(grp1Member1Response.standbyTasks), + warmupTasks = convertTaskIds(grp1Member1Response.warmupTasks), + topology = null + ) + grp1Member2Response = streamsGroupHeartbeat( + groupId = "grp-1", + memberId = grp1Member2Response.memberId, + memberEpoch = grp1Member2Response.memberEpoch, + rebalanceTimeoutMs = timeoutMs, + activeTasks = convertTaskIds(grp1Member2Response.activeTasks), + standbyTasks = convertTaskIds(grp1Member2Response.standbyTasks), + warmupTasks = convertTaskIds(grp1Member2Response.warmupTasks), + topology = null + ) + grp2Member1Response = streamsGroupHeartbeat( + groupId = "grp-2", + memberId = grp2Member1Response.memberId, + memberEpoch = grp2Member1Response.memberEpoch, + rebalanceTimeoutMs = timeoutMs, + activeTasks = convertTaskIds(grp2Member1Response.activeTasks), + standbyTasks = convertTaskIds(grp2Member1Response.standbyTasks), + warmupTasks = convertTaskIds(grp2Member1Response.warmupTasks), + topology = null + ) + grp2Member2Response = streamsGroupHeartbeat( + groupId = "grp-2", + memberId = grp2Member2Response.memberId, + memberEpoch = grp2Member2Response.memberEpoch, + rebalanceTimeoutMs = timeoutMs, + activeTasks = convertTaskIds(grp2Member2Response.activeTasks), + standbyTasks = convertTaskIds(grp2Member2Response.standbyTasks), + warmupTasks = convertTaskIds(grp2Member2Response.warmupTasks), + topology = null + ) + val actual = streamsGroupDescribe( + groupIds = List("grp-1","grp-2"), + includeAuthorizedOperations = true, + version = ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled).toShort + ) + actual.head.groupState() == "Stable" && actual(1).groupState() == "Stable" && + actual.head.members.size == 2 && actual(1).members.size == 2 + }, "Two groups did not stabilize with 2 members each in time") + + // Test the describe request for both groups in stable state + for (version <- ApiKeys.STREAMS_GROUP_DESCRIBE.oldestVersion() to ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { + val actual = streamsGroupDescribe( + groupIds = List("grp-1","grp-2"), + includeAuthorizedOperations = true, + version = version.toShort + ) + + assertEquals(2, actual.size) + assertEquals(actual.map(_.groupId).toSet, Set("grp-1", "grp-2")) + for (describedGroup <- actual) { + assertEquals("Stable", describedGroup.groupState) + assertTrue("Group epoch is not equal to the assignment epoch", describedGroup.groupEpoch == describedGroup.assignmentEpoch) + // Verify topology + assertEquals(1, 
describedGroup.topology.epoch) + assertEquals(1, describedGroup.topology.subtopologies.size) + assertEquals("subtopology-1", describedGroup.topology.subtopologies.get(0).subtopologyId) + assertEquals(List(topicName).asJava, describedGroup.topology.subtopologies.get(0).sourceTopics) + + // Verify members + assertEquals(2, describedGroup.members.size) + val expectedMemberIds = describedGroup.groupId match { + case "grp-1" => Set(grp1Member1Response.memberId, grp1Member2Response.memberId) + case "grp-2" => Set(grp2Member1Response.memberId, grp2Member2Response.memberId) + case unexpected => throw new AssertionError(s"Unexpected group ID: $unexpected") + } + + val actualMemberIds = describedGroup.members.asScala.map(_.memberId).toSet + assertEquals(expectedMemberIds, actualMemberIds) + assertEquals(authorizedOperationsInt, describedGroup.authorizedOperations) + + describedGroup.members.asScala.foreach { member => + assertTrue("Group epoch is not equal to the member epoch", member.memberEpoch == describedGroup.assignmentEpoch) + assertEquals(1, member.topologyEpoch) + assertEquals(member.targetAssignment, member.assignment) + assertEquals(clientId, member.clientId()) + assertEquals(clientHost, member.clientHost()) + } + // Verify all partitions 0, 1, 2 are assigned exactly once + val allAssignedPartitions = describedGroup.members.asScala.flatMap { member => + member.assignment.activeTasks.asScala.flatMap(_.partitions.asScala) + }.toList + assertEquals(List(0, 1, 2).sorted, allAssignedPartitions.sorted) + } + } + } finally{ + admin.close() + } + } + + private def convertTaskIds(responseTasks: java.util.List[StreamsGroupHeartbeatResponseData.TaskIds]): List[StreamsGroupHeartbeatRequestData.TaskIds] = { + if (responseTasks == null) { + List.empty + } else { + responseTasks.asScala.map { responseTask => + new StreamsGroupHeartbeatRequestData.TaskIds() + .setSubtopologyId(responseTask.subtopologyId) + .setPartitions(responseTask.partitions) + }.toList + } + } +} \ No newline at end of file diff --git a/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala index 3b95d1c9d4170..09ed807db8ecb 100644 --- a/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala @@ -64,6 +64,17 @@ class SyncGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas version = version.toShort ) + // Sync with empty group id. 
+ verifySyncGroupWithOldProtocol( + groupId = "", + memberId = "member-id", + generationId = -1, + expectedProtocolType = null, + expectedProtocolName = null, + expectedError = Errors.INVALID_GROUP_ID, + version = version.toShort + ) + val metadata = ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription(Collections.singletonList("foo")) ).array diff --git a/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala b/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala index 6d8adef1f8bf9..2e2b32ee5ba3a 100644 --- a/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala @@ -23,7 +23,7 @@ import java.util.concurrent.DelayQueue import org.apache.kafka.common.metrics.MetricConfig import org.apache.kafka.common.utils.MockTime import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.{QuotaType, ThrottleCallback, ThrottledChannel} +import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType, ThrottleCallback, ThrottledChannel} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} diff --git a/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala b/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala index 139aeb053ffea..f9314e5d409aa 100644 --- a/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala +++ b/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala @@ -22,6 +22,7 @@ import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.record._ import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.server.{PartitionFetchState, ReplicaState} import org.junit.jupiter.api.Assertions._ import kafka.server.FetcherThreadTestUtils.{initialFetchState, mkBatch} import org.junit.jupiter.params.ParameterizedTest @@ -67,7 +68,7 @@ class TierStateMachineTest { fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) assertEquals(3L, replicaState.logEndOffset) - val expectedState = if (truncateOnFetch) Option(Fetching) else Option(Truncating) + val expectedState = if (truncateOnFetch) Option(ReplicaState.FETCHING) else Option(ReplicaState.TRUNCATING) assertEquals(expectedState, fetcher.fetchState(partition).map(_.state)) fetcher.doWork() @@ -128,7 +129,7 @@ class TierStateMachineTest { fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) assertEquals(3L, replicaState.logEndOffset) - val expectedState = if (truncateOnFetch) Option(Fetching) else Option(Truncating) + val expectedState = if (truncateOnFetch) Option(ReplicaState.FETCHING) else Option(ReplicaState.TRUNCATING) assertEquals(expectedState, fetcher.fetchState(partition).map(_.state)) fetcher.doWork() @@ -162,7 +163,9 @@ class TierStateMachineTest { var isErrorHandled = false val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) { - override def start(topicPartition: TopicPartition, currentFetchState: PartitionFetchState, fetchPartitionData: FetchResponseData.PartitionData): PartitionFetchState = { + override def start(topicPartition: TopicPartition, + currentFetchState: PartitionFetchState, + fetchPartitionData: FetchResponseData.PartitionData): PartitionFetchState = { isErrorHandled = 
true throw new FencedLeaderEpochException(s"Epoch ${currentFetchState.currentLeaderEpoch} is fenced") } diff --git a/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala index d41a56680a04b..aef40390d8596 100644 --- a/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala @@ -16,19 +16,16 @@ */ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import kafka.utils.TestUtils -import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.UnsupportedVersionException import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.JoinGroupRequest +import org.apache.kafka.common.requests.{EndTxnRequest, JoinGroupRequest} import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.utils.ProducerIdAndEpoch import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue} - -import scala.jdk.CollectionConverters.IterableHasAsScala +import org.junit.jupiter.api.Assertions.{assertNotEquals, assertThrows} @ClusterTestDefaults( types = Array(Type.KRAFT), @@ -51,6 +48,16 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat testTxnOffsetCommit(false) } + @ClusterTest + def testDelayedTxnOffsetCommitWithBumpedEpochIsRejectedWithNewConsumerGroupProtocol(): Unit = { + testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(true) + } + + @ClusterTest + def testDelayedTxnOffsetCommitWithBumpedEpochIsRejectedWithOldConsumerGroupProtocol(): Unit = { + testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(false) + } + private def testTxnOffsetCommit(useNewProtocol: Boolean): Unit = { val topic = "topic" val partition = 0 @@ -65,8 +72,8 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat // Join the consumer group. Note that we don't heartbeat here so we must use // a session long enough for the duration of the test. 
val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) - assertTrue(memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID) - assertTrue(memberEpoch != JoinGroupRequest.UNKNOWN_GENERATION_ID) + assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, memberId) + assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, memberEpoch) createTopic(topic, 1) @@ -178,7 +185,7 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat transactionalId = transactionalId ) - val originalOffset = fetchOffset(topic, partition, groupId) + val originalOffset = fetchOffset(groupId, topic, partition) commitTxnOffset( groupId = groupId, @@ -207,26 +214,107 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat TestUtils.waitUntilTrue(() => try { - fetchOffset(topic, partition, groupId) == expectedOffset + fetchOffset(groupId, topic, partition) == expectedOffset } catch { case _: Throwable => false }, "txn commit offset validation failed" ) } - private def fetchOffset( - topic: String, - partition: Int, - groupId: String - ): Long = { - val fetchOffsetsResp = fetchOffsets( - groups = Map(groupId -> List(new TopicPartition(topic, partition))), - requireStable = true, - version = ApiKeys.OFFSET_FETCH.latestVersion - ) - val groupIdRecord = fetchOffsetsResp.find(_.groupId == groupId).head - val topicRecord = groupIdRecord.topics.asScala.find(_.name == topic).head - val partitionRecord = topicRecord.partitions.asScala.find(_.partitionIndex == partition).head - partitionRecord.committedOffset + private def testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(useNewProtocol: Boolean): Unit = { + val topic = "topic" + val partition = 0 + val transactionalId = "txn" + val groupId = "group" + val offset = 100L + + // Creates the __consumer_offsets and __transaction_state topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + createTransactionStateTopic() + + // Join the consumer group. Note that we don't heartbeat here so we must use + // a session long enough for the duration of the test. + val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) + assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, memberId) + assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, memberEpoch) + + createTopic(topic, 1) + + for (version <- ApiKeys.TXN_OFFSET_COMMIT.oldestVersion to ApiKeys.TXN_OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + val useTV2 = version > EndTxnRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 + + // Initialize producer. Wait until the coordinator finishes loading. + var producerIdAndEpoch: ProducerIdAndEpoch = null + TestUtils.waitUntilTrue(() => + try { + producerIdAndEpoch = initProducerId( + transactionalId = transactionalId, + producerIdAndEpoch = ProducerIdAndEpoch.NONE, + expectedError = Errors.NONE + ) + true + } catch { + case _: Throwable => false + }, "initProducerId request failed" + ) + + addOffsetsToTxn( + groupId = groupId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId + ) + + // Complete the transaction. + endTxn( + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId, + isTransactionV2Enabled = useTV2, + committed = true, + expectedError = Errors.NONE + ) + + // Start a new transaction. Wait for the previous transaction to complete. 
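// Illustrative sketch (not from this patch): how these transaction tests choose the producer
// epoch for the second transaction. Under transactions V2 (TV2) the completed endTxn bumps the
// epoch, so follow-up calls must use epoch + 1, while TV1 keeps reusing the original epoch;
// committing with the stale epoch therefore fails with INVALID_PRODUCER_EPOCH only on TV2.
// Object and method names are mine; EndTxnRequest and Errors come from this file's imports.
object TxnEpochSketch {
  import org.apache.kafka.common.protocol.Errors
  import org.apache.kafka.common.requests.EndTxnRequest

  def isTV2(txnOffsetCommitVersion: Int): Boolean =
    txnOffsetCommitVersion > EndTxnRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2

  def epochForSecondTxn(originalEpoch: Short, useTV2: Boolean): Short =
    if (useTV2) (originalEpoch + 1).toShort else originalEpoch

  def expectedErrorForStaleEpochCommit(useTV2: Boolean): Errors =
    if (useTV2) Errors.INVALID_PRODUCER_EPOCH else Errors.NONE
}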
+ TestUtils.waitUntilTrue(() => + try { + addOffsetsToTxn( + groupId = groupId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, + transactionalId = transactionalId + ) + true + } catch { + case _: Throwable => false + }, "addOffsetsToTxn request failed" + ) + + // Committing offset with old epoch succeeds for TV1 and fails for TV2. + commitTxnOffset( + groupId = groupId, + memberId = if (version >= 3) memberId else JoinGroupRequest.UNKNOWN_MEMBER_ID, + generationId = if (version >= 3) 1 else JoinGroupRequest.UNKNOWN_GENERATION_ID, + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId, + topic = topic, + partition = partition, + offset = offset, + expectedError = if (useTV2) Errors.INVALID_PRODUCER_EPOCH else Errors.NONE, + version = version.toShort + ) + + // Complete the transaction. + endTxn( + producerId = producerIdAndEpoch.producerId, + producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, + transactionalId = transactionalId, + isTransactionV2Enabled = useTV2, + committed = true, + expectedError = Errors.NONE + ) + } } } diff --git a/core/src/test/scala/unit/kafka/server/WriteTxnMarkersRequestTest.scala b/core/src/test/scala/unit/kafka/server/WriteTxnMarkersRequestTest.scala new file mode 100644 index 0000000000000..a68de4dacc0f4 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/WriteTxnMarkersRequestTest.scala @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package kafka.server + +import kafka.utils.TestUtils +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.{EndTxnRequest, JoinGroupRequest} +import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} +import org.apache.kafka.common.utils.ProducerIdAndEpoch +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.junit.jupiter.api.Assertions.assertNotEquals + +@ClusterTestDefaults( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + ) +) +class WriteTxnMarkersRequestTest(cluster:ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest + def testDelayedWriteTxnMarkersShouldNotCommitTxnOffsetWithNewConsumerGroupProtocol(): Unit = { + testDelayedWriteTxnMarkersShouldNotCommitTxnOffset(true) + } + + @ClusterTest + def testDelayedWriteTxnMarkersShouldNotCommitTxnOffsetWithOldConsumerGroupProtocol(): Unit = { + testDelayedWriteTxnMarkersShouldNotCommitTxnOffset(false) + } + + private def testDelayedWriteTxnMarkersShouldNotCommitTxnOffset(useNewProtocol: Boolean): Unit = { + val topic = "topic" + val partition = 0 + val transactionalId = "txn" + val groupId = "group" + val offset = 100L + + // Creates the __consumer_offsets and __transaction_state topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + createTransactionStateTopic() + + // Join the consumer group. Note that we don't heartbeat here so we must use + // a session long enough for the duration of the test. + val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) + assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, memberId) + assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, memberEpoch) + + createTopic(topic, 1) + + for (version <- ApiKeys.TXN_OFFSET_COMMIT.oldestVersion to ApiKeys.TXN_OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + val useTV2 = version > EndTxnRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 + + // Initialize producer. Wait until the coordinator finishes loading. + var producerIdAndEpoch: ProducerIdAndEpoch = null + TestUtils.waitUntilTrue(() => + try { + producerIdAndEpoch = initProducerId( + transactionalId = transactionalId, + producerIdAndEpoch = ProducerIdAndEpoch.NONE, + expectedError = Errors.NONE + ) + true + } catch { + case _: Throwable => false + }, "initProducerId request failed" + ) + + addOffsetsToTxn( + groupId = groupId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId + ) + + // Complete the transaction. + endTxn( + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId, + isTransactionV2Enabled = useTV2, + committed = true, + expectedError = Errors.NONE + ) + + // Start a new transaction. 
Wait for the previous transaction to complete. + TestUtils.waitUntilTrue(() => + try { + addOffsetsToTxn( + groupId = groupId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, + transactionalId = transactionalId + ) + true + } catch { + case _: Throwable => false + }, "addOffsetsToTxn request failed" + ) + + commitTxnOffset( + groupId = groupId, + memberId = if (version >= 3) memberId else JoinGroupRequest.UNKNOWN_MEMBER_ID, + generationId = if (version >= 3) 1 else JoinGroupRequest.UNKNOWN_GENERATION_ID, + producerId = producerIdAndEpoch.producerId, + producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, + transactionalId = transactionalId, + topic = topic, + partition = partition, + offset = offset + version, + expectedError = Errors.NONE, + version = version.toShort + ) + + // Delayed txn marker should be accepted for TV1 and rejected for TV2. + // Note that for the ideal case, producer epoch + 1 should also be rejected for TV2, + // which is still under fixing. + writeTxnMarkers( + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + committed = true, + expectedError = if (useTV2) Errors.INVALID_PRODUCER_EPOCH else Errors.NONE + ) + + // The offset is committed for TV1 and not committed for TV2. + TestUtils.waitUntilTrue(() => + try { + fetchOffset(groupId, topic, partition) == (if (useTV2) -1L else offset + version) + } catch { + case _: Throwable => false + }, "unexpected txn commit offset" + ) + + // Complete the transaction. + endTxn( + producerId = producerIdAndEpoch.producerId, + producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, + transactionalId = transactionalId, + isTransactionV2Enabled = useTV2, + committed = true, + expectedError = Errors.NONE + ) + + // The offset is committed for TV2. 
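// Illustrative sketch (not from this patch): the committed offset these checks expect.
// Before the final endTxn the offset is only visible on TV1 (on TV2 the delayed marker was
// rejected, so the fetch still returns -1L); once endTxn completes, both paths should see
// offset + version. A hypothetical helper expressing that expectation:
//   def expectedOffset(useTV2: Boolean, afterEndTxn: Boolean, offset: Long, version: Int): Long =
//     if (useTV2 && !afterEndTxn) -1L else offset + version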
+ TestUtils.waitUntilTrue(() => + try { + fetchOffset(groupId, topic, partition) == offset + version + } catch { + case _: Throwable => false + }, "txn commit offset validation failed" + ) + } + } +} diff --git a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala index 2518da7a8578b..f1ba2c7ac5ed6 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala @@ -34,10 +34,8 @@ import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.test.{TestUtils => JTestUtils} -import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.{AfterEach, Test} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import scala.collection.mutable.ListBuffer import scala.collection.{Map, Seq} @@ -64,9 +62,8 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { super.tearDown() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader(quorum: String): Unit = { + @Test + def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader(): Unit = { brokers ++= (0 to 1).map { id => createBroker(fromProps(createBrokerConfig(id))) } // Given two topics with replication of a single partition @@ -97,9 +94,8 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { waitUntilTrue(() => messagesHaveLeaderEpoch(brokers(0), expectedLeaderEpoch, 4), "Leader epoch should be 1") } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldSendLeaderEpochRequestAndGetAResponse(quorum: String): Unit = { + @Test + def shouldSendLeaderEpochRequestAndGetAResponse(): Unit = { //3 brokers, put partition on 100/101 and then pretend to be 102 brokers ++= (100 to 102).map { id => createBroker(fromProps(createBrokerConfig(id))) } @@ -145,9 +141,8 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { fetcher1.close() } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def shouldIncreaseLeaderEpochBetweenLeaderRestarts(quorum: String): Unit = { + @Test + def shouldIncreaseLeaderEpochBetweenLeaderRestarts(): Unit = { //Setup: we are only interested in the single partition on broker 101 brokers += createBroker(fromProps(createBrokerConfig(100))) assertEquals(controllerServer.config.nodeId, waitUntilQuorumLeaderElected(controllerServer)) @@ -298,7 +293,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { } private def waitUntilQuorumLeaderElected(controllerServer: ControllerServer, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = { - val (leaderAndEpoch, _) = computeUntilTrue(controllerServer.raftManager.leaderAndEpoch, waitTime = timeout)(_.leaderId().isPresent) + val (leaderAndEpoch, _) = computeUntilTrue(controllerServer.raftManager.client.leaderAndEpoch, waitTime = timeout)(_.leaderId().isPresent) leaderAndEpoch.leaderId().orElseThrow(() => new AssertionError(s"Quorum Controller leader not elected after $timeout ms")) } diff --git a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala index 
1e297fc33c994..3abea688468b1 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala @@ -86,7 +86,7 @@ class OffsetsForLeaderEpochTest { //Then assertEquals( - Seq(newOffsetForLeaderTopicResult(tp, Errors.NONE, offsetAndEpoch.leaderEpoch, offsetAndEpoch.offset)), + Seq(newOffsetForLeaderTopicResult(tp, Errors.NONE, offsetAndEpoch.epoch(), offsetAndEpoch.offset)), response) } diff --git a/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala b/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala index 47b33e0dd7b82..297348ed790b5 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala @@ -116,7 +116,8 @@ class MockBlockingSender(offsets: java.util.Map[TopicPartition, EpochEndOffset], topicIds = Map.empty FetchResponse.of(Errors.NONE, 0, if (partitionData.isEmpty) JFetchMetadata.INVALID_SESSION_ID else 1, - partitionData) + partitionData, List.empty.asJava + ) case ApiKeys.LIST_OFFSETS => listOffsetsCount += 1 diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index 575d3855dcde5..32727a4c3cc7c 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -20,25 +20,30 @@ package kafka.server.metadata import kafka.coordinator.transaction.TransactionCoordinator import java.util.Collections.{singleton, singletonList, singletonMap} -import java.util.Properties +import java.util.{OptionalInt, Properties} import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} import kafka.log.LogManager +import kafka.server.share.SharePartitionManager import kafka.server.{BrokerServer, KafkaConfig, ReplicaManager} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, NewTopic} +import org.apache.kafka.common.Uuid import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.BROKER +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionRecord, RemoveTopicRecord, TopicRecord} import org.apache.kafka.common.test.{KafkaClusterTestKit, TestKitNodes} import org.apache.kafka.common.utils.Exit +import org.apache.kafka.coordinator.common.runtime.{KRaftCoordinatorMetadataDelta, KRaftCoordinatorMetadataImage} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance} +import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, DelegationTokenImage, FeaturesImage, MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance, ProducerIdsImage, ScramImage, TopicsImage} import org.apache.kafka.image.loader.LogDeltaManifest -import org.apache.kafka.metadata.publisher.AclPublisher +import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.LeaderAndEpoch -import 
org.apache.kafka.server.common.{KRaftVersion, MetadataVersion} +import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, ShareVersion} import org.apache.kafka.server.fault.FaultHandler import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -48,6 +53,7 @@ import org.mockito.Mockito.{doThrow, mock, verify} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer +import java.util import java.util.concurrent.TimeUnit import scala.jdk.CollectionConverters._ @@ -181,7 +187,7 @@ class BrokerMetadataPublisherTest { } @Test - def testNewImagePushedToGroupCoordinator(): Unit = { + def testGroupCoordinatorTopicDeletion(): Unit = { val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1) val logManager = mock(classOf[LogManager]) @@ -196,7 +202,8 @@ class BrokerMetadataPublisherTest { replicaManager, groupCoordinator, mock(classOf[TransactionCoordinator]), - Some(mock(classOf[ShareCoordinator])), + mock(classOf[ShareCoordinator]), + mock(classOf[SharePartitionManager]), mock(classOf[DynamicConfigPublisher]), mock(classOf[DynamicClientQuotaPublisher]), mock(classOf[DynamicTopicClusterQuotaPublisher]), @@ -207,6 +214,70 @@ class BrokerMetadataPublisherTest { faultHandler ) + val topicId = Uuid.randomUuid() + var delta = new MetadataDelta(MetadataImage.EMPTY) + delta.replay(new TopicRecord() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setTopicId(topicId) + ) + delta.replay(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) + .setLeader(config.brokerId) + ) + delta.replay(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(1) + .setLeader(config.brokerId) + ) + val image = delta.apply(MetadataProvenance.EMPTY) + + delta = new MetadataDelta(image) + delta.replay(new RemoveTopicRecord() + .setTopicId(topicId) + ) + + metadataPublisher.onMetadataUpdate(delta, delta.apply(MetadataProvenance.EMPTY), + LogDeltaManifest.newBuilder() + .provenance(MetadataProvenance.EMPTY) + .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) + .numBatches(1) + .elapsedNs(100) + .numBytes(42) + .build()) + + verify(groupCoordinator).onResignation(0, OptionalInt.empty()) + verify(groupCoordinator).onResignation(1, OptionalInt.empty()) + } + + @Test + def testNewImagePushedToGroupCoordinator(): Unit = { + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) + val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1) + val logManager = mock(classOf[LogManager]) + val replicaManager = mock(classOf[ReplicaManager]) + val groupCoordinator = mock(classOf[GroupCoordinator]) + val faultHandler = mock(classOf[FaultHandler]) + + val metadataPublisher = new BrokerMetadataPublisher( + config, + metadataCache, + logManager, + replicaManager, + groupCoordinator, + mock(classOf[TransactionCoordinator]), + mock(classOf[ShareCoordinator]), + mock(classOf[SharePartitionManager]), + mock(classOf[DynamicConfigPublisher]), + mock(classOf[DynamicClientQuotaPublisher]), + mock(classOf[DynamicTopicClusterQuotaPublisher]), + mock(classOf[ScramPublisher]), + mock(classOf[DelegationTokenPublisher]), + mock(classOf[AclPublisher]), + faultHandler, + faultHandler, + ) + val image = MetadataImage.EMPTY val delta = new MetadataDelta.Builder() .setImage(image) @@ -221,6 +292,70 @@ class BrokerMetadataPublisherTest { .numBytes(42) .build()) - verify(groupCoordinator).onNewMetadataImage(image, 
delta) + verify(groupCoordinator).onNewMetadataImage(new KRaftCoordinatorMetadataImage(image), new KRaftCoordinatorMetadataDelta(delta)) + } + + @Test + def testNewShareVersionPushedToSharePartitionManager(): Unit = { + val sharePartitionManager = mock(classOf[SharePartitionManager]) + val faultHandler = mock(classOf[FaultHandler]) + + val metadataPublisher = new BrokerMetadataPublisher( + KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)), + new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1), + mock(classOf[LogManager]), + mock(classOf[ReplicaManager]), + mock(classOf[GroupCoordinator]), + mock(classOf[TransactionCoordinator]), + mock(classOf[ShareCoordinator]), + sharePartitionManager, + mock(classOf[DynamicConfigPublisher]), + mock(classOf[DynamicClientQuotaPublisher]), + mock(classOf[DynamicTopicClusterQuotaPublisher]), + mock(classOf[ScramPublisher]), + mock(classOf[DelegationTokenPublisher]), + mock(classOf[AclPublisher]), + faultHandler, + faultHandler + ) + + val featuresImage = new FeaturesImage( + util.Map.of( + MetadataVersion.FEATURE_NAME, MetadataVersion.latestTesting().featureLevel(), + ShareVersion.FEATURE_NAME, ShareVersion.SV_1.featureLevel() + ), + MetadataVersion.latestTesting()) + + val image = new MetadataImage( + MetadataProvenance.EMPTY, + featuresImage, + ClusterImageTest.IMAGE1, + TopicsImage.EMPTY, + ConfigurationsImage.EMPTY, + ClientQuotasImage.EMPTY, + ProducerIdsImage.EMPTY, + AclsImage.EMPTY, + ScramImage.EMPTY, + DelegationTokenImage.EMPTY + ) + + // Share version 1 is getting passed to features delta. + val delta = new MetadataDelta(image) + delta.replay(new FeatureLevelRecord().setName(ShareVersion.FEATURE_NAME).setFeatureLevel(1)) + + metadataPublisher.onMetadataUpdate( + delta, + image, + LogDeltaManifest.newBuilder(). + provenance(MetadataProvenance.EMPTY). + leaderAndEpoch(new LeaderAndEpoch(OptionalInt.of(1), 1)). + numBatches(1). + elapsedNs(1L). + numBytes(1). + build() + ) + + // SharePartitionManager is receiving the latest changes. 
+ verify(sharePartitionManager).onShareVersionToggle(any(), any()) } } diff --git a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala index 5b3e9abe1112b..336a8dd55c3bc 100644 --- a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala +++ b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala @@ -33,19 +33,19 @@ import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.{Assignment, import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.config.{AbstractConfig, TopicConfig} +import org.apache.kafka.common.message.{KRaftVersionRecord, LeaderChangeMessage, SnapshotFooterRecord, SnapshotHeaderRecord, VotersRecord} import org.apache.kafka.common.metadata.{PartitionChangeRecord, RegisterBrokerRecord, TopicRecord} import org.apache.kafka.common.protocol.{ApiMessage, ByteBufferAccessor, MessageUtil, ObjectSerializationCache} -import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordVersion, SimpleRecord} +import org.apache.kafka.common.record.{ControlRecordType, ControlRecordUtils, EndTransactionMarker, MemoryRecords, Record, RecordVersion, SimpleRecord} import org.apache.kafka.common.utils.{Exit, Utils} import org.apache.kafka.coordinator.group.generated.{ConsumerGroupMemberMetadataValue, ConsumerGroupMetadataKey, ConsumerGroupMetadataValue, GroupMetadataKey, GroupMetadataValue} import org.apache.kafka.coordinator.share.generated.{ShareSnapshotKey, ShareSnapshotValue, ShareUpdateKey, ShareUpdateValue} import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.metadata.MetadataRecordSerde -import org.apache.kafka.raft.{KafkaRaftClient, MetadataLogConfig, OffsetAndEpoch, VoterSetTest} -import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion} -import org.apache.kafka.server.config.ServerLogConfigs +import org.apache.kafka.raft.{MetadataLogConfig, VoterSetTest} +import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.log.remote.metadata.storage.serialization.RemoteLogMetadataSerde import org.apache.kafka.server.log.remote.storage.{RemoteLogSegmentId, RemoteLogSegmentMetadata, RemoteLogSegmentMetadataUpdate, RemoteLogSegmentState, RemotePartitionDeleteMetadata, RemotePartitionDeleteState} import org.apache.kafka.server.storage.log.FetchIsolation @@ -485,7 +485,7 @@ class DumpLogSegmentsTest { new TopicRecord().setName("test-topic").setTopicId(Uuid.randomUuid()), 0.toShort), new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(Uuid.randomUuid()).setLeader(1). 
- setPartitionId(0).setIsr(util.Arrays.asList(0, 1, 2)), 0.toShort) + setPartitionId(0).setIsr(util.List.of(0, 1, 2)), 0.toShort) ) val records: Array[SimpleRecord] = metadataRecords.map(message => { @@ -524,9 +524,48 @@ class DumpLogSegmentsTest { assertTrue(output.contains("skipping")) } + @Test + def testDumpControlRecord(): Unit = { + log = createTestLog + + log.appendAsLeader(MemoryRecords.withEndTransactionMarker(0L, 0.toShort, + new EndTransactionMarker(ControlRecordType.COMMIT, 100) + ), 0, AppendOrigin.COORDINATOR) + + log.appendAsLeader(MemoryRecords.withLeaderChangeMessage(0L, 0L, 0, ByteBuffer.allocate(4), + new LeaderChangeMessage() + ), 0, AppendOrigin.COORDINATOR) + + log.appendAsLeader(MemoryRecords.withSnapshotHeaderRecord(0L, 0L, 0, ByteBuffer.allocate(4), + new SnapshotHeaderRecord() + ), 0, AppendOrigin.COORDINATOR) + + log.appendAsLeader(MemoryRecords.withSnapshotFooterRecord(0L, 0L, 0, ByteBuffer.allocate(4), + new SnapshotFooterRecord() + .setVersion(ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION) + ), 0, AppendOrigin.COORDINATOR) + + log.appendAsLeader(MemoryRecords.withKRaftVersionRecord(0L, 0L, 0, ByteBuffer.allocate(4), + new KRaftVersionRecord() + ), 0, AppendOrigin.COORDINATOR) + + log.appendAsLeader(MemoryRecords.withVotersRecord(0L, 0L, 0, ByteBuffer.allocate(4), + new VotersRecord() + ), 0, AppendOrigin.COORDINATOR) + log.flush(false) + + val output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--files", logFilePath)) + assertTrue(output.contains("endTxnMarker"), output) + assertTrue(output.contains("LeaderChange"), output) + assertTrue(output.contains("SnapshotHeader"), output) + assertTrue(output.contains("SnapshotFooter"), output) + assertTrue(output.contains("KRaftVersion"), output) + assertTrue(output.contains("KRaftVoters"), output) + } + @Test def testDumpMetadataSnapshot(): Unit = { - val metadataRecords = Seq( + val metadataRecords = util.List.of( new ApiMessageAndVersion( new RegisterBrokerRecord().setBrokerId(0).setBrokerEpoch(10), 0.toShort), new ApiMessageAndVersion( @@ -535,7 +574,7 @@ class DumpLogSegmentsTest { new TopicRecord().setName("test-topic").setTopicId(Uuid.randomUuid()), 0.toShort), new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(Uuid.randomUuid()).setLeader(1). 
- setPartitionId(0).setIsr(util.Arrays.asList(0, 1, 2)), 0.toShort) + setPartitionId(0).setIsr(util.List.of(0, 1, 2)), 0.toShort) ) val metadataLog = KafkaMetadataLog( @@ -544,15 +583,11 @@ class DumpLogSegmentsTest { logDir, time, time.scheduler, - new MetadataLogConfig( - 100 * 1024, + createMetadataLogConfig( 100 * 1024, 10 * 1000, 100 * 1024, - 60 * 1000, - KafkaRaftClient.MAX_BATCH_SIZE_BYTES, - KafkaRaftClient.MAX_FETCH_SIZE_BYTES, - ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT + 60 * 1000 ), 1 ) @@ -568,7 +603,7 @@ class DumpLogSegmentsTest { .setVoterSet(Optional.of(VoterSetTest.voterSet(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true)))) .build(MetadataRecordSerde.INSTANCE) ) { snapshotWriter => - snapshotWriter.append(metadataRecords.asJava) + snapshotWriter.append(metadataRecords) snapshotWriter.freeze() } @@ -627,14 +662,14 @@ class DumpLogSegmentsTest { // Get all the batches val output = runDumpLogSegments(Array("--files", logFilePath)) - val lines = util.Arrays.asList(output.split("\n"): _*).listIterator() + val lines = util.List.of(output.split("\n"): _*).listIterator() // Get total bytes of the partial batches val partialBatchesBytes = readPartialBatchesBytes(lines, partialBatches) // Request only the partial batches by bytes val partialOutput = runDumpLogSegments(Array("--max-bytes", partialBatchesBytes.toString, "--files", logFilePath)) - val partialLines = util.Arrays.asList(partialOutput.split("\n"): _*).listIterator() + val partialLines = util.List.of(partialOutput.split("\n"): _*).listIterator() // Count the total of partial batches limited by bytes val partialBatchesCount = countBatches(partialLines) @@ -887,14 +922,14 @@ class DumpLogSegmentsTest { .setProducerEpoch(2.toShort) .setProducerId(12L) .setTransactionLastUpdateTimestampMs(123L) - .setTransactionPartitions(List( + .setTransactionPartitions(util.List.of( new TransactionLogValue.PartitionsSchema() .setTopic("topic1") - .setPartitionIds(List(0, 1, 2).map(Integer.valueOf).asJava), + .setPartitionIds(util.List.of[Integer](0, 1, 2)), new TransactionLogValue.PartitionsSchema() .setTopic("topic2") - .setPartitionIds(List(3, 4, 5).map(Integer.valueOf).asJava) - ).asJava) + .setPartitionIds(util.List.of[Integer](3, 4, 5)) + )) .setTransactionStartTimestampMs(13L) .setTransactionStatus(0) .setTransactionTimeoutMs(14), @@ -989,7 +1024,7 @@ class DumpLogSegmentsTest { ) val output = runDumpLogSegments(Array("--deep-iteration", "--files", logFilePath)) - val lines = util.Arrays.asList(output.split("\n"): _*).listIterator() + val lines = util.List.of(output.split("\n"): _*).listIterator() for (batch <- logReadInfo.records.batches.asScala) { val parsedBatchOpt = readBatchMetadata(lines) @@ -1066,13 +1101,13 @@ class DumpLogSegmentsTest { .setStartOffset(0) .setCreateTimestamp(timestamp) .setWriteTimestamp(timestamp) - .setStateBatches(List[ShareSnapshotValue.StateBatch]( + .setStateBatches(util.List.of[ShareSnapshotValue.StateBatch]( new ShareSnapshotValue.StateBatch() .setFirstOffset(0) .setLastOffset(4) .setDeliveryState(2) .setDeliveryCount(1) - ).asJava), + )), 0.toShort) )) ) @@ -1092,13 +1127,13 @@ class DumpLogSegmentsTest { .setSnapshotEpoch(0) .setLeaderEpoch(0) .setStartOffset(0) - .setStateBatches(List[ShareUpdateValue.StateBatch]( + .setStateBatches(util.List.of[ShareUpdateValue.StateBatch]( new ShareUpdateValue.StateBatch() .setFirstOffset(0) .setLastOffset(4) .setDeliveryState(2) .setDeliveryCount(1) - ).asJava), + )), 0.toShort) )) ) @@ -1155,4 +1190,19 @@ class DumpLogSegmentsTest { )) ) } + + private 
def createMetadataLogConfig( + internalLogSegmentBytes: Int, + logSegmentMillis: Long, + retentionMaxBytes: Long, + retentionMillis: Long + ): MetadataLogConfig = { + val config: util.Map[String, Any] = util.Map.of( + MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG, internalLogSegmentBytes, + MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, logSegmentMillis, + MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG, retentionMaxBytes, + MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG, retentionMillis, + ) + new MetadataLogConfig(new AbstractConfig(MetadataLogConfig.CONFIG_DEF, config, false)) + } } diff --git a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala index 101b8f43bc48a..a36ad51572a3f 100644 --- a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala +++ b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala @@ -79,7 +79,7 @@ class StorageToolTest { val tempDir = TestUtils.tempDir() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -97,7 +97,7 @@ Found problem: tempDir.delete() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) assertEquals(s"""Found problem: ${tempDir.toString} does not exist @@ -111,7 +111,7 @@ Found problem: val tempFile = TestUtils.tempFile() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), true, Seq(tempFile.toString))) + infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempFile.toString))) assertEquals(s"""Found problem: ${tempFile.toString} is not a directory @@ -125,13 +125,13 @@ Found problem: val tempDir = TestUtils.tempDir() try { Files.write(tempDir.toPath.resolve(MetaPropertiesEnsemble.META_PROPERTIES_NAME), - String.join("\n", util.Arrays.asList( + String.join("\n", util.List.of( "version=1", "node.id=1", "cluster.id=XcZZOzUqS4yHOjhMQB6JLQ")). getBytes(StandardCharsets.UTF_8)) assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), false, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), kraftMode = false, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -150,13 +150,13 @@ Found problem: val tempDir = TestUtils.tempDir() try { Files.write(tempDir.toPath.resolve(MetaPropertiesEnsemble.META_PROPERTIES_NAME), - String.join("\n", util.Arrays.asList( + String.join("\n", util.List.of( "version=0", "broker.id=1", "cluster.id=26c36907-4158-4a35-919d-6534229f5241")). getBytes(StandardCharsets.UTF_8)) assertEquals(1, StorageTool. 
- infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -193,7 +193,7 @@ Found problem: ): Int = { val tempDir = TestUtils.tempDir() try { - val configPathString = new File(tempDir.getAbsolutePath(), "format.props").toString + val configPathString = new File(tempDir.getAbsolutePath, "format.props").toString PropertiesUtils.writePropertiesFile(properties, configPathString, true) val arguments = ListBuffer[String]("format", "--cluster-id", "XcZZOzUqS4yHOjhMQB6JLQ") @@ -234,7 +234,7 @@ Found problem: val unavailableDir1 = TestUtils.tempFile() val properties = new Properties() properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", s"${availableDir1},${unavailableDir1}") + properties.setProperty("log.dirs", s"$availableDir1,$unavailableDir1") val stream = new ByteArrayOutputStream() assertEquals(0, runFormatCommand(stream, properties)) @@ -273,7 +273,7 @@ Found problem: assertEquals(0, runFormatCommand(stream, properties)) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream2 = new ByteArrayOutputStream() - assertEquals(0, runFormatCommand(stream2, properties, Seq(), true)) + assertEquals(0, runFormatCommand(stream2, properties, Seq(), ignoreFormatted = true)) } @Test @@ -282,7 +282,7 @@ Found problem: val unavailableDir2 = TestUtils.tempFile() val properties = new Properties() properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", s"${unavailableDir1},${unavailableDir2}") + properties.setProperty("log.dirs", s"$unavailableDir1,$unavailableDir2") val stream = new ByteArrayOutputStream() assertEquals("No available log directories to format.", assertThrows(classOf[FormatterException], () => runFormatCommand(stream, properties)).getMessage) @@ -306,6 +306,20 @@ Found problem: "Failed to find content in output: " + stream.toString()) } + @Test + def testFormatWithUnsupportedReleaseVersion(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultStaticQuorumProperties) + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + val failure = assertThrows(classOf[TerseFailure], () => + runFormatCommand(stream, properties, Seq("--release-version", "3.3-IV1"))).getMessage + assertTrue(failure.contains("Unknown metadata.version '3.3-IV1'")) + assertTrue(failure.contains(MetadataVersion.MINIMUM_VERSION.version)) + assertTrue(failure.contains(MetadataVersion.latestProduction().version)) + } + @Test def testFormatWithReleaseVersionAsFeature(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) @@ -325,7 +339,7 @@ Found problem: properties.putAll(defaultStaticQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) assertEquals("Unsupported feature: non.existent.feature. 
Supported features are: " + - "eligible.leader.replicas.version, group.version, kraft.version, share.version, transaction.version", + "eligible.leader.replicas.version, group.version, kraft.version, share.version, streams.version, transaction.version", assertThrows(classOf[FormatterException], () => runFormatCommand(new ByteArrayOutputStream(), properties, Seq("--feature", "non.existent.feature=20"))).getMessage) @@ -372,18 +386,151 @@ Found problem: } @Test - def testFormatWithStandaloneFlagOnBrokerFails(): Unit = { + def testFormatWithReleaseVersionAndFeatureOverride(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) val properties = new Properties() properties.putAll(defaultStaticQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream = new ByteArrayOutputStream() + assertEquals(0, runFormatCommand(stream, properties, Seq( + "--release-version", "3.7-IV0", + "--feature", "share.version=1"))) + + // Verify that the feature override is applied by checking the bootstrap metadata + val bootstrapMetadata = new BootstrapDirectory(availableDirs.head.toString).read + + // Verify that the share.version feature is set to 1 as specified + assertEquals(1.toShort, bootstrapMetadata.featureLevel("share.version"), + "share.version should be set to 1") + + // Verify the command output contains the expected release version + assertTrue(stream.toString().contains("3.7-IV0"), + "Failed to find release version in output: " + stream.toString()) + + // Verify that the format command completed successfully with features + assertTrue(stream.toString().contains("Formatting metadata directory"), + "Failed to find formatting message in output: " + stream.toString()) + } + + @Test + def testFormatWithMultipleFeatures(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultStaticQuorumProperties) + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + assertEquals(0, runFormatCommand(stream, properties, Seq( + "--release-version", "3.8-IV0", + "--feature", "share.version=1", + "--feature", "transaction.version=2", + "--feature", "group.version=1"))) + + // Verify that all features are properly bootstrapped by checking the bootstrap metadata + val bootstrapMetadata = new BootstrapDirectory(availableDirs.head.toString).read + + // Verify that all specified features are set correctly + assertEquals(1.toShort, bootstrapMetadata.featureLevel("share.version"), + "share.version should be set to 1") + assertEquals(2.toShort, bootstrapMetadata.featureLevel("transaction.version"), + "transaction.version should be set to 2") + assertEquals(1.toShort, bootstrapMetadata.featureLevel("group.version"), + "group.version should be set to 1") + + // Verify the command output contains the expected release version + assertTrue(stream.toString().contains("3.8-IV0"), + "Failed to find release version in output: " + stream.toString()) + + // Verify that the format command completed successfully with multiple features + assertTrue(stream.toString().contains("Formatting metadata directory"), + "Failed to find formatting message in output: " + stream.toString()) + } + + @Test + def testFormatWithInvalidFeatureThrowsError(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultStaticQuorumProperties) + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + + // 
Test with an invalid feature that doesn't exist + val exception = assertThrows(classOf[FormatterException], () => { + runFormatCommand(stream, properties, Seq( + "--release-version", "3.7-IV0", + "--feature", "stream.version=1")) + }) + + assertTrue(exception.getMessage.contains("Unsupported feature: stream.version.")) + } + + @Test + def testFormatWithStandaloneFlagOnBrokerFails(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.setProperty("process.roles", "broker") + properties.setProperty("node.id", "0") + properties.setProperty("controller.listener.names", "CONTROLLER") + properties.setProperty("controller.quorum.bootstrap.servers", "localhost:9093") + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--standalone") assertEquals("You can only use --standalone on a controller.", assertThrows(classOf[TerseFailure], () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage) } + @Test + def testFormatWithStandaloneFailsWithStaticVotersConfig(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultDynamicQuorumProperties) + properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--standalone") + assertEquals("You cannot specify controller.quorum.voters and " + + "format the node with --initial-controllers or --standalone. If you " + + "want to use dynamic quorum, please remove controller.quorum.voters and " + + "specify controller.quorum.bootstrap.servers instead.", + assertThrows(classOf[TerseFailure], + () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage + ) + } + + @Test + def testFormatWithInitialControllersFailsWithStaticVotersConfig(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultDynamicQuorumProperties) + properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + val arguments = ListBuffer[String]( + "--release-version", "3.9-IV0", + "--initial-controllers", + "0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," + ) + assertEquals("You cannot specify controller.quorum.voters and " + + "format the node with --initial-controllers or --standalone. 
If you " + + "want to use dynamic quorum, please remove controller.quorum.voters and " + + "specify controller.quorum.bootstrap.servers instead.", + assertThrows(classOf[TerseFailure], + () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage + ) + } + + @Test + def testFormatWithNoInitialControllersPassesWithVotersConfig(): Unit = { + val availableDirs = Seq(TestUtils.tempDir()) + val properties = new Properties() + properties.putAll(defaultDynamicQuorumProperties) + properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") + properties.setProperty("log.dirs", availableDirs.mkString(",")) + val stream = new ByteArrayOutputStream() + val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--no-initial-controllers") + assertEquals(0, runFormatCommand(stream, properties, arguments.toSeq)) + } + @ParameterizedTest @ValueSource(booleans = Array(false, true)) def testFormatWithStandaloneFlag(setKraftVersionFeature: Boolean): Unit = { @@ -458,19 +605,14 @@ Found problem: Seq("--release-version", "3.9-IV0"))).getMessage) } - @ParameterizedTest - @ValueSource(booleans = Array(false, true)) - def testFormatWithNoInitialControllersSucceedsOnController(setKraftVersionFeature: Boolean): Unit = { + @Test + def testFormatWithNoInitialControllersSucceedsOnController(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) val properties = new Properties() properties.putAll(defaultDynamicQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream = new ByteArrayOutputStream() val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--no-initial-controllers") - if (setKraftVersionFeature) { - arguments += "--feature" - arguments += "kraft.version=1" - } assertEquals(0, runFormatCommand(stream, properties, arguments.toSeq)) assertTrue(stream.toString(). contains("Formatting metadata directory %s".format(availableDirs.head)), @@ -588,18 +730,18 @@ Found problem: runVersionMappingCommand(stream, "2.9-IV2") }) - assertEquals("Unknown release version '2.9-IV2'." + - " Supported versions are: " + MetadataVersion.MINIMUM_VERSION.version + - " to " + MetadataVersion.LATEST_PRODUCTION.version, exception.getMessage + assertEquals("Unknown metadata.version '2.9-IV2'. Supported metadata.version are: " + + MetadataVersion.metadataVersionsToString(MetadataVersion.MINIMUM_VERSION, MetadataVersion.latestTesting()), + exception.getMessage ) val exception2 = assertThrows(classOf[TerseFailure], () => { runVersionMappingCommand(stream, "invalid") }) - assertEquals("Unknown release version 'invalid'." + - " Supported versions are: " + MetadataVersion.MINIMUM_VERSION.version + - " to " + MetadataVersion.LATEST_PRODUCTION.version, exception2.getMessage + assertEquals("Unknown metadata.version 'invalid'. Supported metadata.version are: " + + MetadataVersion.metadataVersionsToString(MetadataVersion.MINIMUM_VERSION, MetadataVersion.latestTesting()), + exception2.getMessage ) } diff --git a/core/src/test/scala/unit/kafka/utils/PoolTest.scala b/core/src/test/scala/unit/kafka/utils/PoolTest.scala deleted file mode 100644 index 4f883296ef7cf..0000000000000 --- a/core/src/test/scala/unit/kafka/utils/PoolTest.scala +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.api.Test - - -class PoolTest { - @Test - def testRemoveAll(): Unit = { - val pool = new Pool[Int, String] - pool.put(1, "1") - pool.put(2, "2") - pool.put(3, "3") - - assertEquals(3, pool.size) - - pool.removeAll(Seq(1, 2)) - assertEquals(1, pool.size) - assertEquals("3", pool.get(3)) - pool.removeAll(Seq(3)) - assertEquals(0, pool.size) - } -} diff --git a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala deleted file mode 100644 index 24be1e921bc24..0000000000000 --- a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.utils - -import java.util.{Optional, Properties} -import java.util.concurrent.atomic._ -import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, Executors, TimeUnit} -import kafka.utils.TestUtils.retry -import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.util.{KafkaScheduler, MockTime} -import org.apache.kafka.storage.internals.log.{LocalLog, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetsListener, LogSegments, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog} -import org.apache.kafka.storage.log.metrics.BrokerTopicStats -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, Timeout} - - -class SchedulerTest { - - val scheduler = new KafkaScheduler(1) - val mockTime = new MockTime - val counter1 = new AtomicInteger(0) - val counter2 = new AtomicInteger(0) - - @BeforeEach - def setup(): Unit = { - scheduler.startup() - } - - @AfterEach - def teardown(): Unit = { - scheduler.shutdown() - } - - @Test - def testMockSchedulerNonPeriodicTask(): Unit = { - mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) - mockTime.scheduler.scheduleOnce("test2", () => counter2.getAndIncrement(), 100) - assertEquals(0, counter1.get, "Counter1 should not be incremented prior to task running.") - assertEquals(0, counter2.get, "Counter2 should not be incremented prior to task running.") - mockTime.sleep(1) - assertEquals(1, counter1.get, "Counter1 should be incremented") - assertEquals(0, counter2.get, "Counter2 should not be incremented") - mockTime.sleep(100000) - assertEquals(1, counter1.get, "More sleeping should not result in more incrementing on counter1.") - assertEquals(1, counter2.get, "Counter2 should now be incremented.") - } - - @Test - def testMockSchedulerPeriodicTask(): Unit = { - mockTime.scheduler.schedule("test1", () => counter1.getAndIncrement(), 1, 1) - mockTime.scheduler.schedule("test2", () => counter2.getAndIncrement(), 100, 100) - assertEquals(0, counter1.get, "Counter1 should not be incremented prior to task running.") - assertEquals(0, counter2.get, "Counter2 should not be incremented prior to task running.") - mockTime.sleep(1) - assertEquals(1, counter1.get, "Counter1 should be incremented") - assertEquals(0, counter2.get, "Counter2 should not be incremented") - mockTime.sleep(100) - assertEquals(101, counter1.get, "Counter1 should be incremented 101 times") - assertEquals(1, counter2.get, "Counter2 should not be incremented once") - } - - @Test - def testReentrantTaskInMockScheduler(): Unit = { - mockTime.scheduler.scheduleOnce("test1", () => mockTime.scheduler.scheduleOnce("test2", () => counter2.getAndIncrement(), 0), 1) - mockTime.sleep(1) - assertEquals(1, counter2.get) - } - - @Test - def testNonPeriodicTask(): Unit = { - scheduler.scheduleOnce("test", () => counter1.getAndIncrement()) - retry(30000) { - assertEquals(counter1.get, 1) - } - Thread.sleep(5) - assertEquals(1, counter1.get, "Should only run once") - } - - @Test - def testNonPeriodicTaskWhenPeriodIsZero(): Unit = { - scheduler.schedule("test", () => counter1.getAndIncrement(), 0, 0) - retry(30000) { - assertEquals(counter1.get, 1) - } - Thread.sleep(5) - assertEquals(1, counter1.get, "Should only run once") - } - - @Test - def testPeriodicTask(): Unit = { - scheduler.schedule("test", () => counter1.getAndIncrement(), 0, 5) - retry(30000) { - assertTrue(counter1.get >= 20, "Should count to 20") - } - } - - @Test - def testRestart(): Unit = { - // 
schedule a task to increment a counter - mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) - mockTime.sleep(1) - assertEquals(1, counter1.get()) - - // restart the scheduler - mockTime.scheduler.shutdown() - mockTime.scheduler.startup() - - // schedule another task to increment the counter - mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) - mockTime.sleep(1) - assertEquals(2, counter1.get()) - } - - @Test - def testUnscheduleProducerTask(): Unit = { - val tmpDir = TestUtils.tempDir() - val logDir = TestUtils.randomPartitionLogDir(tmpDir) - val logConfig = new LogConfig(new Properties()) - val brokerTopicStats = new BrokerTopicStats - val maxTransactionTimeoutMs = 5 * 60 * 1000 - val maxProducerIdExpirationMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT - val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT - val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) - val logDirFailureChannel = new LogDirFailureChannel(10) - val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) - val producerStateManagerConfig = new ProducerStateManagerConfig(maxProducerIdExpirationMs, false) - val producerStateManager = new ProducerStateManager(topicPartition, logDir, - maxTransactionTimeoutMs, producerStateManagerConfig, mockTime) - val offsets = new LogLoader( - logDir, - topicPartition, - logConfig, - scheduler, - mockTime, - logDirFailureChannel, - true, - segments, - 0L, - 0L, - leaderEpochCache, - producerStateManager, - new ConcurrentHashMap[String, Integer], - false - ).load() - val localLog = new LocalLog(logDir, logConfig, segments, offsets.recoveryPoint, - offsets.nextOffsetMetadata, scheduler, mockTime, topicPartition, logDirFailureChannel) - val log = new UnifiedLog(offsets.logStartOffset, - localLog, - brokerTopicStats, - producerIdExpirationCheckIntervalMs, - leaderEpochCache, - producerStateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) - assertTrue(scheduler.taskRunning(log.producerExpireCheck)) - log.close() - assertFalse(scheduler.taskRunning(log.producerExpireCheck)) - } - - /** - * Verify that scheduler lock is not held when invoking task method, allowing new tasks to be scheduled - * when another is being executed. 
This is required to avoid deadlocks when: - * a) Thread1 executes a task which attempts to acquire LockA - * b) Thread2 holding LockA attempts to schedule a new task - */ - @Timeout(15) - @Test - def testMockSchedulerLocking(): Unit = { - val initLatch = new CountDownLatch(1) - val completionLatch = new CountDownLatch(2) - val taskLatches = List(new CountDownLatch(1), new CountDownLatch(1)) - def scheduledTask(taskLatch: CountDownLatch): Unit = { - initLatch.countDown() - assertTrue(taskLatch.await(30, TimeUnit.SECONDS), "Timed out waiting for latch") - completionLatch.countDown() - } - mockTime.scheduler.scheduleOnce("test1", () => scheduledTask(taskLatches.head), 1) - val tickExecutor = Executors.newSingleThreadScheduledExecutor() - try { - tickExecutor.scheduleWithFixedDelay(() => mockTime.sleep(1), 0, 1, TimeUnit.MILLISECONDS) - - // wait for first task to execute and then schedule the next task while the first one is running - assertTrue(initLatch.await(10, TimeUnit.SECONDS)) - mockTime.scheduler.scheduleOnce("test2", () => scheduledTask(taskLatches(1)), 1) - - taskLatches.foreach(_.countDown()) - assertTrue(completionLatch.await(10, TimeUnit.SECONDS), "Tasks did not complete") - - } finally { - tickExecutor.shutdownNow() - } - } - - @Test - def testPendingTaskSize(): Unit = { - val latch1 = new CountDownLatch(1) - val latch2 = new CountDownLatch(2) - val task1 = new Runnable { - override def run(): Unit = { - latch1.await() - } - } - scheduler.scheduleOnce("task1", task1, 0) - scheduler.scheduleOnce("task2", () => latch2.countDown(), 5) - scheduler.scheduleOnce("task3", () => latch2.countDown(), 5) - assertEquals(2, scheduler.pendingTaskSize()) - latch1.countDown() - latch2.await() - assertEquals(0, scheduler.pendingTaskSize()) - scheduler.shutdown() - assertEquals(0, scheduler.pendingTaskSize()) - } -} diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 8281509f6092d..8b0affae9eab9 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -56,7 +56,7 @@ import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfi import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Assertions._ @@ -72,7 +72,7 @@ import java.time.Duration import java.util import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean -import java.util.{Collections, Optional, Properties} +import java.util.{Optional, Properties} import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, mutable} import scala.concurrent.duration.FiniteDuration @@ -181,7 +181,7 @@ object TestUtils extends Logging { listenerName: ListenerName ): String = { brokers.map { s => - val listener = s.config.effectiveAdvertisedBrokerListeners.find(_.listenerName == listenerName).getOrElse( + val listener = s.config.effectiveAdvertisedBrokerListeners.find(_.listener == listenerName.value).getOrElse( 
sys.error(s"Could not find listener with name ${listenerName.value}")) formatAddress(listener.host, s.boundPort(listenerName)) }.mkString(",") @@ -339,7 +339,7 @@ object TestUtils extends Logging { topicConfig.forEach((k, v) => configsMap.put(k.toString, v.toString)) val result = if (replicaAssignment.isEmpty) { - admin.createTopics(Collections.singletonList(new NewTopic( + admin.createTopics(util.List.of(new NewTopic( topic, numPartitions, replicationFactor.toShort).configs(configsMap))) } else { val assignment = new util.HashMap[Integer, util.List[Integer]]() @@ -348,7 +348,7 @@ object TestUtils extends Logging { v.foreach(r => replicas.add(r.asInstanceOf[Integer])) assignment.put(k.asInstanceOf[Integer], replicas) } - admin.createTopics(Collections.singletonList(new NewTopic( + admin.createTopics(util.List.of(new NewTopic( topic, assignment).configs(configsMap))) } @@ -410,7 +410,7 @@ object TestUtils extends Logging { topic: String ): TopicDescription = { val describedTopics = admin.describeTopics( - Collections.singleton(topic) + util.Set.of(topic) ).allTopicNames().get() describedTopics.get(topic) } @@ -466,7 +466,7 @@ object TestUtils extends Logging { controllers: Seq[ControllerServer] ): Unit = { try { - admin.deleteTopics(Collections.singletonList(topic)).all().get() + admin.deleteTopics(util.List.of(topic)).all().get() } catch { case e: ExecutionException if e.getCause != null && e.getCause.isInstanceOf[UnknownTopicOrPartitionException] => @@ -589,7 +589,7 @@ object TestUtils extends Logging { newLeaderOpt: Option[Int] = None ): Int = { def getPartitionLeader(topic: String, partition: Int): Option[Int] = { - admin.describeTopics(Collections.singletonList(topic)).allTopicNames().get().get(topic).partitions().asScala. + admin.describeTopics(util.List.of(topic)).allTopicNames().get().get(topic).partitions().asScala. find(_.partition() == partition). 
flatMap { p => if (p.leader().id() == Node.noNode().id()) { @@ -821,7 +821,7 @@ object TestUtils extends Logging { waitUntilTrue( () => brokers.forall { broker => if (expectedNumPartitions == 0) { - broker.metadataCache.numPartitions(topic).isEmpty() + broker.metadataCache.numPartitions(topic).isEmpty } else { broker.metadataCache.numPartitions(topic).orElse(null) == expectedNumPartitions } @@ -900,7 +900,6 @@ object TestUtils extends Logging { } else if (oldLeaderOpt.isDefined) { debug(s"Checking leader that has changed from $oldLeaderOpt") brokers.find { broker => - broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) broker.config.brokerId != oldLeaderOpt.get && broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) }.map(_.config.brokerId) @@ -953,7 +952,7 @@ object TestUtils extends Logging { time: MockTime = new MockTime(), recoveryThreadsPerDataDir: Int = 4, transactionVerificationEnabled: Boolean = false, - log: Option[UnifiedLog] = None, + logFn: Option[(TopicPartition, Option[Uuid]) => UnifiedLog] = None, remoteStorageSystemEnable: Boolean = false, initialTaskDelayMs: Long = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT): LogManager = { val logManager = new LogManager(logDirs = logDirs.map(_.getAbsoluteFile), @@ -974,11 +973,17 @@ object TestUtils extends Logging { brokerTopicStats = new BrokerTopicStats, logDirFailureChannel = new LogDirFailureChannel(logDirs.size), remoteStorageSystemEnable = remoteStorageSystemEnable, - initialTaskDelayMs = initialTaskDelayMs) + initialTaskDelayMs = initialTaskDelayMs, + cleanerFactory = (cleanerConfig, files, map, logDirFailureChannel, time) => Mockito.spy(new LogCleaner(cleanerConfig, files, map, logDirFailureChannel, time)) + ) - if (log.isDefined) { + if (logFn.isDefined) { val spyLogManager = Mockito.spy(logManager) - Mockito.doReturn(log.get, Nil: _*).when(spyLogManager).getOrCreateLog(any(classOf[TopicPartition]), anyBoolean(), anyBoolean(), any(classOf[Optional[Uuid]]), any(classOf[Option[Uuid]])) + Mockito.doAnswer(answer => { + val topicPartition = answer.getArgument(0, classOf[TopicPartition]) + val topicId = answer.getArgument(3, classOf[Optional[Uuid]]) + logFn.get(topicPartition, OptionConverters.toScala(topicId)) + }).when(spyLogManager).getOrCreateLog(any(classOf[TopicPartition]), anyBoolean(), anyBoolean(), any(classOf[Optional[Uuid]]), any(classOf[Option[Uuid]])) spyLogManager } else logManager @@ -1089,16 +1094,16 @@ object TestUtils extends Logging { checkpoints.forall(checkpointsPerLogDir => !checkpointsPerLogDir.containsKey(tp)) }), "Cleaner offset for deleted partition should have been removed") waitUntilTrue(() => brokers.forall(broker => - broker.config.logDirs.forall { logDir => + broker.config.logDirs.stream().allMatch { logDir => topicPartitions.forall { tp => !new File(logDir, tp.topic + "-" + tp.partition).exists() } } ), "Failed to soft-delete the data to a delete directory") waitUntilTrue(() => brokers.forall(broker => - broker.config.logDirs.forall { logDir => + broker.config.logDirs.stream().allMatch { logDir => topicPartitions.forall { tp => - !util.Arrays.asList(new File(logDir).list()).asScala.exists { partitionDirectoryNames => + !util.List.of(new File(logDir).list()).asScala.exists { partitionDirectoryNames => partitionDirectoryNames.exists { directoryName => directoryName.startsWith(tp.topic + "-" + tp.partition) && directoryName.endsWith(UnifiedLog.DELETE_DIR_SUFFIX) @@ -1155,7 +1160,7 @@ object TestUtils extends Logging { securityProtocol = 
securityProtocol, trustStoreFile = trustStoreFile) try { - consumer.subscribe(Collections.singleton(topic)) + consumer.subscribe(util.Set.of(topic)) consumeRecords(consumer, numMessages, waitTime) } finally consumer.close() } @@ -1253,7 +1258,7 @@ object TestUtils extends Logging { else abortedValue } - new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, key, value, Collections.singleton(header)) + new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, key, value, util.Set.of(header)) } def producerRecordWithExpectedTransactionStatus(topic: String, partition: Integer, key: String, value: String, willBeCommitted: Boolean): ProducerRecord[Array[Byte], Array[Byte]] = { @@ -1276,7 +1281,7 @@ object TestUtils extends Logging { if (committed.contains(topicPartition)) consumer.seek(topicPartition, committed(topicPartition)) else - consumer.seekToBeginning(Collections.singletonList(topicPartition)) + consumer.seekToBeginning(util.List.of(topicPartition)) } } @@ -1293,7 +1298,7 @@ object TestUtils extends Logging { (resource, configEntries) }.toMap.asJava } else { - Map(new ConfigResource(ConfigResource.Type.BROKER, "") -> configEntries).asJava + util.Map.of(new ConfigResource(ConfigResource.Type.BROKER, ""), configEntries) } adminClient.incrementalAlterConfigs(configs) } @@ -1322,7 +1327,7 @@ object TestUtils extends Logging { val partitionId = topicPartition.partition def currentLeader: Try[Option[Int]] = Try { - val topicDescription = client.describeTopics(List(topic).asJava).allTopicNames.get.get(topic) + val topicDescription = client.describeTopics(util.List.of(topic)).allTopicNames.get.get(topic) topicDescription.partitions.asScala .find(_.partition == partitionId) .flatMap(partitionState => Option(partitionState.leader)) @@ -1356,7 +1361,7 @@ object TestUtils extends Logging { } def currentIsr(admin: Admin, partition: TopicPartition): Set[Int] = { - val description = admin.describeTopics(Set(partition.topic).asJava) + val description = admin.describeTopics(util.Set.of(partition.topic)) .allTopicNames .get .asScala diff --git a/docker/README.md b/docker/README.md index 0942cfd05bb56..c4b9d49d0eaf1 100644 --- a/docker/README.md +++ b/docker/README.md @@ -130,6 +130,10 @@ python docker_build_test.py kafka/test --image-tag=3.6.0 --image-type=jvm --kafk ``` python docker_build_test.py kafka/test --image-tag=3.8.0 --image-type=native --kafka-url=https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz ``` +- Example(local build archive with jvm or native image type) :- To build and test an image named test with local build archive +``` +python docker_build_test.py kafka/test --image-tag=local-build --image-type= --kafka-archive= +``` Creating a Release Candidate ---------------------------- @@ -141,13 +145,13 @@ Creating a Release Candidate ``` # kafka/test is an example repo. Please replace with the docker hub repo you have push access to. -python docker_release.py kafka/test:3.6.0 --kafka-url --image-type=jvm https://archive.apache.org/dist/kafka/3.6.0/kafka_2.13-3.6.0.tgz +python docker_release.py kafka/test:3.6.0 --image-type=jvm --kafka-url=https://archive.apache.org/dist/kafka/3.6.0/kafka_2.13-3.6.0.tgz ``` - Example(native):- To push an image named test under kafka-native dockerhub namespace with 3.8.0 tag and native image type ensuring kafka to be containerised should be https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz (it is recommended to use scala 2.13 binary tarball), following command can be used. 
(Make sure you have push access to the docker repo) ``` # kafka-native/test is an example repo. Please replace with the docker hub repo you have push access to. -python docker_release.py kafka-native/test:3.8.0 --kafka-url --image-type=native https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz +python docker_release.py kafka-native/test:3.8.0 --image-type=native --kafka-url=https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz ``` - Please note that we use docker buildx for preparing the multi-architecture image and pushing it to docker registry. It's possible to encounter build failures because of buildx. Please retry the command in case some buildx related error occurs. diff --git a/docker/common.py b/docker/common.py index 9c0f901823fa5..5099a789da103 100644 --- a/docker/common.py +++ b/docker/common.py @@ -18,9 +18,6 @@ import subprocess import tempfile import os -from distutils.file_util import copy_file - -from distutils.dir_util import copy_tree import shutil def execute(command): @@ -33,12 +30,14 @@ def get_input(message): raise ValueError("This field cannot be empty") return value -def build_docker_image_runner(command, image_type): +def build_docker_image_runner(command, image_type, kafka_archive=None): temp_dir_path = tempfile.mkdtemp() current_dir = os.path.dirname(os.path.realpath(__file__)) - copy_tree(f"{current_dir}/{image_type}", f"{temp_dir_path}/{image_type}") - copy_tree(f"{current_dir}/resources", f"{temp_dir_path}/{image_type}/resources") - copy_file(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") + shutil.copytree(f"{current_dir}/{image_type}", f"{temp_dir_path}/{image_type}", dirs_exist_ok=True) + shutil.copytree(f"{current_dir}/resources", f"{temp_dir_path}/{image_type}/resources", dirs_exist_ok=True) + shutil.copy(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") + if kafka_archive: + shutil.copy(kafka_archive, f"{temp_dir_path}/{image_type}/kafka.tgz") command = command.replace("$DOCKER_FILE", f"{temp_dir_path}/{image_type}/Dockerfile") command = command.replace("$DOCKER_DIR", f"{temp_dir_path}/{image_type}") try: diff --git a/docker/docker_build_test.py b/docker/docker_build_test.py index 793148573f395..9a986875fe320 100755 --- a/docker/docker_build_test.py +++ b/docker/docker_build_test.py @@ -25,31 +25,28 @@ Example command:- docker_build_test.py --image-tag --image-type --kafka-url + docker_build_test.py --image-tag --image-type --kafka-archive This command will build an image with as image name, as image_tag (it will be latest by default), as image type (jvm by default), for the kafka inside the image and run tests on the image. + can be passed as an alternative to to use a local kafka archive. The path of kafka_archive should be absolute. -b can be passed as additional argument if you just want to build the image. -t can be passed if you just want to run tests on the image. 
""" from datetime import date import argparse -from distutils.dir_util import copy_tree import shutil from test.docker_sanity_test import run_tests from common import execute, build_docker_image_runner import tempfile import os -def build_docker_image(image, tag, kafka_url, image_type): - image = f'{image}:{tag}' - build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {image} --build-arg kafka_url={kafka_url} --build-arg build_date={date.today()} $DOCKER_DIR", image_type) - def run_docker_tests(image, tag, kafka_url, image_type): temp_dir_path = tempfile.mkdtemp() try: current_dir = os.path.dirname(os.path.realpath(__file__)) - copy_tree(f"{current_dir}/test/fixtures", f"{temp_dir_path}/fixtures") + shutil.copytree(f"{current_dir}/test/fixtures", f"{temp_dir_path}/fixtures", dirs_exist_ok=True) execute(["wget", "-nv", "-O", f"{temp_dir_path}/kafka.tgz", kafka_url]) execute(["mkdir", f"{temp_dir_path}/fixtures/kafka"]) execute(["tar", "xfz", f"{temp_dir_path}/kafka.tgz", "-C", f"{temp_dir_path}/fixtures/kafka", "--strip-components", "1"]) @@ -69,16 +66,20 @@ def run_docker_tests(image, tag, kafka_url, image_type): parser.add_argument("image", help="Image name that you want to keep for the Docker image") parser.add_argument("--image-tag", "-tag", default="latest", dest="tag", help="Image tag that you want to add to the image") parser.add_argument("--image-type", "-type", choices=["jvm", "native"], default="jvm", dest="image_type", help="Image type you want to build") - parser.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image") parser.add_argument("--build", "-b", action="store_true", dest="build_only", default=False, help="Only build the image, don't run tests") parser.add_argument("--test", "-t", action="store_true", dest="test_only", default=False, help="Only run the tests, don't build the image") + + archive_group = parser.add_mutually_exclusive_group(required=True) + archive_group.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image") + archive_group.add_argument("--kafka-archive", "-a", dest="kafka_archive", help="Kafka archive to be used to extract kafka binary tarball in the docker image") + args = parser.parse_args() if args.build_only or not (args.build_only or args.test_only): if args.kafka_url: - build_docker_image(args.image, args.tag, args.kafka_url, args.image_type) - else: - raise ValueError("--kafka-url is a required argument for docker image") + build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg kafka_url={args.kafka_url} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type) + elif args.kafka_archive: + build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type, args.kafka_archive) if args.test_only or not (args.build_only or args.test_only): run_docker_tests(args.image, args.tag, args.kafka_url, args.image_type) diff --git a/docker/docker_official_image_build_test.py b/docker/docker_official_image_build_test.py index 3da68854c2318..32869a1f4b209 100644 --- a/docker/docker_official_image_build_test.py +++ b/docker/docker_official_image_build_test.py @@ -34,7 +34,6 @@ """ import argparse -from distutils.dir_util import copy_tree import shutil from common import execute from docker_build_test import 
run_docker_tests @@ -46,10 +45,11 @@ def build_docker_official_image(image, tag, kafka_version, image_type): image = f'{image}:{tag}' current_dir = os.path.dirname(os.path.realpath(__file__)) temp_dir_path = tempfile.mkdtemp() - copy_tree(f"{current_dir}/docker_official_images/{kafka_version}/{image_type}", - f"{temp_dir_path}/{image_type}") - copy_tree(f"{current_dir}/docker_official_images/{kafka_version}/jvm/resources", - f"{temp_dir_path}/{image_type}/resources") + shutil.copytree(f"{current_dir}/docker_official_images/{kafka_version}/{image_type}", + f"{temp_dir_path}/{image_type}", dirs_exist_ok=True) + shutil.copytree(f"{current_dir}/docker_official_images/{kafka_version}/jvm/resources", + f"{temp_dir_path}/{image_type}/resources", dirs_exist_ok=True) + shutil.copy(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") command = f"docker build -f $DOCKER_FILE -t {image} $DOCKER_DIR" command = command.replace("$DOCKER_FILE", f"{temp_dir_path}/{image_type}/Dockerfile") command = command.replace("$DOCKER_DIR", f"{temp_dir_path}/{image_type}") diff --git a/docker/examples/README.md b/docker/examples/README.md index bd6fcefd8282e..162e27c711aca 100644 --- a/docker/examples/README.md +++ b/docker/examples/README.md @@ -13,6 +13,25 @@ Kafka server can be started using following ways: - File input - Environment variables +Installation Preparation +------------ + +Note that the `Docker` version **must be >= 20.10.4**. + +The prior Docker versions may cause permission errors when running the Kafka container, as they do not correctly set directory permissions when creating container paths like `/opt/kafka/config`. + +If you are using the prior version, you may encounter the following error during container startup: +```text +===> User +uid=1000(appuser) gid=1000(appuser) groups=1000(appuser) +===> Setting default values of environment variables if not already set. +===> Configuring … +Running in KRaft mode… +/opt/kafka/config/ file not writable +``` + +To avoid this, **please upgrade Docker to 20.10.4 or later**. + Running on default configs -------------------------- @@ -128,7 +147,7 @@ Single Node - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - File Input: - Here ssl configs are provided via file input. 
@@ -148,7 +167,7 @@ Single Node - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` Multi Node Cluster @@ -200,7 +219,7 @@ Multi Node Cluster - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - Isolated: - Examples are present in `docker-compose-files/cluster/isolated` directory. @@ -239,7 +258,7 @@ Multi Node Cluster - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - Note that the examples are meant to be tried one at a time, make sure you close an example server before trying out the other to avoid conflicts. diff --git a/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml index e27b8ebba5a59..6ca7081bb760e 100644 --- a/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: kafka-1: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml b/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml index dba7fe87d3cc6..0b80670a2ec34 100644 --- a/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: kafka-1: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml index 946cb5e3293ef..e4994ce9318e5 100644 --- a/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. 
--- -version: '2' services: controller-1: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml b/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml index 5a99526b60981..aefb84b024cc1 100644 --- a/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: controller-1: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml b/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml index 34a7264d3e27e..8a4fd85b9badc 100644 --- a/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: broker: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml index c7ae370504203..c33c947f073cc 100644 --- a/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: broker: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml b/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml index 91df50ecf9551..cc98efcefcd0d 100644 --- a/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. 
--- -version: '2' services: broker: image: ${IMAGE} diff --git a/docker/jvm/Dockerfile b/docker/jvm/Dockerfile index f98f50a2e0390..1c7c95d48698f 100644 --- a/docker/jvm/Dockerfile +++ b/docker/jvm/Dockerfile @@ -23,20 +23,27 @@ USER root # Get kafka from https://archive.apache.org/dist/kafka and pass the url through build arguments ARG kafka_url +ENV KAFKA_URL=$kafka_url + COPY jsa_launch /etc/kafka/docker/jsa_launch COPY server.properties /etc/kafka/docker/server.properties +COPY *kafka.tgz kafka.tgz + RUN set -eux ; \ apk update ; \ apk upgrade ; \ - apk add --no-cache wget gcompat gpg gpg-agent procps bash; \ + apk add --no-cache bash; \ + if [ -n "$KAFKA_URL" ]; then \ + apk add --no-cache wget gcompat gpg gpg-agent procps; \ + wget -nv -O kafka.tgz "$KAFKA_URL"; \ + wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz; \ + fi; \ mkdir opt/kafka; \ - wget -nv -O kafka.tgz "$kafka_url"; \ - wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \ - tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz + tar xfz kafka.tgz -C opt/kafka --strip-components 1; # Generate jsa files using dynamic CDS for kafka server start command and kafka storage format command RUN /etc/kafka/docker/jsa_launch @@ -53,24 +60,35 @@ USER root ARG kafka_url ARG build_date +ENV KAFKA_URL=$kafka_url + +COPY *kafka.tgz kafka.tgz LABEL org.label-schema.name="kafka" \ org.label-schema.description="Apache Kafka" \ org.label-schema.build-date="${build_date}" \ org.label-schema.vcs-url="https://github.com/apache/kafka" \ - maintainer="Apache Kafka" + org.opencontainers.image.authors="Apache Kafka" -RUN set -eux ; \ +RUN mkdir opt/kafka; \ + set -eux ; \ apk update ; \ apk upgrade ; \ - apk add --no-cache wget gcompat gpg gpg-agent procps bash; \ - mkdir opt/kafka; \ - wget -nv -O kafka.tgz "$kafka_url"; \ - wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \ - tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz; \ + apk add --no-cache bash; \ + if [ -n "$KAFKA_URL" ]; then \ + apk add --no-cache wget gcompat gpg gpg-agent procps; \ + wget -nv -O kafka.tgz "$KAFKA_URL"; \ + wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ + tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz; \ + rm kafka.tgz kafka.tgz.asc KEYS; \ + apk del wget gpg gpg-agent; \ + else \ + tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ + rm kafka.tgz; \ + fi; \ mkdir -p /var/lib/kafka/data /etc/kafka/secrets; \ mkdir -p /etc/kafka/docker /usr/logs /mnt/shared/config; \ adduser -h /home/appuser -D --shell /bin/bash appuser; \ @@ -79,8 +97,6 @@ RUN set -eux ; \ chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \ cp /opt/kafka/config/log4j2.yaml /etc/kafka/docker/log4j2.yaml; \ cp /opt/kafka/config/tools-log4j2.yaml /etc/kafka/docker/tools-log4j2.yaml; \ - rm kafka.tgz kafka.tgz.asc KEYS; \ - apk del wget gpg gpg-agent; \ apk cache clean; COPY server.properties /etc/kafka/docker/server.properties diff --git a/docker/native/Dockerfile b/docker/native/Dockerfile index ca85f35562df1..d458dab72ce64 100644 --- a/docker/native/Dockerfile +++ 
b/docker/native/Dockerfile @@ -29,15 +29,18 @@ ENV TARGET_PATH="$KAFKA_DIR/kafka.Kafka" COPY native-image-configs $NATIVE_CONFIGS_DIR COPY native_command.sh native_command.sh -RUN mkdir $KAFKA_DIR; \ - microdnf install wget; \ - wget -nv -O kafka.tgz "$KAFKA_URL"; \ - wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ +COPY *kafka.tgz /app + +RUN if [ -n "$KAFKA_URL" ]; then \ + microdnf install wget; \ + wget -nv -O kafka.tgz "$KAFKA_URL"; \ + wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz; \ + fi; \ + mkdir $KAFKA_DIR; \ tar xfz kafka.tgz -C $KAFKA_DIR --strip-components 1; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz; \ - rm kafka.tgz ; \ # Build the native-binary of the apache kafka using graalVM native-image. /app/native_command.sh $NATIVE_IMAGE_PATH $NATIVE_CONFIGS_DIR $KAFKA_LIBS_DIR $TARGET_PATH @@ -52,7 +55,7 @@ LABEL org.label-schema.name="kafka" \ org.label-schema.description="Apache Kafka" \ org.label-schema.build-date="${build_date}" \ org.label-schema.vcs-url="https://github.com/apache/kafka" \ - maintainer="Apache Kafka" + org.opencontainers.image.authors="Apache Kafka" RUN apk update ; \ apk add --no-cache gcompat ; \ diff --git a/docker/native/native-image-configs/reflect-config.json b/docker/native/native-image-configs/reflect-config.json index f4263a08898f2..c953ea9fe1d8e 100644 --- a/docker/native/native-image-configs/reflect-config.json +++ b/docker/native/native-image-configs/reflect-config.json @@ -1023,6 +1023,12 @@ "name":"org.apache.kafka.common.security.kerberos.KerberosLogin", "methods":[{"name":"","parameterTypes":[] }] }, +{ + "name":"org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever" +}, +{ + "name":"org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator" +}, { "name":"org.apache.kafka.common.security.plain.PlainLoginModule", "methods":[{"name":"","parameterTypes":[] }] @@ -1067,6 +1073,18 @@ "name":"org.apache.kafka.metadata.authorizer.StandardAuthorizer", "methods":[{"name":"","parameterTypes":[] }] }, +{ + "name":"org.apache.kafka.server.logger.LoggingController", + "queryAllPublicConstructors":true +}, +{ + "name":"org.apache.kafka.server.logger.LoggingControllerMBean", + "queryAllPublicMethods":true +}, +{ + "name":"org.apache.kafka.server.share.persister.DefaultStatePersister", + "methods":[{"name":"","parameterTypes":["org.apache.kafka.server.share.persister.PersisterStateManager"] }] +}, { "name":"org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler$Content", "allDeclaredFields":true, diff --git a/docker/prepare_docker_official_image_source.py b/docker/prepare_docker_official_image_source.py index 25d57c53e0fd8..bbc539b5c4c54 100644 --- a/docker/prepare_docker_official_image_source.py +++ b/docker/prepare_docker_official_image_source.py @@ -33,7 +33,6 @@ from datetime import date import argparse -from distutils.dir_util import copy_tree import os import shutil import re @@ -61,12 +60,10 @@ def remove_args_and_hardcode_values(file_path, kafka_version, kafka_url): args = parser.parse_args() kafka_url = f"https://archive.apache.org/dist/kafka/{args.kafka_version}/kafka_2.13-{args.kafka_version}.tgz" current_dir = os.path.dirname(os.path.realpath(__file__)) - new_dir = os.path.join( - current_dir, f'docker_official_images', args.kafka_version) + new_dir = os.path.join(current_dir, 
'docker_official_images', args.kafka_version) if os.path.exists(new_dir): shutil.rmtree(new_dir) os.makedirs(new_dir) - copy_tree(os.path.join(current_dir, args.image_type), os.path.join(new_dir, args.kafka_version, args.image_type)) - copy_tree(os.path.join(current_dir, 'resources'), os.path.join(new_dir, args.kafka_version, args.image_type, 'resources')) - remove_args_and_hardcode_values( - os.path.join(new_dir, args.kafka_version, args.image_type, 'Dockerfile'), args.kafka_version, kafka_url) + shutil.copytree(os.path.join(current_dir, args.image_type), os.path.join(new_dir, args.image_type), dirs_exist_ok=True) + shutil.copytree(os.path.join(current_dir, 'resources'), os.path.join(new_dir, args.image_type, 'resources'), dirs_exist_ok=True) + remove_args_and_hardcode_values(os.path.join(new_dir, args.image_type, 'Dockerfile'), args.kafka_version, kafka_url) diff --git a/docker/server.properties b/docker/server.properties index eb0b445c344db..8ed486f3736c4 100644 --- a/docker/server.properties +++ b/docker/server.properties @@ -87,8 +87,8 @@ log.dirs=/tmp/kraft-combined-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 +# This value is recommended to be increased based on the installation resources. +num.recovery.threads.per.data.dir=2 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" diff --git a/docker/test/docker_sanity_test.py b/docker/test/docker_sanity_test.py index d2135fb029507..0d21bf47fee0d 100644 --- a/docker/test/docker_sanity_test.py +++ b/docker/test/docker_sanity_test.py @@ -65,7 +65,7 @@ def produce_message(self, topic, producer_config, key, value): subprocess.run(["bash", "-c", " ".join(command)]) def consume_message(self, topic, consumer_config): - command = [f"{self.FIXTURES_DIR}/{constants.KAFKA_CONSOLE_CONSUMER}", "--topic", topic, "--property", "'print.key=true'", "--property", "'key.separator=:'", "--from-beginning", "--max-messages", "1", "--timeout-ms", f"{constants.CLIENT_TIMEOUT}"] + command = [f"{self.FIXTURES_DIR}/{constants.KAFKA_CONSOLE_CONSUMER}", "--topic", topic, "--formatter-property", "'print.key=true'", "--formatter-property", "'key.separator=:'", "--from-beginning", "--max-messages", "1", "--timeout-ms", f"{constants.CLIENT_TIMEOUT}"] command.extend(consumer_config) message = subprocess.check_output(["bash", "-c", " ".join(command)]) return message.decode("utf-8").strip() @@ -93,9 +93,9 @@ def broker_metrics_flow(self): errors.append(constants.BROKER_METRICS_ERROR_PREFIX + str(e)) return errors - producer_config = ["--bootstrap-server", "localhost:9092", "--property", "client.id=host"] + producer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "client.id=host"] self.produce_message(constants.BROKER_METRICS_TEST_TOPIC, producer_config, "key", "message") - consumer_config = ["--bootstrap-server", "localhost:9092", "--property", "auto.offset.reset=earliest"] + consumer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "auto.offset.reset=earliest"] message = self.consume_message(constants.BROKER_METRICS_TEST_TOPIC, consumer_config) try: self.assertEqual(message, "key:message") @@ -129,13 +129,13 @@ def ssl_flow(self, 
ssl_broker_port, test_name, test_error_prefix, topic): return errors producer_config = ["--bootstrap-server", ssl_broker_port, - "--producer.config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}"] + "--command-config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}"] self.produce_message(topic, producer_config, "key", "message") consumer_config = [ "--bootstrap-server", ssl_broker_port, - "--property", "auto.offset.reset=earliest", - "--consumer.config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}", + "--command-property", "auto.offset.reset=earliest", + "--command-config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}", ] message = self.consume_message(topic, consumer_config) try: @@ -155,7 +155,7 @@ def broker_restart_flow(self): errors.append(constants.BROKER_RESTART_ERROR_PREFIX + str(e)) return errors - producer_config = ["--bootstrap-server", "localhost:9092", "--property", "client.id=host"] + producer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "client.id=host"] self.produce_message(constants.BROKER_RESTART_TEST_TOPIC, producer_config, "key", "message") print("Stopping Container") @@ -163,7 +163,7 @@ def broker_restart_flow(self): print("Resuming Container") self.resume_container() - consumer_config = ["--bootstrap-server", "localhost:9092", "--property", "auto.offset.reset=earliest"] + consumer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "auto.offset.reset=earliest"] message = self.consume_message(constants.BROKER_RESTART_TEST_TOPIC, consumer_config) try: self.assertEqual(message, "key:message") diff --git a/docker/test/fixtures/mode/combined/docker-compose.yml b/docker/test/fixtures/mode/combined/docker-compose.yml index 9c6d81f83bd66..8691019d02447 100644 --- a/docker/test/fixtures/mode/combined/docker-compose.yml +++ b/docker/test/fixtures/mode/combined/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: broker1: image: {$IMAGE} diff --git a/docker/test/fixtures/mode/isolated/docker-compose.yml b/docker/test/fixtures/mode/isolated/docker-compose.yml index f0216a520ac8f..2c77fc123f137 100644 --- a/docker/test/fixtures/mode/isolated/docker-compose.yml +++ b/docker/test/fixtures/mode/isolated/docker-compose.yml @@ -14,7 +14,6 @@ # limitations under the License. --- -version: '2' services: controller1: image: {$IMAGE} diff --git a/docs/api.html b/docs/api.html index 5842548503e2e..e35d79ca097f8 100644 --- a/docs/api.html +++ b/docs/api.html @@ -30,10 +30,10 @@

    javadocs.

    - To use the producer, you can use the following maven dependency: + To use the producer, add the following Maven dependency to your project:

    <dependency>
     	<groupId>org.apache.kafka</groupId>
    @@ -45,26 +45,40 @@ 

    javadocs.

    - To use the consumer, you can use the following maven dependency: + To use the consumer, add the following Maven dependency to your project:

    <dependency>
     	<groupId>org.apache.kafka</groupId>
     	<artifactId>kafka-clients</artifactId>
     	<version>{{fullDotVersion}}</version>
     </dependency>
    -

    2.3 Streams API

    +

    2.3 Share Consumer API (Preview)

    + + The Share Consumer API (Preview) enables applications within a share group to cooperatively consume and process data from Kafka topics. +

    + Examples of using the share consumer are shown in the + javadocs. +

    + To use the share consumer, add the following Maven dependency to your project: +

    <dependency>
    +	<groupId>org.apache.kafka</groupId>
    +	<artifactId>kafka-clients</artifactId>
    +	<version>{{fullDotVersion}}</version>
    +</dependency>
    + +
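As a rough illustration of the API above, the following is a minimal sketch of a share consumer joining the share group my-share-group and reading from the topic test. The broker address, group name and topic are placeholders, and the exact client API should be confirmed against the KafkaShareConsumer javadocs.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ShareConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
        props.put("group.id", "my-share-group");             // the share group to join
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props)) {
            consumer.subscribe(List.of("test"));
            while (true) {
                // Records are acquired with a time-limited lock; in the default (implicit)
                // acknowledgement mode they are acknowledged as processing moves on.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("%s:%s%n", record.key(), record.value());
                }
            }
        }
    }
}
```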

    2.4 Streams API

    The Streams API allows transforming streams of data from input topics to output topics.

    - Examples showing how to use this library are given in the + Examples of using this library are shown in the javadocs.

    Additional documentation on using the Streams API is available here.

    - To use Kafka Streams you can use the following maven dependency: + To use Kafka Streams, add the following Maven dependency to your project:

    <dependency>
     	<groupId>org.apache.kafka</groupId>
    @@ -75,7 +89,7 @@ 

    in the developer guide.

    - To use Kafka Streams DSL for Scala {{scalaVersion}} you can use the following maven dependency: + To use Kafka Streams DSL for Scala {{scalaVersion}}, add the following Maven dependency to your project:

    <dependency>
     	<groupId>org.apache.kafka</groupId>
    @@ -83,7 +97,7 @@ 

    2.4 Connect API

    +

    2.5 Connect API

    The Connect API allows implementing connectors that continually pull from some source data system into Kafka or push from Kafka into some sink data system.

    @@ -92,11 +106,11 @@

    javadoc.

    -

    2.5 Admin API

    +

    2.6 Admin API

    The Admin API supports managing and inspecting topics, brokers, acls, and other Kafka objects.

    - To use the Admin API, add the following Maven dependency: + To use the Admin API, add the following Maven dependency to your project:

    <dependency>
     	<groupId>org.apache.kafka</groupId>
     	<artifactId>kafka-clients</artifactId>
    diff --git a/docs/configuration.html b/docs/configuration.html
    index d38dfce2aab9c..0c114f79ef4c9 100644
    --- a/docs/configuration.html
    +++ b/docs/configuration.html
    @@ -26,9 +26,10 @@ 

    log.dirs
  • process.roles
  • controller.quorum.bootstrap.servers +
  • controller.listener.names - Topic-level configurations and defaults are discussed in more detail below. + Topic configurations and defaults are discussed in more detail below. @@ -156,6 +157,7 @@
    Updating Thread Configs
  • remote.log.reader.threads
  • remote.log.manager.copier.thread.pool.size
  • remote.log.manager.expiration.thread.pool.size
  • +
  • remote.log.manager.follower.thread.pool.size
  • Updating ConnectionQuota Configs
    @@ -185,7 +187,7 @@
    Adding and Removing Listeners
    Inter-broker listener must be configured using the static broker configuration inter.broker.listener.name or security.inter.broker.protocol. -

    3.2 Topic-Level Configs

    +

    3.2 Topic Configs

    Configurations pertinent to topics have both a server default as well an optional per-topic override. If no per-topic configuration is given the server default is used. The override can be set at topic creation time by giving one or more --config options. This example creates a topic named my-topic with a custom max message size and flush rate:
    $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic my-topic --partitions 1 \
    @@ -201,63 +203,78 @@ 

    $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic --alter --delete-config max.message.bytes

    - The following are the topic-level configurations. The server's default configuration for this property is given under the Server Default Property heading. A given server default config value only applies to a topic if it does not have an explicit topic config override. + Below is the topic configuration. The server's default configuration for this property is given under the Server Default Property heading. A given server default config value only applies to a topic if it does not have an explicit topic config override. -

    3.3 Producer Configs

    +

    3.3 Group Configs

    - Below is the configuration of the producer: + Below is the group configuration: + + +
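As an illustration, group configurations can also be altered programmatically. The sketch below is a minimal example that assumes the GROUP config resource type of the Admin client and uses the share.record.lock.duration.ms group parameter described in the design section; the group name and broker address are placeholders, and the exact API should be checked against the Admin javadocs.

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class GroupConfigSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // placeholder
        try (Admin admin = Admin.create(props)) {
            // Set a group configuration override for the group "my-share-group".
            ConfigResource group = new ConfigResource(ConfigResource.Type.GROUP, "my-share-group");
            AlterConfigOp op = new AlterConfigOp(
                new ConfigEntry("share.record.lock.duration.ms", "60000"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(group, List.of(op))).all().get();
        }
    }
}
```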

    3.4 Producer Configs

    + + Below is the producer configuration: -

    3.4 Consumer Configs

    +

    3.5 Consumer Configs

    - Below is the configuration for the consumer: + Below is the consumer and share consumer configuration: -

    3.5 Kafka Connect Configs

    - Below is the configuration of the Kafka Connect framework. +

    3.6 Kafka Connect Configs

    + Below is the Kafka Connect framework configuration. -

    3.5.1 Source Connector Configs

    - Below is the configuration of a source connector. +

    3.6.1 Source Connector Configs

    + Below is the source connector configuration. -

    3.5.2 Sink Connector Configs

    - Below is the configuration of a sink connector. +

    3.6.2 Sink Connector Configs

    + Below is the sink connector configuration. -

    3.6 Kafka Streams Configs

    - Below is the configuration of the Kafka Streams client library. +

    3.7 Kafka Streams Configs

    + Below is the Kafka Streams client library configuration. -

    3.7 Admin Configs

    - Below is the configuration of the Kafka Admin client library. +

    3.8 Admin Configs

    + Below is the Kafka Admin client library configuration. -

    3.8 MirrorMaker Configs

    +

    3.9 MirrorMaker Configs

    Below is the configuration of the connectors that make up MirrorMaker 2. -

    3.8.1 MirrorMaker Common Configs

    - Below are the common configuration properties that apply to all three connectors. +

    3.9.1 MirrorMaker Common Configs

    + Below is the common configuration that applies to all three connectors. -

    3.8.2 MirrorMaker Source Configs

    +

    3.9.2 MirrorMaker Source Configs

    Below is the configuration of MirrorMaker 2 source connector for replicating topics. -

    3.8.3 MirrorMaker Checkpoint Configs

    +

    3.9.3 MirrorMaker Checkpoint Configs

    Below is the configuration of MirrorMaker 2 checkpoint connector for emitting consumer offset checkpoints. -

    3.8.4 MirrorMaker HeartBeat Configs

    +

    3.9.4 MirrorMaker HeartBeat Configs

    Below is the configuration of MirrorMaker 2 heartbeat connector for checking connectivity between connectors and clusters. -

    3.9 System Properties

    +

    3.10 System Properties

    Kafka supports some configuration that can be enabled through Java system properties. System properties are usually set by passing the -D flag to the Java virtual machine in which Kafka components are running. Below are the supported system properties.
      +
    • +

      org.apache.kafka.sasl.oauthbearer.allowed.files

      +

This system property is used to determine which files, if any, are allowed to be read by the SASL OAUTHBEARER plugin. This property accepts a comma-separated list of files. By default, the value is an empty list. +

If users want to allow specific files, they need to explicitly set the system property as shown below. +

      -Dorg.apache.kafka.sasl.oauthbearer.allowed.files=/tmp/token,/tmp/private_key.pem
      + + + +
      Since:4.1.0
      Default Value:
      +
    • org.apache.kafka.sasl.oauthbearer.allowed.urls

      This system property is used to set the allowed URLs as SASL OAUTHBEARER token or jwks endpoints. This property accepts comma-separated list of URLs. By default the value is an empty list. @@ -270,14 +287,26 @@

      org.apache.kafka.disallowed.login.modules

      -

      This system property is used to disable the problematic login modules usage in SASL JAAS configuration. This property accepts comma-separated list of loginModule names. By default com.sun.security.auth.module.JndiLoginModule loginModule is disabled. -

      If users want to enable JndiLoginModule, users need to explicitly reset the system property like below. We advise the users to validate configurations and only allow trusted JNDI configurations. For more details CVE-2023-25194. +

This system property is used to disable problematic login module usage in SASL JAAS configuration. This property accepts a comma-separated list of loginModule names. By default, the com.sun.security.auth.module.JndiLoginModule and com.sun.security.auth.module.LdapLoginModule login modules are disabled. +

If users want to enable JndiLoginModule or LdapLoginModule, they need to explicitly reset the system property as shown below. We advise users to validate configurations and only allow trusted JNDI configurations. For more details, see CVE-2023-25194. +

      -Dorg.apache.kafka.disallowed.login.modules=

      To disable more loginModules, update the system property with comma-separated loginModule names. Make sure to explicitly add JndiLoginModule module name to the comma-separated list like below.

      -Dorg.apache.kafka.disallowed.login.modules=com.sun.security.auth.module.JndiLoginModule,com.ibm.security.auth.module.LdapLoginModule,com.ibm.security.auth.module.Krb5LoginModule
      +

      The configuration is deprecated and will be removed in a future release. Please use org.apache.kafka.allowed.login.modules instead. - + + +
      Since:3.4.0
      Default Value:com.sun.security.auth.module.JndiLoginModule
      Deprecated:4.2.0
      Default Value:com.sun.security.auth.module.JndiLoginModule,com.sun.security.auth.module.LdapLoginModule
      +

    • +
    • +

      org.apache.kafka.allowed.login.modules

      +

      This system property is used to explicitly allow specific login modules in SASL JAAS configuration. It accepts a comma-separated list of login module class names. This property provides a stricter, allowed-list-based alternative to the deprecated org.apache.kafka.disallowed.login.modules property. + It is recommended to use this property to improve the security of JAAS configurations. +

      If both properties are set, org.apache.kafka.allowed.login.modules takes precedence.

      + + +
      Since:4.2.0
      Default Value:
    • @@ -298,14 +327,29 @@

      3.10 Tiered Storage Configs

      - Below are the configuration properties for Tiered Storage. +

      3.11 Tiered Storage Configs

      + Below is the Tiered Storage configuration. + +

      3.11.1 RLMM Configs

      +

      Below is the configuration for TopicBasedRemoteLogMetadataManager, which is the default implementation of RemoteLogMetadataManager.

      +

      All configurations here should start with the prefix defined by remote.log.metadata.manager.impl.prefix, for example, rlmm.config.remote.log.metadata.consume.wait.ms.

      +

      The implementation of TopicBasedRemoteLogMetadataManager needs to create admin, producer, and consumer clients for the internal topic __remote_log_metadata.

      +

      Additional configurations can be provided for different types of clients using the following configuration properties:

      +
      # Configs for admin, producer, and consumer clients
      +<rlmm.prefix>.remote.log.metadata.common.client.<kafka.property> = <value>
      +
      +# Configs only for producer client
      +<rlmm.prefix>.remote.log.metadata.producer.<kafka.property> = <value>
      +
      +# Configs only for consumer client
      +<rlmm.prefix>.remote.log.metadata.consumer.<kafka.property> = <value>
      +

      - 3.11 Configuration Providers + 3.12 Configuration Providers

      diff --git a/docs/connect.html b/docs/connect.html index 457bc575eaa5a..85571bf115cb3 100644 --- a/docs/connect.html +++ b/docs/connect.html @@ -47,7 +47,7 @@

      Running Kafka Connectbootstrap.servers - List of Kafka servers used to bootstrap connections to Kafka

    • key.converter - Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.
    • value.converter - Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.
    • -
    • plugin.path (default empty) - a list of paths that contain Connect plugins (connectors, converters, transformations). Before running quick starts, users must add the absolute path that contains the example FileStreamSourceConnector and FileStreamSinkConnector packaged in connect-file-"version".jar, because these connectors are not included by default to the CLASSPATH or the plugin.path of the Connect worker (see plugin.path property for examples).
    • +
    • plugin.path (default null) - a list of paths that contain Connect plugins (connectors, converters, transformations). Before running quick starts, users must add the absolute path that contains the example FileStreamSourceConnector and FileStreamSinkConnector packaged in connect-file-{{fullDotVersion}}.jar, because these connectors are not included by default to the CLASSPATH or the plugin.path of the Connect worker (see plugin.path property for examples).

    The important configuration options specific to standalone mode are:

    @@ -57,7 +57,7 @@

    Running Kafka ConnectThe parameters that are configured here are intended for producers and consumers used by Kafka Connect to access the configuration, offset and status topics. For configuration of the producers used by Kafka source tasks and the consumers used by Kafka sink tasks, the same parameters can be used but need to be prefixed with producer. and consumer. respectively. The only Kafka client parameter that is inherited without a prefix from the worker configuration is bootstrap.servers, which in most cases will be sufficient, since the same cluster is often used for all purposes. A notable exception is a secured cluster, which requires extra parameters to allow connections. These parameters will need to be set up to three times in the worker configuration, once for management access, once for Kafka sources and once for Kafka sinks.

    -

    Starting with 2.3.0, client configuration overrides can be configured individually per connector by using the prefixes producer.override. and consumer.override. for Kafka sources or Kafka sinks respectively. These overrides are included with the rest of the connector's configuration properties.

    +

    Client configuration overrides can be configured individually per connector by using the prefixes producer.override. and consumer.override. for Kafka sources or Kafka sinks respectively. These overrides are included with the rest of the connector's configuration properties.

    The remaining parameters are connector configuration files. Each file may either be a Java Properties file or a JSON file containing an object with the same structure as the request body of either the POST /connectors endpoint or the PUT /connectors/{name}/config endpoint (see the OpenAPI documentation). You may include as many as you want, but all will execute within the same process (on different threads). You can also choose not to specify any connector configuration files on the command line, and instead use the REST API to create connectors at runtime after your standalone worker starts.

    @@ -69,10 +69,10 @@

    Running Kafka ConnectIn particular, the following configuration parameters, in addition to the common settings mentioned above, are critical to set before starting your cluster:

      -
    • group.id (default connect-cluster) - unique name for the cluster, used in forming the Connect cluster group; note that this must not conflict with consumer group IDs
    • -
    • config.storage.topic (default connect-configs) - topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, compacted topic. You may need to manually create the topic to ensure the correct configuration as auto created topics may have multiple partitions or be automatically configured for deletion rather than compaction
    • -
    • offset.storage.topic (default connect-offsets) - topic to use for storing offsets; this topic should have many partitions, be replicated, and be configured for compaction
    • -
    • status.storage.topic (default connect-status) - topic to use for storing statuses; this topic can have multiple partitions, and should be replicated and configured for compaction
    • +
    • group.id - Unique name for the cluster, used in forming the Connect cluster group; note that this must not conflict with consumer group IDs
    • +
    • config.storage.topic - Name for the topic to use for storing connector and task configurations; this topic should have a single partition, be replicated, and be configured for compaction
    • +
    • offset.storage.topic - Name for the topic to use for storing offsets; this topic should have many partitions, be replicated, and be configured for compaction
    • +
    • status.storage.topic - Name for the topic to use for storing statuses; this topic can have multiple partitions, be replicated, and be configured for compaction

    Note that in distributed mode the connector configurations are not passed on the command line. Instead, use the REST API described below to create, modify, and destroy connectors.

    @@ -762,7 +762,7 @@

    Sink Tasks

    The flush() method is used during the offset commit process, which allows tasks to recover from failures and resume from a safe point such that no events will be missed. The method should push any outstanding data to the destination system and then block until the write has been acknowledged. The offsets parameter can often be ignored, but is useful in some cases where implementations want to store offset information in the destination store to provide exactly-once delivery. For example, an HDFS connector could do this and use atomic move operations to make sure the flush() operation atomically commits the data and offsets to a final location in HDFS.

    -
    Errant Record Reporter
    +
    Errant Record Reporter

    When error reporting is enabled for a connector, the connector can use an ErrantRecordReporter to report problems with individual records sent to a sink connector. The following example shows how a connector's SinkTask subclass might obtain and use the ErrantRecordReporter, safely handling a null reporter when the DLQ is not enabled or when the connector is installed in an older Connect runtime that doesn't have this reporter feature:

    diff --git a/docs/design.html b/docs/design.html index 4c686e68f5216..c58ead07c9ae0 100644 --- a/docs/design.html +++ b/docs/design.html @@ -340,11 +340,58 @@

+ The error handling for the transactional producer has been standardized, which ensures consistent behavior and clearer error handling patterns. The exception categories are now more precisely defined: +
      +
    1. RetriableException: Temporary exceptions that are retried automatically by the client. These are handled internally and don't bubble up to the application.
    2. +
    3. RefreshRetriableException: Exceptions requiring metadata refresh before retry. These are handled internally by the client after refreshing metadata and don't bubble up to the application.
    4. +
    5. AbortableException: Exceptions that require transaction abort and reprocessing. These bubble up to the application, which must handle them by aborting the transaction and resetting the consumer position.
    6. +
    7. ApplicationRecoverableException: Exceptions that bubble up to the application and require application handling. The application must implement its own recovery strategy, which must include restarting the producer.
    8. +
    9. InvalidConfigurationException: Configuration-related exceptions that bubble up to the application and require application handling. The producer doesn't need to restart, but the application may choose to restart it.
    10. +
    11. KafkaException: General Kafka exceptions that don't fit into the above categories. These bubble up to the application for handling.
    12. +
    +

+ Example template code for handling transaction exceptions is linked here: Transaction Client Demo +
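As a rough sketch of the abort-and-reprocess handling described above, a consume-transform-produce loop might look like the following. It does not use the new exception category class names (take those from the demo and javadocs); the topic names and recovery choices are illustrative only.

```java
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

public class TransactionalCopySketch {
    // Assumes the producer was created with a transactional.id and initTransactions() was called once.
    static void copyOnce(KafkaConsumer<String, String> consumer, KafkaProducer<String, String> producer) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        if (records.isEmpty()) return;
        try {
            producer.beginTransaction();
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            for (ConsumerRecord<String, String> r : records) {
                producer.send(new ProducerRecord<>("output-topic", r.key(), r.value()));
                offsets.put(new TopicPartition(r.topic(), r.partition()), new OffsetAndMetadata(r.offset() + 1));
            }
            // Commit the consumed offsets atomically with the produced records.
            producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
            producer.commitTransaction();
        } catch (ProducerFencedException fatal) {
            // Another producer with the same transactional.id took over: close and restart the producer.
            producer.close();
            throw fatal;
        } catch (KafkaException abortable) {
            // Abort and reprocess: the simplest recovery is to recreate the clients, or seek the
            // consumer back to the last committed offsets, as described in the surrounding text.
            producer.abortTransaction();
        }
    }
}
```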

    A simple policy for handling exceptions and aborted transactions is to discard and recreate the Kafka producer and consumer objects and start afresh. As part of recreating the consumer, the consumer group will rebalance and fetch the last committed offset, which has the effect of rewinding back to the state before the transaction aborted. Alternatively, a more sophisticated application (such as the transactional message copier) can choose not to use KafkaConsumer.committed to retrieve the committed offset from Kafka, and then KafkaConsumer.seek to rewind the current position. -

    4.8 Replication

    +

    4.8 Share groups

    +

    + Share groups are available as a preview in Apache Kafka 4.1. +

    + Share groups are a new type of group, existing alongside traditional consumer groups. Share groups enable Kafka consumers to cooperatively consume and process records from topics. + They offer an alternative to traditional consumer groups, particularly when applications require finer-grained sharing of partitions and records. +

    + The fundamental differences between a share group and a consumer group are: +

      +
    • The consumers within a share group cooperatively consume records, and partitions may be assigned to multiple consumers.
    • +
    • The number of consumers in a share group can exceed the number of partitions in a topic.
    • +
    • Records are acknowledged individually, though the system is optimized for batch processing to improve efficiency.
    • +
    • Delivery attempts to consumers in a share group are counted, which enables automated handling of unprocessable records.
    • +
    +

    + All consumers in the same share group subscribed to the same topic will cooperatively consume the records of that topic. If a topic is accessed by consumers in multiple share groups, each share group + consumes from that topic independently of the others. +

    + Each consumer can dynamically set its list of subscribed topics. In practice, all consumers within a share group typically subscribe to the same topic or topics. +

+ When a consumer in a share group fetches records, it receives available records from any of the topic-partitions matching its subscriptions. Records are acquired for delivery to this consumer with a time-limited + acquisition lock. While a record is acquired, it is unavailable to other consumers. +

    By default, the lock duration is 30 seconds, but you can control it using the group configuration parameter share.record.lock.duration.ms. The lock is released automatically once its + duration elapses, making the record available to another consumer. A consumer holding the lock can handle the record in the following ways: +

      +
    • Acknowledge successful processing of the record.
    • +
    • Release the record, making it available for another delivery attempt.
    • +
    • Reject the record, indicating it's unprocessable and preventing further delivery attempts for that record.
    • +
    • Do nothing, in which case the lock is automatically released when its duration expires.
    • +
    +

    + The Kafka cluster limits the number of records acquired for consumers for each topic-partition within a share group. Once this limit is reached, fetching operations will temporarily yield no further records + until the number of acquired records decreases (as locks naturally time out). This limit is controlled by the broker configuration property group.share.partition.max.record.locks. By limiting + the duration of the acquisition lock and automatically releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. + +
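To make the acknowledgement outcomes above concrete, here is a minimal sketch that assumes the share consumer's explicit acknowledgement mode (set via the share.acknowledgement.mode configuration) and AcknowledgeType values corresponding to accept, release and reject; the configuration key, enum names and broker address are assumptions that should be confirmed against the KafkaShareConsumer javadocs.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.AcknowledgeType;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ExplicitAckSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");      // placeholder
        props.put("group.id", "my-share-group");
        props.put("share.acknowledgement.mode", "explicit");   // assumed configuration key
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props)) {
            consumer.subscribe(List.of("topic1"));
            while (true) {
                for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(500))) {
                    try {
                        process(record);
                        consumer.acknowledge(record, AcknowledgeType.ACCEPT);   // processed successfully
                    } catch (RetryableProcessingError e) {
                        consumer.acknowledge(record, AcknowledgeType.RELEASE);  // make it available for redelivery
                    } catch (Exception e) {
                        consumer.acknowledge(record, AcknowledgeType.REJECT);   // unprocessable record
                    }
                }
                consumer.commitSync();  // send the acknowledgements to the broker
            }
        }
    }

    static void process(ConsumerRecord<String, String> record) { /* application logic */ }
    static class RetryableProcessingError extends RuntimeException { }
}
```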

    4.9 Replication

    Kafka replicates the log for each topic's partitions across a configurable number of servers (you can set this replication factor on a topic-by-topic basis). This allows automatic failover to these replicas when a server in the cluster fails so messages remain available in the presence of failures. @@ -475,7 +522,7 @@

    < The result is that we are able to batch together many of the required leadership change notifications which makes the election process far cheaper and faster for a large number of partitions. If the controller itself fails, then another controller will be elected. -

    4.9 Log Compaction

    +

    4.10 Log Compaction

    Log compaction ensures that Kafka will always retain at least the last known value for each message key within the log of data for a single topic partition. It addresses use cases and scenarios such as restoring state after application crashes or system failure, or reloading caches after application restarts during operational maintenance. Let's dive into these use cases in more detail and then describe how compaction works. @@ -627,7 +674,7 @@

    <

    Further cleaner configurations are described here. -

    4.10 Quotas

    +

    4.11 Quotas

    Kafka cluster has the ability to enforce quotas on requests to control the broker resources used by clients. Two types of client quotas can be enforced by Kafka brokers for each group of clients sharing a quota: diff --git a/docs/implementation.html b/docs/implementation.html index a25a9b98d2275..3be539e0ba82e 100644 --- a/docs/implementation.html +++ b/docs/implementation.html @@ -59,6 +59,9 @@

    +

    The key of a record header is guaranteed to be non-null, while the value of a record header may be null. The order of headers in a record is preserved when producing and consuming.

    We use the same varint encoding as Protobuf. More information on the latter can be found here. The count of headers in a record is also encoded as a varint.
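To illustrate how headers are used from the producer API, here is a short sketch; the topic and header names are placeholders.

```java
import java.nio.charset.StandardCharsets;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        ProducerRecord<String, String> record = new ProducerRecord<>("my-topic", "key", "value");
        // Header keys must be non-null; header values may be null.
        record.headers().add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));
        record.headers().add("optional-marker", null);

        // Header order is preserved when the record is produced and consumed.
        for (Header header : record.headers()) {
            System.out.println(header.key());
        }
    }
}
```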

    diff --git a/docs/js/templateData.js b/docs/js/templateData.js index cb834035f44e1..5ab87d9f7c50b 100644 --- a/docs/js/templateData.js +++ b/docs/js/templateData.js @@ -17,8 +17,8 @@ limitations under the License. // Define variables for doc templates var context={ - "version": "41", - "dotVersion": "4.1", - "fullDotVersion": "4.1.0", + "version": "42", + "dotVersion": "4.2", + "fullDotVersion": "4.2.0", "scalaVersion": "2.13" }; diff --git a/docs/ops.html b/docs/ops.html index 4d6d60c6a4dbd..fcef480f2c91a 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -82,7 +82,7 @@

    $ bin/kafka-leader-election.sh --bootstrap-server localhost:9092 --election-type preferred --all-topic-partitions

    -

    Balancing Replicas Across Racks

    +

    Balancing replicas across racks

    The rack awareness feature spreads replicas of the same partition across different racks. This extends the guarantees Kafka provides for broker-failure to cover rack-failure, limiting the risk of data loss should all the brokers on a rack fail at once. The feature can also be applied to other broker groupings such as availability zones in EC2.

    You can specify that a broker belongs to a particular rack by adding a property to the broker config: @@ -107,7 +107,18 @@

    -

    Managing Consumer Groups

    +

    Managing groups

    + + With the GroupCommand tool, we can list groups of all types, including consumer groups, share groups and streams groups. Each type of group has its own tool for administering groups of that type. + + For example, to list all groups in the cluster: + +
    $ bin/kafka-groups.sh --bootstrap-server localhost:9092 --list
    +GROUP                    TYPE                     PROTOCOL
    +my-consumer-group        Consumer                 consumer
    +my-share-group           Share                    share
    + +

    Managing consumer groups

    With the ConsumerGroupCommand tool, we can list, describe, or delete the consumer groups. The consumer group can be deleted manually, or automatically when the last committed offset for that group expires. Manual deletion works only if the group does not have any active members. @@ -213,11 +224,58 @@

    For example, to reset offsets of a consumer group to the latest offset: -

    $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --group consumergroup1 --topic topic1 --to-latest
    +  
    $ bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --group my-group --topic topic1 --to-latest
     TOPIC                          PARTITION  NEW-OFFSET
     topic1                         0          0
    +

    Managing share groups

    + + NOTE: Apache Kafka 4.1 ships with a preview of share groups which is not enabled by default. To enable share groups, use the kafka-features.sh tool to upgrade to share.version=1. + For more information, please read the release notes. +

    + Use the ShareGroupCommand tool to list, describe, or delete the share groups. Only share groups without any active members can be deleted. + + For example, to list all share groups in a cluster: + +

    $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --list
    +my-share-group
    + + To view the current start offset, use the "--describe" option: + +
    $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group
    +GROUP           TOPIC           PARTITION  START-OFFSET
    +my-share-group  topic1          0          4
    + + NOTE: The admin client needs DESCRIBE access to all the topics used in the group. + + There are many --describe options that provide more detailed information about a share group: +
      +
    • --members: Describes active members in the share group. +
      bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group --members
      +GROUP           CONSUMER-ID            HOST            CLIENT-ID              #PARTITIONS  ASSIGNMENT
      +my-share-group  94wrSQNmRda9Q6sk6jMO6Q /127.0.0.1      console-share-consumer 1            topic1:0
      +my-share-group  EfI0sha8QSKSrL_-I_zaTA /127.0.0.1      console-share-consumer 1            topic1:0
      + You can see that both members have been assigned the same partition which they are sharing. +
    • +
    • --offsets: The default describe option. This provides the same output as the "--describe" option.
    • +
    • --state: Describes a summary of the state of the share group. +
      bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --describe --group my-share-group --state
      +GROUP           COORDINATOR (ID)          STATE           #MEMBERS
      +my-share-group  localhost:9092  (1)       Stable          2
      +
    • +
    + +

    To delete the offsets of individual topics in the share group, use the "--delete-offsets" option: + +

    $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --delete-offsets --group my-share-group --topic topic1
    +TOPIC           STATUS
    +topic1          Successful
    + +

    To delete one or more share groups, use "--delete" option: + +

    $ bin/kafka-share-groups.sh --bootstrap-server localhost:9092 --delete --group my-share-group
    +Deletion of requested share groups ('my-share-group') was successful.

    Expanding your cluster

    @@ -352,7 +410,7 @@

    Limiting Bandwidth Usage during Data Migration

    +

    Limiting bandwidth usage during data migration

    Kafka lets you apply a throttle to replication traffic, setting an upper bound on the bandwidth used to move replicas from machine to machine and from disk to disk. This is useful when rebalancing a cluster, adding or removing brokers or adding or removing disks, as it limits the impact these data-intensive operations will have on users.

    There are two interfaces that can be used to engage a throttle. The simplest, and safest, is to apply a throttle when invoking the kafka-reassign-partitions.sh, but kafka-configs.sh can also be used to view and alter the throttle values directly. @@ -1794,7 +1852,7 @@

    The max time in millis remote copies was throttled by a broker kafka.server:type=RemoteLogManager, name=remote-copy-throttle-time-max + + RemoteLogReader Fetch Rate And Time + The time to read data from remote storage by a broker + kafka.log.remote:type=RemoteLogManager,name=RemoteLogReaderFetchRateAndTimeMs + + + Delayed Remote List Offsets Expires Per Sec + The number of expired remote list offsets per second. Omitting 'topic=(...), partition=(...)' will yield the all-topic rate + kafka.server:type=DelayedRemoteListOffsetsMetrics,name=ExpiresPerSec,topic=([-.\w]+),partition=([0-9]+) + @@ -3011,6 +3079,11 @@
    Provisioning Nodes
    +

    Upgrade

    + +

Apache Kafka 4.1 added support for upgrading a cluster from a static controller configuration to a dynamic controller configuration. Dynamic controller configuration allows users to add controllers to and remove controllers from the cluster. See the Controller membership changes section for more details.

    + +

    This feature upgrade is done by upgrading the KRaft feature version and updating the nodes' configuration.

    + +
    Describe KRaft Version
    + +

Support for a dynamic controller cluster was added in kraft.version=1, or release-version 4.1. To determine which KRaft feature version the cluster is using, you can execute the following CLI command:

    + +
    $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe
    +...
    +Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 0        Epoch: 7
    +Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 4.0-IV3    FinalizedVersionLevel: 4.0-IV3  Epoch: 7
    + +

    If the FinalizedVersionLevel for Feature: kraft.version is 0, the version needs to be upgraded to at least 1 to support a dynamic controller cluster.

    + +
    Upgrade KRaft Version
    + +

The KRaft feature version can be upgraded to support dynamic controller clusters by using the kafka-features.sh CLI command. To upgrade all of the feature versions to the latest version:

    + +
    $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --release-version 4.1
    + +

    To upgrade just the KRaft feature version:

    + +
    $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --feature kraft.version=1
    + +
    Update KRaft Config
    + +

KRaft version 1 deprecated the controller.quorum.voters property and added the controller.quorum.bootstrap.servers property. After checking that the KRaft version has been successfully upgraded to at least version 1, remove the controller.quorum.voters property and add the controller.quorum.bootstrap.servers property to all of the nodes (controllers and brokers) in the cluster.

    process.roles=...
    node.id=...
    controller.quorum.bootstrap.servers=controller1.example.com:9093,controller2.example.com:9093,controller3.example.com:9093
    controller.listener.names=CONTROLLER

    Provisioning Nodes

    The bin/kafka-storage.sh random-uuid command can be used to generate a cluster ID for your new cluster. This cluster ID must be used when formatting each server in the cluster with the bin/kafka-storage.sh format command.
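    For instance, a minimal sketch of generating the cluster ID once and capturing it in a shell variable, which can then be passed to the format commands shown below:

    $ KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
    $ echo "$KAFKA_CLUSTER_ID"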

    This is different from how Kafka has operated in the past. Previously, Kafka would format blank storage directories automatically and also generate a new cluster ID automatically. One reason for the change is that auto-formatting can sometimes obscure an error condition. This is particularly important for the metadata log maintained by the controller and broker servers. If a majority of the controllers were able to start with an empty log directory, a leader might be elected even though some committed data is missing.

    Bootstrap a Standalone Controller
    The recommended method for creating a new KRaft controller cluster is to bootstrap it with one voter and dynamically add the rest of the controllers. Bootstrapping the first controller can be done with the following CLI command:
    $ bin/kafka-storage.sh format --cluster-id <CLUSTER_ID> --standalone --config config/controller.properties
    This command will 1) create a meta.properties file in metadata.log.dir with a randomly generated directory.id, 2) create a snapshot at 00000000000000000000-0000000000.checkpoint with the necessary control records (KRaftVersionRecord and VotersRecord) to make this Kafka node the only voter for the quorum.
    Bootstrap with Multiple Controllers
    The KRaft cluster metadata partition can also be bootstrapped with more than one voter. This can be done by using the --initial-controllers flag:
    CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
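    The remainder of this example is elided in the diff above; a sketch of how the format command might be invoked with --initial-controllers, continuing from the CLUSTER_ID variable and assuming three controllers named controller-0 through controller-2 listening on port 1234:
    CONTROLLER_0_UUID="$(bin/kafka-storage.sh random-uuid)"
    CONTROLLER_1_UUID="$(bin/kafka-storage.sh random-uuid)"
    CONTROLLER_2_UUID="$(bin/kafka-storage.sh random-uuid)"

    # Run on each of the three controllers
    $ bin/kafka-storage.sh format --cluster-id "${CLUSTER_ID}" \
        --initial-controllers "0@controller-0:1234:${CONTROLLER_0_UUID},1@controller-1:1234:${CONTROLLER_1_UUID},2@controller-2:1234:${CONTROLLER_2_UUID}" \
        --config config/controller.properties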
    In the replica description 0@controller-0:1234:3Db5QLSqSZieL3rJBUUegA, 0 is the replica id, 3Db5QLSqSZieL3rJBUUegA is the replica directory id, controller-0 is the replica's host and 1234 is the replica's port.
    Formatting Brokers and New Controllers
    When provisioning new broker and controller nodes that we want to add to an existing Kafka cluster, use the kafka-storage.sh format command with the --no-initial-controllers flag.
    $ bin/kafka-storage.sh format --cluster-id <CLUSTER_ID> --config config/server.properties --no-initial-controllers

    Controller membership changes

    Static versus Dynamic KRaft Quorums
    There are two ways to run KRaft: using KIP-853 dynamic controller quorums, or the old way using static controller quorums.

    When using a dynamic quorum, controller.quorum.voters must not be set and controller.quorum.bootstrap.servers is set instead. This configuration key need not contain all the controllers, but it should contain as many as possible so that all the servers can locate the quorum. In other words, its function is much like the bootstrap.servers configuration used by Kafka clients.

    When using a static quorum, the configuration file for each broker and controller must specify the IDs, hostnames, and ports of all controllers in controller.quorum.voters.

    If you are not sure whether you are using static or dynamic quorums, you can determine this by running something like the following:

    $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe

    If the kraft.version field is level 0 or absent, you are using a static quorum. If it is 1 or above, you are using a dynamic quorum. For example, here is an example of a static quorum:

    Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 0 Epoch: 5
    Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5

    Here is another example of a static quorum:

    Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.8-IV0 FinalizedVersionLevel: 3.8-IV0  Epoch: 5

    Here is an example of a dynamic quorum:

    Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 1 Epoch: 5
    Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5

    The static versus dynamic nature of the quorum is determined at the time of formatting. Specifically, the quorum will be formatted as dynamic if controller.quorum.voters is not present, and one of --standalone, --initial-controllers, or --no-initial-controllers is set. If you have followed the instructions earlier in this document, you will get a dynamic quorum.

    Note: To migrate from a static voter set to a dynamic voter set, please refer to the Upgrade section.

    Add New Controller
    If a dynamic controller cluster already exists, it can be expanded by first provisioning a new controller using the kafka-storage.sh tool and starting the controller. After starting the controller, the replication to the new controller can be monitored using the bin/kafka-metadata-quorum.sh describe --replication command. Once the new controller has caught up to the active controller, it can be added to the cluster using the bin/kafka-metadata-quorum.sh add-controller command.
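    For example, a sketch of monitoring the new controller's replication progress (localhost:9093 is an assumed controller endpoint):
    $ bin/kafka-metadata-quorum.sh --bootstrap-controller localhost:9093 describe --replication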
    $ bin/kafka-metadata-quorum.sh --command-config config/controller.properties --bootstrap-controller localhost:9093 add-controller
    Note that if any configs need to be passed to the Admin Client, such as the authentication configuration, please also include them in controller.properties.
    Remove Controller
    If the dynamic controller cluster already exists, it can be shrunk using the bin/kafka-metadata-quorum.sh remove-controller command. Until KIP-996: Pre-vote has been implemented and released, it is recommended to shut down the controller that will be removed before running the remove-controller command.
    $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000000.log
    This command decodes and prints the records in a cluster metadata snapshot:

    $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000100-0000000001.checkpoint
    Deploying Considerations
    • Kafka server's process.roles should be set to either broker or controller but not both. Combined mode can be used in development environments, but it should be avoided in critical deployment environments.
    • For redundancy, a Kafka cluster should use 3 or more controllers, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. For the KRaft controller cluster to withstand N concurrent failures the controller cluster must include 2N + 1 controllers.
    • The Kafka controllers store all the metadata for the cluster in memory and on disk. We believe that for a typical Kafka cluster 5GB of main memory and 5GB of disk space on the metadata log directory is sufficient.

    In order to migrate from ZooKeeper to KRaft you need to use a bridge release. The last bridge release is Kafka 3.9. See the ZooKeeper to KRaft Migration steps in the 3.9 documentation.

    6.9 Tiered Storage

    Tiered Storage Overview


    Broker Configurations

    By default, the Kafka server will not enable the tiered storage feature. remote.log.storage.system.enable is the property to control whether to enable tiered storage functionality in a broker or not. Setting it to "true" enables this feature.

    RemoteStorageManager is an interface to provide the lifecycle of remote log segments and indexes. The Kafka server doesn't provide an out-of-the-box implementation of RemoteStorageManager. Users must configure remote.log.storage.manager.class.name and remote.log.storage.manager.class.path to specify the implementation of RemoteStorageManager.
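    As a sketch, a broker configuration enabling tiered storage might look like the following; the class name and plugin path are assumptions standing in for a real RemoteStorageManager implementation:
    remote.log.storage.system.enable=true
    remote.log.storage.manager.class.name=com.example.MyRemoteStorageManager
    remote.log.storage.manager.class.path=/opt/kafka/plugins/my-rsm/*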


    The LocalTieredStorage implementation provided for integration tests can be used, which will create a temporary directory in local storage to simulate the remote storage.

    To adopt the LocalTieredStorage, the test library needs to be built locally:

    # please check out the specific version tag you're using before building it
     # ex: `git checkout {{fullDotVersion}}`
     $ ./gradlew clean :storage:testJar

    Try to send messages to the `tieredTopic` topic to roll the log segment:

    $ bin/kafka-producer-perf-test.sh --bootstrap-server localhost:9092 --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1

    Then, after the active segment is rolled, the old segment should be moved to the remote storage and deleted. This can be verified by checking the remote log directory configured above. For example:

    Lastly, we can try to consume some data from the beginning and print the offset numbers, to make sure it successfully fetches offset 0 from the remote storage.

    $ bin/kafka-console-consumer.sh --topic tieredTopic --from-beginning --max-messages 1 --bootstrap-server localhost:9092 --formatter-property print.offset=true

    In KRaft mode, you can disable tiered storage at the topic level to make the remote logs read-only, or completely delete all remote logs.


    While Tiered Storage works for most use cases, it is still important to be aware of the following limitations:
    • No support for compacted topics
    • Disabling tiered storage on all topics where it is enabled is required before disabling tiered storage at the broker level
    • Admin actions related to tiered storage feature are only supported on clients from version 3.0 onwards
    • No support for log segments missing the producer snapshot file. This can happen when the topic was created before v2.8.0.

    Overview

    Starting from Apache Kafka 4.0, Eligible Leader Replicas (KIP-966 Part 1) is available to users as an improvement to Kafka replication (ELR is enabled by default on new clusters starting with 4.1). As the "strict min ISR" rule has been generally applied, which means the high watermark for the data partition can't advance if the size of the ISR is smaller than the min ISR (min.insync.replicas), it makes some replicas that are not in the ISR safe to become the leader. The KRaft controller stores such replicas in the PartitionRecord field called Eligible Leader Replicas. During the leader election, the controller will select the leaders in the following order:

    • If ISR is not empty, select one of them.
    • If ELR is not empty, select one that is not fenced.

      Tool

      The ELR fields can be checked through the API DescribeTopicPartitions. The admin client can fetch the ELR info by describing the topics.
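      As an illustration, a minimal Admin client sketch that describes a topic; on brokers and clients that support KIP-966 the printed partition info includes the ELR fields. The bootstrap server and topic name are assumptions:

      import java.util.List;
      import java.util.Properties;
      import org.apache.kafka.clients.admin.Admin;
      import org.apache.kafka.clients.admin.AdminClientConfig;
      import org.apache.kafka.clients.admin.TopicDescription;

      public class DescribeElrExample {
          public static void main(String[] args) throws Exception {
              Properties props = new Properties();
              props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
              try (Admin admin = Admin.create(props)) {
                  // Describe the topic; partition descriptions include ELR data when supported.
                  TopicDescription description = admin.describeTopics(List.of("my-topic")) // assumption
                          .allTopicNames().get().get("my-topic");
                  description.partitions().forEach(System.out::println);
              }
          }
      }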

      Note that when the ELR feature is enabled:

      • The cluster-level min.insync.replicas config will be added if there is not any. The value is the same as the static config in the active controller.
      • The removal of the min.insync.replicas config at the cluster level is not allowed.
      • If the cluster-level min.insync.replicas is updated, even if the value is unchanged, all the ELR state will be cleaned.
      • The previously set min.insync.replicas value at the broker-level config will be removed. Please set it at the cluster level if necessary.
      • The alteration of the min.insync.replicas config at the broker level is not allowed.
      • If min.insync.replicas is updated for a topic, the ELR state will be cleaned.
diff --git a/docs/security.html b/docs/security.html
index 8c0d345ec8e24..9364a05e40af7 100644
--- a/docs/security.html
+++ b/docs/security.html
      SSL key and certificates in PEM format

      Examples using console-producer and console-consumer:
      $ bin/kafka-console-producer.sh --bootstrap-server localhost:9093 --topic test --command-config client-ssl.properties
      $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9093 --topic test --command-config client-ssl.properties
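      A minimal client-ssl.properties sketch for the commands above, assuming a PEM CA certificate at /path/to/ca-cert.pem:
      security.protocol=SSL
      ssl.truststore.type=PEM
      ssl.truststore.location=/path/to/ca-cert.pem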

      7.4 Authentication using SASL


      See GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER for example broker configurations.

    • JAAS configuration for Kafka clients

      See GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER for example client configurations.

    • JAAS configuration using static config file
      To configure SASL authentication on the clients using a static JAAS config file:

      KafkaClient for the selected mechanism as described in the examples for setting up GSSAPI (Kerberos), PLAIN, SCRAM, or non-production/production OAUTHBEARER. For example, GSSAPI credentials may be configured as:
      KafkaClient {

      RFC 7628. The default OAUTHBEARER implementation in Kafka creates and validates Unsecured JSON Web Tokens and is only suitable for use in non-production Kafka installations. Refer to Security Considerations for more details. Recent versions of Apache Kafka have added production-ready OAUTHBEARER implementations that support interaction with an OAuth 2.0 standards-compliant identity provider. Both modes are described in the following, noted where applicable.

      Under the default implementation of principal.builder.class, the principalName of OAuthBearerToken is used as the authenticated Principal for configuration of ACLs etc.
      Configuring Non-production Kafka Brokers

        The default implementation of SASL/OAUTHBEARER in Kafka creates and validates Unsecured JSON Web Tokens. While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment.

        1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example:
          KafkaServer {

          The property unsecuredLoginStringClaim_sub in the KafkaServer section is used by the broker when it initiates connections to other brokers. In this example, admin will appear in the subject (sub) claim and will be the user for inter-broker communication.
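          The JAAS file contents are elided in this diff; an unsecured configuration consistent with the description above might look like this sketch (the admin principal matches the inter-broker user mentioned above):
          KafkaServer {
              org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
              unsecuredLoginStringClaim_sub="admin";
          };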

          Here are the various supported JAAS module options on the broker side for Unsecured JSON Web Token validation:

          • unsecuredValidatorPrincipalClaimName="value": Set to a non-empty value if you wish a particular String claim holding a principal name to be checked for existence; the default is to check for the existence of the 'sub' claim.
          • unsecuredValidatorScopeClaimName="value": Set to a custom claim name if you wish the name of the String or String List claim holding any token scope to be something other than 'scope'.
          • unsecuredValidatorRequiredScope="value": Set to a space-delimited list of scope values if you wish the String/String List claim holding the token scope to be checked to make sure it contains certain values.
          • unsecuredValidatorAllowableClockSkewMs="value": Set to a positive integer value if you wish to allow up to some number of positive milliseconds of clock skew (the default is 0).

        2. Pass the JAAS config file location as a JVM parameter to each Kafka broker:
          -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
        3. Configure SASL port and SASL mechanisms in server.properties as described here. For example:

      Configuring Production Kafka Brokers

        1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example:
        KafkaServer {
            org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;
        };
        2. Pass the JAAS config file location as a JVM parameter to each Kafka broker:
        -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
        3. Configure SASL port and SASL mechanisms in server.properties as described here. For example:
        listeners=SASL_SSL://host.name:port
        security.inter.broker.protocol=SASL_SSL
        sasl.mechanism.inter.broker.protocol=OAUTHBEARER
        sasl.enabled.mechanisms=OAUTHBEARER
        listener.name.<listener name>.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
        listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwks.endpoint.url=https://example.com/oauth2/v1/keys

        The OAUTHBEARER broker configuration includes:

        • sasl.oauthbearer.clock.skew.seconds
        • sasl.oauthbearer.expected.audience
        • sasl.oauthbearer.expected.issuer
        • sasl.oauthbearer.jwks.endpoint.refresh.ms
        • sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms
        • sasl.oauthbearer.jwks.endpoint.retry.backoff.ms
        • sasl.oauthbearer.jwks.endpoint.url
        • sasl.oauthbearer.scope.claim.name
        • sasl.oauthbearer.sub.claim.name

      Configuring Non-production Kafka Clients
      To configure SASL authentication on the clients:
      1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties.

        sub) claims in sasl.jaas.config.

        The default implementation of SASL/OAUTHBEARER in Kafka creates and validates Unsecured JSON Web Tokens. While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment.

        Here are the various supported JAAS module options on the client side (and on the broker side if OAUTHBEARER is the inter-broker protocol); a client configuration sketch follows the list:

        • unsecuredLoginStringClaim_<claimname>="value": Creates a String claim with the given name and value. Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
        • unsecuredLoginNumberClaim_<claimname>="value": Creates a Number claim with the given name and value. Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
        • unsecuredLoginListClaim_<claimname>="value": Creates a String List claim with the given name and values parsed from the given value where the first character is taken as the delimiter. For example: unsecuredLoginListClaim_fubar="|value1|value2". Any valid claim name can be specified except 'iat' and 'exp' (these are automatically generated).
        • unsecuredLoginExtension_<extensionname>="value": Creates a String extension with the given name and value. For example: unsecuredLoginExtension_traceId="123". A valid extension name is any sequence of lowercase or uppercase alphabet characters. In addition, the "auth" extension name is reserved. A valid extension value is any combination of characters with ASCII codes 1-127.
        • unsecuredLoginPrincipalClaimName: Set to a custom claim name if you wish the name of the String claim holding the principal name to be something other than 'sub'.
        • unsecuredLoginLifetimeSeconds: Set to an integer value if the token expiration is to be set to something other than the default value of 3600 seconds (which is 1 hour). The 'exp' claim will be set to reflect the expiration time.
        • unsecuredLoginScopeClaimName: Set to a custom claim name if you wish the name of the String or String List claim holding any token scope to be something other than 'scope'.
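        For example, a sketch of a non-production client configuration that uses these options; the alice principal is an assumption:
        security.protocol=SASL_SSL
        sasl.mechanism=OAUTHBEARER
        sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
            unsecuredLoginStringClaim_sub="alice";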


        JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named KafkaClient. This option allows only one user for all client connections from a JVM.


      Configuring Production Kafka Clients
        To configure SASL authentication on the clients:
        1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. The login module describes how clients like the producer and consumer can connect to the Kafka Broker. The following is an example configuration for a client for the OAUTHBEARER mechanisms:
          sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;

          JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named KafkaClient. This option allows only one user for all client connections from a JVM.
        2. Configure the following properties in producer.properties or consumer.properties. For example, if using the OAuth client_credentials grant type to communicate with the OAuth identity provider, the configuration might look like this:
          security.protocol=SASL_SSL
          sasl.mechanism=OAUTHBEARER
          sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
          sasl.oauthbearer.client.credentials.client.id=jdoe
          sasl.oauthbearer.client.credentials.client.secret=$3cr3+
          sasl.oauthbearer.scope=my-application-scope
          sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

          Or, if using the OAuth urn:ietf:params:oauth:grant-type:jwt-bearer grant type to communicate with the OAuth identity provider, the configuration might look like this:
          security.protocol=SASL_SSL
          sasl.mechanism=OAUTHBEARER
          sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
          sasl.oauthbearer.assertion.private.key.file=/path/to/private.key
          sasl.oauthbearer.assertion.algorithm=RS256
          sasl.oauthbearer.assertion.claim.exp.seconds=600
          sasl.oauthbearer.assertion.template.file=/path/to/template.json
          sasl.oauthbearer.scope=my-application-scope
          sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token

          The OAUTHBEARER client configuration includes:
          • sasl.oauthbearer.assertion.algorithm
          • sasl.oauthbearer.assertion.claim.aud
          • sasl.oauthbearer.assertion.claim.exp.seconds
          • sasl.oauthbearer.assertion.claim.iss
          • sasl.oauthbearer.assertion.claim.jti.include
          • sasl.oauthbearer.assertion.claim.nbf.seconds
          • sasl.oauthbearer.assertion.claim.sub
          • sasl.oauthbearer.assertion.file
          • sasl.oauthbearer.assertion.private.key.file
          • sasl.oauthbearer.assertion.private.key.passphrase
          • sasl.oauthbearer.assertion.template.file
          • sasl.oauthbearer.client.credentials.client.id
          • sasl.oauthbearer.client.credentials.client.secret
          • sasl.oauthbearer.header.urlencode
          • sasl.oauthbearer.jwt.retriever.class
          • sasl.oauthbearer.jwt.validator.class
          • sasl.oauthbearer.scope
          • sasl.oauthbearer.token.endpoint.url
        The default unsecured SASL/OAUTHBEARER implementation may be overridden (and must be overridden in production environments) using custom login and SASL Server callback handlers.

        For more details on security considerations, refer to RFC 6749, Section 10.

      The default implementation of SASL/OAUTHBEARER depends on the jackson-databind library. Since it's an optional dependency, users have to configure it as a dependency via their build tool.

      Token Refresh for SASL/OAUTHBEARER
      Kafka periodically refreshes any token before it expires so that the client can continue to make

  • Follow the mechanism-specific steps in GSSAPI (Kerberos), PLAIN, SCRAM, and non-production/production OAUTHBEARER to configure SASL for the enabled mechanisms.
    to the stream processing pipeline, known as the Lambda Architecture. Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline.
diff --git a/docs/streams/developer-guide/app-reset-tool.html b/docs/streams/developer-guide/app-reset-tool.html
index 48a40043e701a..7f299ac994160 100644
--- a/docs/streams/developer-guide/app-reset-tool.html
+++ b/docs/streams/developer-guide/app-reset-tool.html

    Step 1: Run the application reset tool

    Optional configuration parameters

  • Kafka consumers and producer configuration parameters

    commit.interval.ms    Low    The frequency in milliseconds with which to save the position (offsets in source topics) of tasks.    30000 (30 seconds) (at-least-once) / 100 (exactly-once)
    default.deserialization.exception.handler (Deprecated. Use deserialization.exception.handler instead.)    Medium    Exception handling class that implements the DeserializationExceptionHandler interface.    LogAndFailExceptionHandler
    default.key.serde    Medium

    default.dsl.store (Deprecated. Use dsl.store.suppliers.class instead.)    Low    The default state store type used by DSL operators.    "ROCKS_DB"

    repartition.purge.interval.ms    Low    The frequency in milliseconds with which to delete fully consumed records from repartition topics. Purging will occur after at least this value since the last purge, but may be delayed until later.    30000 (30 seconds)
    retry.backoff.ms    Low    The amount of time in milliseconds, before a request is retried.    100
    rocksdb.config.setter    Medium    The RocksDB configuration.    null
    state.cleanup.delay.ms    Low    The amount of time in milliseconds to wait before deleting state when a partition has migrated.    600000 (10 minutes)
    state.dir    High    Directory location for state stores.    /${java.io.tmpdir}/kafka-streams
    task.assignor.class    Medium    A task assignor class or class name implementing the TaskAssignor interface.    The high-availability task assignor.
    task.timeout.ms    Medium    The maximum amount of time in milliseconds a task might stall due to internal errors and retries until an error is raised. For a timeout of 0 ms, a task would raise an error for the first internal error. For any timeout larger than 0 ms, a task will retry at least once before an error is raised.    300000 (5 minutes)
    topology.optimization    Medium    A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: StreamsConfig.NO_OPTIMIZATION (none), StreamsConfig.OPTIMIZE (all) or a comma separated list of specific optimizations: StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS (reuse.ktable.source.topics), StreamsConfig.MERGE_REPARTITION_TOPICS (merge.repartition.topics), StreamsConfig.SINGLE_STORE_SELF_JOIN (single.store.self.join).    "NO_OPTIMIZATION"
    upgrade.from    Medium    The version you are upgrading from during a rolling upgrade. See Upgrade From    null
    windowstore.changelog.additional.retention.ms    Low    Added to a windows maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift.    86400000 (1 day)
    window.size.ms (Deprecated. See Window Serdes for alternatives.)    Low    Sets window size for the deserializer in order to calculate window end times.    null
    windowed.inner.class.serde (Deprecated. See Window Serdes for alternatives.)    Low    Serde for the inner class of a windowed record. Must implement the Serde interface.    null
    group.protocol

    The group protocol used by the Kafka Streams client for coordination. It determines how the client will communicate with the Kafka brokers and other clients in the same group. The default value is "classic", which is the classic consumer group protocol. Can be set to "streams" (requires broker-side enablement) to enable the new Kafka Streams group protocol. Note that the "streams" rebalance protocol is an Early Access feature and should not be used in production.
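    For example, a sketch of a Kafka Streams configuration opting into the Early Access protocol; the application id and bootstrap servers are placeholders:
    application.id=my-streams-app
    bootstrap.servers=localhost:9092
    group.protocol=streams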


    rack.aware.assignment.non_overlap_cost


    topology.optimization

    windowed.inner.class.serde

  • JSON
  • Window Serdes
  • Implementing custom serdes
  • Kafka Streams DSL for Scala Implicit Serdes
  • Configuring Serdes


    Primitive and basic types

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>{{fullDotVersion}}</version>
    </dependency>

    This artifact provides the following serde implementations under the package org.apache.kafka.common.serialization, which you can leverage when e.g., defining default serializers in your Streams configuration.


    JSON

    As shown in the example, you can use the JSONSerdes inner classes with Serdes.serdeFrom(<serializerInstance>, <deserializerInstance>) to construct JSON-compatible serializers and deserializers.


    Window Serdes

    Apache Kafka Streams includes serde implementations for windowed types in its kafka-streams Maven artifact:

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>{{fullDotVersion}}</version>
    </dependency>

    This artifact provides the following windowed serde implementations under the package org.apache.kafka.streams.kstream:


    Serdes:

    • WindowedSerdes.TimeWindowedSerde<T>
    • WindowedSerdes.SessionWindowedSerde<T>

    Serializers:

    • TimeWindowedSerializer<T>
    • SessionWindowedSerializer<T>

    Deserializers:

    • TimeWindowedDeserializer<T>
    • SessionWindowedDeserializer<T>

    Usage in Code


    When using windowed serdes in your application code, you typically create instances via constructors or factory methods:

    // Time windowed serde - using factory method
    Serde<Windowed<String>> timeWindowedSerde =
        WindowedSerdes.timeWindowedSerdeFrom(String.class, 500L);

    // Time windowed serde - using constructor
    Serde<Windowed<String>> timeWindowedSerde2 =
        new WindowedSerdes.TimeWindowedSerde<>(Serdes.String(), 500L);

    // Session windowed serde - using factory method
    Serde<Windowed<String>> sessionWindowedSerde =
        WindowedSerdes.sessionWindowedSerdeFrom(String.class);

    // Session windowed serde - using constructor
    Serde<Windowed<String>> sessionWindowedSerde2 =
        new WindowedSerdes.SessionWindowedSerde<>(Serdes.String());

    // Using individual serializers/deserializers
    TimeWindowedSerializer<String> serializer = new TimeWindowedSerializer<>(Serdes.String().serializer());
    TimeWindowedDeserializer<String> deserializer = new TimeWindowedDeserializer<>(Serdes.String().deserializer(), 500L);

    Usage in Command Line


    When using command-line tools (like bin/kafka-console-consumer.sh), you can configure windowed deserializers by passing the inner class and window size via configuration properties. The property names use a prefix pattern:

    # Time windowed deserializer configuration
    --formatter-property print.key=true \
    --formatter-property key.deserializer=org.apache.kafka.streams.kstream.TimeWindowedDeserializer \
    --formatter-property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer \
    --formatter-property key.deserializer.window.size.ms=500

    # Session windowed deserializer configuration
    --formatter-property print.key=true \
    --formatter-property key.deserializer=org.apache.kafka.streams.kstream.SessionWindowedDeserializer \
    --formatter-property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer

    Deprecated Configs


    The following StreamsConfig parameters are deprecated in favor of passing parameters directly to serializer/deserializer constructors:

    • StreamsConfig.WINDOWED_INNER_CLASS_SERDE is deprecated in favor of TimeWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS and TimeWindowedDeserializer.WINDOWED_INNER_DESERIALIZER_CLASS
    • StreamsConfig.WINDOW_SIZE_MS_CONFIG is deprecated in favor of TimeWindowedDeserializer.WINDOW_SIZE_MS_CONFIG

    Implementing custom Serdes

    If you need to implement custom Serdes, your best starting point is to take a look at the source code references of

diff --git a/docs/streams/developer-guide/dsl-api.html b/docs/streams/developer-guide/dsl-api.html
index 8e8a36f76c075..4de5389ac75f4 100644
--- a/docs/streams/developer-guide/dsl-api.html
+++ b/docs/streams/developer-guide/dsl-api.html

    Operations and concepts

    Processor (provided by a given ProcessorSupplier);
    KStream#processValues: Process all records in a stream, one record at a time, by applying a FixedKeyProcessor (provided by a given FixedKeyProcessorSupplier) [CAUTION: If you are deploying a new Kafka Streams application, and you are using the "merge repartition topics" optimization, you should enable the fix for KAFKA-19668 to avoid compatibility issues for future upgrades to newer versions of Kafka Streams; for more details, see the migration guide below];
  • Processor: A processor of key-value pair records;
    ContextualProcessor: An abstract implementation of Processor that manages the ProcessorContext instance;
  • FixedKeyProcessor: A processor of key-value pair records where keys are immutable;
    ContextualFixedKeyProcessor: An abstract implementation of FixedKeyProcessor that manages the FixedKeyProcessorContext instance;
  • ProcessorSupplier: A processor supplier that can create one or more Processor instances; and

    The Processor API now serves as a unified replacement for all these methods. It simplifies the API surface while maintaining support for both stateless and stateful operations.


    CAUTION: If you are using KStream.transformValues() and you have the "merge repartition topics" optimization enabled, rewriting your program to KStream.processValues() might not be safe due to KAFKA-19668. In this case, you should not upgrade to Kafka Streams 4.0.0 or 4.1.0, but use Kafka Streams 4.0.1 instead, which contains a fix. Note that the fix is not enabled by default for backward compatibility reasons; you need to enable it by setting the config __enable.process.processValue.fix__ = true and passing it into the StreamsBuilder() constructor.

    final Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, ...);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ...);
    properties.put(TopologyConfig.InternalConfig.ENABLE_PROCESS_PROCESSVALUE_FIX, true);

    final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(properties)));

    It is recommended that you compare the output of Topology.describe() for the old and new topology, to verify that the rewrite to processValues() is correct and does not introduce any incompatibilities. You should also test the upgrade in a non-production environment.
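    For instance, continuing the snippet above, the description can simply be printed and diffed against the old program's output (a sketch, not part of the official example):
    final Topology topology = builder.build();
    System.out.println(topology.describe());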


    Migration Examples

    To migrate from the deprecated transform, transformValues, flatTransform, and flatTransformValues methods to the Processor API (PAPI) in Kafka Streams, let's resume the

diff --git a/docs/streams/developer-guide/security.html b/docs/streams/developer-guide/security.html
index 13e671fa93120..bae4d90b8b24a 100644
--- a/docs/streams/developer-guide/security.html
+++ b/docs/streams/developer-guide/security.html

    the ACL set so that the application has the permissions to create, read and write internal topics.


    If the streams rebalance protocol is enabled by setting group.protocol=streams, the following ACLs are required on the topic and group resources:

    API PROTOCOL    OPERATION    Resource    Notes
    STREAMS_GROUP_HEARTBEAT    Read    Group    Required for the application's streams group
    STREAMS_GROUP_HEARTBEAT    Create    Cluster or Topic    Required only if auto-creating internal topics: either Create on the Cluster resource, or Create on all topics in StateChangelogTopics and RepartitionSourceTopics. Not required if internal topics are pre-created.
    STREAMS_GROUP_HEARTBEAT    Describe    Topic    Required for all topics used in the application's topology, when first joining.
    STREAMS_GROUP_DESCRIBE    Describe    Group    Required for the application's streams group
    STREAMS_GROUP_DESCRIBE    Describe    Topic    Required for all topics used in the group's topology

    As mentioned earlier, Kafka Streams applications need appropriate ACLs to create internal topics when running against a secured Kafka cluster. To avoid providing this permission to your application, you can create the required internal topics manually. If the internal topics exist, Kafka Streams will not try to recreate them. Note that the internal repartition and changelog topics must be created with the correct number of partitions; otherwise, Kafka Streams will fail on startup. The topics must be created with the same number of partitions as your input topic, or if there are multiple topics, the maximum number of partitions across all input topics.

    and KIP-290 for details).
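    For example, a sketch of manually pre-creating one internal changelog topic with kafka-topics.sh; the topic name, partition count, and replication factor are assumptions and must match your application's actual internal topic names and input partition counts:
    $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create \
        --topic my-app-id-my-store-changelog --partitions 4 --replication-factor 3 \
        --config cleanup.policy=compact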


    Security example

    The purpose is to configure a Kafka Streams application to enable client authentication and encrypt data-in-transit when

diff --git a/docs/streams/quickstart.html b/docs/streams/quickstart.html
index 56ab128e2a5cb..b6bd6a51e637f 100644
--- a/docs/streams/quickstart.html
+++ b/docs/streams/quickstart.html

    Step 4: Start the Wordcount Application
    $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
         --topic streams-wordcount-output \
         --from-beginning \
    --formatter-property print.key=true \
    --formatter-property print.value=true \
    --formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer

    Step 5: Process some data


    Step 5
    $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
         --topic streams-wordcount-output \
         --from-beginning \
    --formatter-property print.key=true \
    --formatter-property print.value=true \
    --formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
     
     all	    1
     streams	1

    Step 5
    $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
         --topic streams-wordcount-output \
         --from-beginning \
    --formatter-property print.key=true \
    --formatter-property print.value=true \
    --formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
     
     all	    1
     streams	1

    Step 5
    $ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
         --topic streams-wordcount-output \
         --from-beginning \
    --formatter-property print.key=true \
    --formatter-property print.value=true \
    --formatter-property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --formatter-property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer
     
     all	    1
     streams	1
    diff --git a/docs/streams/upgrade-guide.html b/docs/streams/upgrade-guide.html
    index a203190756340..b875fcddc891c 100644
    --- a/docs/streams/upgrade-guide.html
    +++ b/docs/streams/upgrade-guide.html
    @@ -35,9 +35,8 @@ 

    Upgrade Guide and API Changes

    Upgrading from any older version to {{fullDotVersion}} is possible: if upgrading from 3.4 or below, you will need to do two rolling bounces, where during the first rolling bounce phase you set the config upgrade.from="older version" (possible values are "2.4" - "3.4") and during the second you remove it. This is required to safely handle 2 changes. The first is a change in foreign-key join serialization format. The second is a change in the serialization format for an internal repartition topic. For more details, please refer to KIP-904:

    • prepare your application instances for a rolling bounce and make sure that config upgrade.from is set to the version from which it is being upgraded.

      Upgrade Guide and API Changes

    • prepare your newly deployed {{fullDotVersion}} application instances for a second round of rolling bounces; make sure to remove the value for config upgrade.from
    • bounce each instance of your application once more to complete the upgrade
    As an alternative, an offline upgrade is also possible. Upgrading from any version as old as 0.11.0.x to {{fullDotVersion}} in offline mode requires the following steps:

    • stop all old (e.g., 0.11.0.x) application instances
    • update your code and swap old code and jar file with new code and new jar file
    • restart all new ({{fullDotVersion}}) application instances

    For a table that shows Streams API compatibility with Kafka broker versions, see Broker Compatibility.

    @@ -121,25 +108,113 @@

    <

    Since 2.6.0 release, Kafka Streams depends on a RocksDB version that requires MacOS 10.14 or higher.

    +

    Streams API changes in 4.1.0

    + +

    Note: Kafka Streams 4.1.0 contains a critical memory leak bug (KAFKA-19748) that affects users of range scans and certain DSL operators (session windows, sliding windows, stream-stream joins, foreign-key joins). Users running Kafka Streams should consider upgrading directly to 4.1.1 when available.

    + +

    Early Access of the Streams Rebalance Protocol

    +

- To run a Kafka Streams application version 2.2.1, 2.3.0, or higher a broker version 0.11.0 or higher is required - and the on-disk message format must be 0.11 or higher. - Brokers must be on version 0.10.1 or higher to run a Kafka Streams application version 0.10.1 to 2.2.0. - Additionally, on-disk message format must be 0.10 or higher to run a Kafka Streams application version 1.0 to 2.2.0. - For Kafka Streams 0.10.0, broker version 0.10.0 or higher is required. + The Streams Rebalance Protocol is a broker-driven rebalancing system designed specifically for Kafka + Streams applications. Following the pattern of KIP-848, which moved rebalance coordination of plain consumers + from clients to brokers, KIP-1071 extends this model to Kafka Streams workloads. Instead of a client + computing new assignments during rebalance events involving all members of the group, assignments are + computed continuously on the broker. Instead of using a consumer group, the streams application registers as a + streams group with the broker, which manages and exposes all metadata required for coordination of the + streams application instances.

    - In deprecated KStreamBuilder class, when a KTable is created from a source topic via KStreamBuilder.table(), its materialized state store - will reuse the source topic as its changelog topic for restoring, and will disable logging to avoid appending new updates to the source topic; in the StreamsBuilder class introduced in 1.0, this behavior was changed - accidentally: we still reuse the source topic as the changelog topic for restoring, but will also create a separate changelog topic to append the update records from source topic to. In the 2.0 release, we have fixed this issue and now users - can choose whether or not to reuse the source topic based on the StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG: if you are upgrading from the old KStreamBuilder class and hence you need to change your code to use - the new StreamsBuilder, you should set this config value to StreamsConfig#OPTIMIZE to continue reusing the source topic; if you are upgrading from 1.0 or 1.1 where you are already using StreamsBuilder and hence have already - created a separate changelog topic, you should set this config value to StreamsConfig#NO_OPTIMIZATION when upgrading to {{fullDotVersion}} in order to use that changelog topic for restoring the state store. - More details about the new config StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG can be found in KIP-295. + This Early Access release covers a subset of the functionality detailed in + KIP-1071. + Do not use the new protocol in production. The API is subject to change in future + releases.

    -

    Streams API changes in 4.1.0

    +

    What's Included in Early Access

    + +
      +
    • Core Streams Group Rebalance Protocol: The group.protocol=streams configuration + enables the dedicated streams rebalance protocol. This separates streams groups from consumer groups and + provides a streams-specific group membership lifecycle and metadata management on the broker.
    • +
    • Sticky Task Assignor: A basic task assignment strategy that minimizes task movement + during rebalances is included.
    • +
    • Interactive Query Support: IQ operations are compatible with the new streams protocol.
    • +
    • New Admin RPC: The StreamsGroupDescribe RPC provides streams-specific metadata + separate from consumer group information, with corresponding access via the Admin client.
    • +
    • CLI Integration: You can list, describe, and delete streams groups via the kafka-streams-groups.sh script.
    • +
    + +

    What's Not Included in Early Access

    + +
      +
    • Static Membership: Setting a client `instance.id` will be rejected.
    • +
    • Topology Updates: If a topology is changed significantly (e.g., by adding new source topics + or changing the number of sub-topologies), a new streams group must be created.
    • +
    • High Availability Assignor: Only the sticky assignor is supported.
    • +
    • Regular Expressions: Pattern-based topic subscription is not supported.
    • +
    • Reset Operations: CLI offset reset operations are not supported.
    • +
    • Protocol Migration: Group migration is not available between the classic and new streams protocols.
    • +
    + +

    Why Use the Streams Rebalance Protocol?

    + +
      +
    • + Broker-Driven Coordination: + Centralizes task assignment logic on brokers instead of the client. This provides consistent, + authoritative task assignment decisions from a single coordination point and reduces the potential for + split-brain scenarios. +
    • +
    • + Faster, More Stable Rebalances: + Reduces rebalance duration and impact by removing the global synchronization point. This minimizes + application downtime during membership changes or failures. +
    • +
    • + Better Observability: + Provides dedicated metrics and admin interfaces that separate streams from consumer groups, leading to + clearer troubleshooting with broker-side observability. +
    • +
    + +

+ Enabling the protocol requires that brokers and clients are running Apache Kafka 4.1. It should be enabled + only on new clusters for testing purposes. + Set unstable.feature.versions.enable=true for controllers and brokers, and + set unstable.api.versions.enable=true on the brokers as well. In your Kafka Streams application + configuration, set group.protocol=streams. + After the feature is configured, run + kafka-features.sh --bootstrap-server localhost:9092 describe + and verify that streams.version now has FinalizedVersionLevel 1. +
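On the client side, opting in is a single configuration entry; the sketch below is illustrative and passes the key as a literal string, since a dedicated StreamsConfig constant for it may or may not exist in your client version:
<pre><code class="language-java">
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StreamsProtocolOptIn {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount");         // must be a fresh application.id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // Opt in to the early-access streams rebalance protocol (KIP-1071).
        // "group.protocol" is given as a literal string here (assumption: no config constant is used).
        props.put("group.protocol", "streams");

        // ... build the topology and pass props to new KafkaStreams(topology, props) as usual.
    }
}
</code></pre>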

    + +

+ Migration between the classic consumer group protocol and the Streams Rebalance Protocol is not supported in + either direction. An application using this protocol must use a new application.id that has not + been used by any application on the classic protocol. Furthermore, this ID must not be in use as a + group.id by any consumer group (whether on the "classic" or "consumer" protocol) or by any share-group application. + Alternatively, you can delete a previous consumer group using kafka-consumer-groups.sh before + starting the application with the new protocol; note, however, that this also deletes all committed offsets for that group. +
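If you do decide to delete the old group before switching, a minimal Admin client sketch (the group name is illustrative) could look like the following; remember that this also removes the group's committed offsets:
<pre><code class="language-java">
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;

public class DeleteOldGroup {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Deleting the classic group also deletes all committed offsets for it.
            admin.deleteConsumerGroups(List.of("my-old-streams-app")).all().get();
        }
    }
}
</code></pre>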

    + +

    + To operate the new streams groups, explore the options of kafka-streams-groups.sh to list, + describe, and delete streams groups. In the new protocol, streams.session.timeout.ms, + streams.heartbeat.interval.ms and streams.num.standby.replicas are group-level configurations, + which are ignored when they are set on the client side. Use the kafka-configs.sh tool to set + these configurations, for example: + kafka-configs.sh --bootstrap-server localhost:9092 --alter --entity-type groups + --entity-name wordcount --add-config streams.num.standby.replicas=1. +
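The same group-level configuration can presumably also be set programmatically through the Admin client; the sketch below assumes that ConfigResource.Type.GROUP is available in your client and broker versions and reuses the wordcount group name from the example above:
<pre><code class="language-java">
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class SetStreamsGroupConfig {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Assumption: ConfigResource.Type.GROUP addresses group-level configurations.
            ConfigResource group = new ConfigResource(ConfigResource.Type.GROUP, "wordcount");
            AlterConfigOp setStandbys = new AlterConfigOp(
                    new ConfigEntry("streams.num.standby.replicas", "1"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(group, List.of(setStandbys))).all().get();
        }
    }
}
</code></pre>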

    + +

    + Please provide feedback on this feature via the + Kafka mailing lists or by filing + JIRA issues. +

    + +

    Other changes

    The introduction of KIP-1111 @@ -158,7 +233,15 @@

    Streams API
    • Old processor APIs
    • KStream#through() in both Java and Scala
    • -
    • "transformer" methods and classes in both Java and Scala
    • +
    • + "transformer" methods and classes in both Java and Scala +
        +
• migrating from KStream#transformValues() to KStream#processValues() might not be safe + due to KAFKA-19668. + Please refer to the migration guide for more details. +
      • +
      +
    • kstream.KStream#branch in both Java and Scala
    • builder methods for Time/Session/Join/SlidingWindows
    • KafkaStreams#setUncaughtExceptionHandler()
    • @@ -241,22 +324,22 @@

      Streams API

      - You can now configure your topology with a ProcessorWrapper, which allows you to access and optionally wrap/replace - any processor in the topology by injecting an alternative ProcessorSupplier in its place. This can be used to peek - records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to - aid in testing or debugging scenarios. You must implement the ProcessorWrapper interface and then pass the class - or class name into the configs via the new StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG config. NOTE: this config is - applied during the topology building phase, and therefore will not take effect unless the config is passed in when creating - the StreamsBuilder (DSL) or Topology(PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that - accepts a TopologyConfig parameter for the StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG to be picked up. - See KIP-1112 for more details. + You can now configure your topology with a ProcessorWrapper, which allows you to access and optionally wrap/replace + any processor in the topology by injecting an alternative ProcessorSupplier in its place. This can be used to peek + records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to + aid in testing or debugging scenarios. You must implement the ProcessorWrapper interface and then pass the class + or class name into the configs via the new StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG config. NOTE: this config is + applied during the topology building phase, and therefore will not take effect unless the config is passed in when creating + the StreamsBuilder (DSL) or Topology(PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that + accepts a TopologyConfig parameter for the StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG to be picked up. + See KIP-1112 for more details.

Upgraded RocksDB dependency to version 9.7.3 (from 7.9.2). This upgrade incorporates various improvements and optimizations within RocksDB. However, it also introduces some API changes. The org.rocksdb.AccessHint class, along with its associated methods, has been removed. Several methods related to compressed block cache configuration in the BlockBasedTableConfig class have been removed, including blockCacheCompressedNumShardBits, blockCacheCompressedSize, and their corresponding setters. These functionalities are now consolidated under the cache option, and developers should configure their compressed block cache using the setCache method instead. - The NO_FILE_CLOSES field has been removed from the org.rocksdb.TickerTypeenum as a result the number-open-files metrics does not work as expected. Metric number-open-files returns constant -1 from now on until it will officially be removed. + The NO_FILE_CLOSES field has been removed from the org.rocksdb.TickerType enum; as a result, the number-open-files metric does not work as expected. The number-open-files metric returns a constant -1 from now on until it is officially removed. The org.rocksdb.Options.setLogger() method now accepts a LoggerInterface as a parameter instead of the previous Logger. Some data types used in RocksDB's Java API have been modified. These changes, along with the removed class, field, and new methods, are primarily relevant to users implementing custom RocksDB configurations. These changes are expected to be largely transparent to most Kafka Streams users. However, those employing advanced RocksDB customizations within their Streams applications, particularly through the rocksdb.config.setter, are advised to consult the detailed RocksDB 9.7.3 changelog to ensure a smooth transition and adapt their configurations as needed. Specifically, users leveraging the removed AccessHint class, the removed methods from the BlockBasedTableConfig class, the NO_FILE_CLOSES field from TickerType, or relying on the previous signature of setLogger() will need to update their implementations. @@ -535,6 +618,11 @@

Streams API FixedKeyProcessorContext, and ContextualFixedKeyProcessor are introduced to guard against disallowed key modification inside processValues(). Furthermore, ProcessingContext is added for a better interface hierarchy. + CAUTION: The newly added KStream.processValues() method introduced a regression bug + (KAFKA-19668). + If you have the "merge repartition topics" optimization enabled, it is not safe to migrate from transformValues() + to processValues() in the 3.3.0 release. The bug is fixed only in Kafka Streams 4.0.1, 4.1.1, and 4.2.0. + For more details, please refer to the migration guide.
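As a hedged migration sketch (topic names and value logic are illustrative, and it assumes you are on a release that contains the KAFKA-19668 fix), a transformValues() call can typically be rewritten with a FixedKeyProcessor like this:
<pre><code class="language-java">
// Topology sketch only: migrating from transformValues() to processValues().
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;

public class ProcessValuesExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> input = builder.stream("input-topic");

        // processValues() guarantees the key cannot be changed, so no repartitioning is triggered.
        input.processValues(() -> new FixedKeyProcessor<String, String, String>() {
            private FixedKeyProcessorContext<String, String> context;

            @Override
            public void init(final FixedKeyProcessorContext<String, String> context) {
                this.context = context;
            }

            @Override
            public void process(final FixedKeyRecord<String, String> record) {
                // Forward the record with a transformed value; the key stays fixed by design.
                context.forward(record.withValue(record.value().toUpperCase()));
            }
        }).to("output-topic");
    }
}
</code></pre>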

      @@ -1033,705 +1121,9 @@

      < Hence, this feature won't be supported in the future any longer and you need to updated your code accordingly. If you use a custom PartitionGrouper and stop to use it, the created tasks might change. Hence, you will need to reset your application to upgrade it. - - -

      Streams API changes in 2.3.0

      - -

      Version 2.3.0 adds the Suppress operator to the kafka-streams-scala Ktable API.

      - -

      - As of 2.3.0 Streams now offers an in-memory version of the window (KIP-428) - and the session (KIP-445) store, in addition to the persistent ones based on RocksDB. - The new public interfaces inMemoryWindowStore() and inMemorySessionStore() are added to Stores and provide the built-in in-memory window or session store. -

      - -

      - As of 2.3.0 we've updated how to turn on optimizations. Now to enable optimizations, you need to do two things. - First add this line to your properties properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);, as you have done before. - Second, when constructing your KafkaStreams instance, you'll need to pass your configuration properties when building your - topology by using the overloaded StreamsBuilder.build(Properties) method. - For example KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(properties), properties). -

      - -

      - In 2.3.0 we have added default implementation to close() and configure() for Serializer, - Deserializer and Serde so that they can be implemented by lambda expression. - For more details please read KIP-331. -

      - -

      - To improve operator semantics, new store types are added that allow storing an additional timestamp per key-value pair or window. - Some DSL operators (for example KTables) are using those new stores. - Hence, you can now retrieve the last update timestamp via Interactive Queries if you specify - TimestampedKeyValueStoreType or TimestampedWindowStoreType as your QueryableStoreType. - While this change is mainly transparent, there are some corner cases that may require code changes: - Caution: If you receive an untyped store and use a cast, you might need to update your code to cast to the correct type. - Otherwise, you might get an exception similar to - java.lang.ClassCastException: class org.apache.kafka.streams.state.ValueAndTimestamp cannot be cast to class YOUR-VALUE-TYPE - upon getting a value from the store. - Additionally, TopologyTestDriver#getStateStore() only returns non-built-in stores and throws an exception if a built-in store is accessed. - For more details please read KIP-258. -

      - -

      - To improve type safety, a new operator KStream#flatTransformValues is added. - For more details please read KIP-313. -

      - -

      - Kafka Streams used to set the configuration parameter max.poll.interval.ms to Integer.MAX_VALUE. - This default value is removed and Kafka Streams uses the consumer default value now. - For more details please read KIP-442. -

      - -

      - Default configuration for repartition topic was changed: - The segment size for index files (segment.index.bytes) is no longer 50MB, but uses the cluster default. - Similarly, the configuration segment.ms in no longer 10 minutes, but uses the cluster default configuration. - Lastly, the retention period (retention.ms) is changed from Long.MAX_VALUE to -1 (infinite). - For more details please read KIP-443. -

      - -

      - To avoid memory leaks, RocksDBConfigSetter has a new close() method that is called on shutdown. - Users should implement this method to release any memory used by RocksDB config objects, by closing those objects. - For more details please read KIP-453. -

      - -

      - RocksDB dependency was updated to version 5.18.3. - The new version allows to specify more RocksDB configurations, including WriteBufferManager which helps to limit RocksDB off-heap memory usage. - For more details please read KAFKA-8215. -

      - -

      Streams API changes in 2.2.0

      -

      - We've simplified the KafkaStreams#state transition diagram during the starting up phase a bit in 2.2.0: in older versions the state will transit from CREATED to RUNNING, and then to REBALANCING to get the first - stream task assignment, and then back to RUNNING; starting in 2.2.0 it will transit from CREATED directly to REBALANCING and then to RUNNING. - If you have registered a StateListener that captures state transition events, you may need to adjust your listener implementation accordingly for this simplification (in practice, your listener logic should be very unlikely to be affected at all). -

      - -

      - In WindowedSerdes, we've added a new static constructor to return a TimeWindowSerde with configurable window size. This is to help users to construct time window serdes to read directly from a time-windowed store's changelog. - More details can be found in KIP-393. -

      - -

      - In 2.2.0 we have extended a few public interfaces including KafkaStreams to extend AutoCloseable so that they can be - used in a try-with-resource statement. For a full list of public interfaces that get impacted please read KIP-376. -

      - -

      Streams API changes in 2.1.0

      -

      - We updated TopologyDescription API to allow for better runtime checking. - Users are encouraged to use #topicSet() and #topicPattern() accordingly on TopologyDescription.Source nodes, - instead of using #topics(), which has since been deprecated. Similarly, use #topic() and #topicNameExtractor() - to get descriptions of TopologyDescription.Sink nodes. For more details, see - KIP-321. -

      - -

      - We've added a new class Grouped and deprecated Serialized. The intent of adding Grouped is the ability to - name repartition topics created when performing aggregation operations. Users can name the potential repartition topic using the - Grouped#as() method which takes a String and is used as part of the repartition topic name. The resulting repartition - topic name will still follow the pattern of ${application-id}->name<-repartition. The Grouped class is now favored over - Serialized in KStream#groupByKey(), KStream#groupBy(), and KTable#groupBy(). - Note that Kafka Streams does not automatically create repartition topics for aggregation operations. - - Additionally, we've updated the Joined class with a new method Joined#withName - enabling users to name any repartition topics required for performing Stream/Stream or Stream/Table join. For more details repartition - topic naming, see KIP-372. - - As a result we've updated the Kafka Streams Scala API and removed the Serialized class in favor of adding Grouped. - If you just rely on the implicit Serialized, you just need to recompile; if you pass in Serialized explicitly, sorry you'll have to make code changes. -

      - -

      - We've added a new config named max.task.idle.ms to allow users specify how to handle out-of-order data within a task that may be processing multiple - topic-partitions (see Out-of-Order Handling section for more details). - The default value is set to 0, to favor minimized latency over synchronization between multiple input streams from topic-partitions. - If users would like to wait for longer time when some of the topic-partitions do not have data available to process and hence cannot determine its corresponding stream time, - they can override this config to a larger value. -

      - -

      - We've added the missing SessionBytesStoreSupplier#retentionPeriod() to be consistent with the WindowBytesStoreSupplier which allows users to get the specified retention period for session-windowed stores. - We've also added the missing StoreBuilder#withCachingDisabled() to allow users to turn off caching for their customized stores. -

      - -

      - We added a new serde for UUIDs (Serdes.UUIDSerde) that you can use via Serdes.UUID() - (cf. KIP-206). -

      - -

      - We updated a list of methods that take long arguments as either timestamp (fix point) or duration (time period) - and replaced them with Instant and Duration parameters for improved semantics. - Some old methods base on long are deprecated and users are encouraged to update their code. -
      - In particular, aggregation windows (hopping/tumbling/unlimited time windows and session windows) as well as join windows now take Duration - arguments to specify window size, hop, and gap parameters. - Also, window sizes and retention times are now specified as Duration type in Stores class. - The Window class has new methods #startTime() and #endTime() that return window start/end timestamp as Instant. - For interactive queries, there are new #fetch(...) overloads taking Instant arguments. - Additionally, punctuations are now registered via ProcessorContext#schedule(Duration interval, ...). - For more details, see KIP-358. -

      - -

      - We deprecated KafkaStreams#close(...) and replaced it with KafkaStreams#close(Duration) that accepts a single timeout argument - Note: the new #close(Duration) method has improved (but slightly different) semantics. - For more details, see KIP-358. -

      - -

      - The newly exposed AdminClient metrics are now available when calling the KafkaStream#metrics() method. - For more details on exposing AdminClients metrics - see KIP-324 -

      - -

      - We deprecated the notion of segments in window stores as those are intended to be an implementation details. - Thus, method Windows#segments() and variable Windows#segments were deprecated. - If you implement custom windows, you should update your code accordingly. - Similarly, WindowBytesStoreSupplier#segments() was deprecated and replaced with WindowBytesStoreSupplier#segmentInterval(). - If you implement custom window store, you need to update your code accordingly. - Finally, Stores#persistentWindowStore(...) were deprecated and replaced with a new overload that does not allow to specify the number of segments any longer. - For more details, see KIP-319 - (note: KIP-328 and - KIP-358 'overlap' with KIP-319). -

      - -

      - We've added an overloaded StreamsBuilder#build method that accepts an instance of java.util.Properties with the intent of using the - StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG config added in Kafka Streams 2.0. Before 2.1, when building a topology with - the DSL, Kafka Streams writes the physical plan as the user makes calls on the DSL. Now by providing a java.util.Properties instance when - executing a StreamsBuilder#build call, Kafka Streams can optimize the physical plan of the topology, provided the StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG - config is set to StreamsConfig#OPTIMIZE. By setting StreamsConfig#OPTIMIZE in addition to the KTable optimization of - reusing the source topic as the changelog topic, the topology may be optimized to merge redundant repartition topics into one - repartition topic. The original no parameter version of StreamsBuilder#build is still available for those who wish to not - optimize their topology. Note that enabling optimization of the topology may require you to do an application reset when redeploying the application. For more - details, see KIP-312 -

      - -

      - We are introducing static membership towards Kafka Streams user. This feature reduces unnecessary rebalances during normal application upgrades or rolling bounces. - For more details on how to use it, checkout static membership design. - Note, Kafka Streams uses the same ConsumerConfig#GROUP_INSTANCE_ID_CONFIG, and you only need to make sure it is uniquely defined across - different stream instances in one application. -

      - -

      Streams API changes in 2.0.0

      -

      - In 2.0.0 we have added a few new APIs on the ReadOnlyWindowStore interface (for details please read Streams API changes below). - If you have customized window store implementations that extends the ReadOnlyWindowStore interface you need to make code changes. -

      - -

      - In addition, if you using Java 8 method references in your Kafka Streams code you might need to update your code to resolve method ambiguities. - Hot-swapping the jar-file only might not work for this case. - See below a complete list of 2.0.0 - API and semantic changes that allow you to advance your application and/or simplify your code base. -

      - -

      - We moved Consumed interface from org.apache.kafka.streams to org.apache.kafka.streams.kstream - as it was mistakenly placed in the previous release. If your code has already used it there is a simple one-liner change needed in your import statement. -

      - -

      - We have also removed some public APIs that are deprecated prior to 1.0.x in 2.0.0. - See below for a detailed list of removed APIs. -

      -

      - We have removed the skippedDueToDeserializationError-rate and skippedDueToDeserializationError-total metrics. - Deserialization errors, and all other causes of record skipping, are now accounted for in the pre-existing metrics - skipped-records-rate and skipped-records-total. When a record is skipped, the event is - now logged at WARN level. If these warnings become burdensome, we recommend explicitly filtering out unprocessable - records instead of depending on record skipping semantics. For more details, see - KIP-274. - As of right now, the potential causes of skipped records are: -

      -
        -
      • null keys in table sources
      • -
      • null keys in table-table inner/left/outer/right joins
      • -
      • null keys or values in stream-table joins
      • -
      • null keys or values in stream-stream joins
      • -
      • null keys or values in aggregations on grouped streams
      • -
      • null keys or values in reductions on grouped streams
      • -
      • null keys in aggregations on windowed streams
      • -
      • null keys in reductions on windowed streams
      • -
      • null keys in aggregations on session-windowed streams
      • -
      • - Errors producing results, when the configured default.production.exception.handler decides to - CONTINUE (the default is to FAIL and throw an exception). -
      • -
      • - Errors deserializing records, when the configured default.deserialization.exception.handler - decides to CONTINUE (the default is to FAIL and throw an exception). - This was the case previously captured in the skippedDueToDeserializationError metrics. -
      • -
      • Fetched records having a negative timestamp.
      • -
      - -

      - We've also fixed the metrics name for time and session windowed store operations in 2.0. As a result, our current built-in stores - will have their store types in the metric names as in-memory-state, in-memory-lru-state, - rocksdb-state, rocksdb-window-state, and rocksdb-session-state. For example, a RocksDB time windowed store's - put operation metrics would now be - kafka.streams:type=stream-rocksdb-window-state-metrics,client-id=([-.\w]+),task-id=([-.\w]+),rocksdb-window-state-id=([-.\w]+). - Users need to update their metrics collecting and reporting systems for their time and session windowed stores accordingly. - For more details, please read the State Store Metrics section. -

      - -

      - We have added support for methods in ReadOnlyWindowStore which allows for querying a single window's key-value pair. - For users who have customized window store implementations on the above interface, they'd need to update their code to implement the newly added method as well. - For more details, see KIP-261. -

      -

      - We have added public WindowedSerdes to allow users to read from / write to a topic storing windowed table changelogs directly. - In addition, in StreamsConfig we have also added default.windowed.key.serde.inner and default.windowed.value.serde.inner - to let users specify inner serdes if the default serde classes are windowed serdes. - For more details, see KIP-265. -

      -

      - We've added message header support in the Processor API in Kafka 2.0.0. In particular, we have added a new API ProcessorContext#headers() - which returns a Headers object that keeps track of the headers of the source topic's message that is being processed. Through this object, users can manipulate - the headers map that is being propagated throughout the processor topology as well. For more details please feel free to read - the Developer Guide section. -

      -

      - We have deprecated constructors of KafkaStreams that take a StreamsConfig as parameter. - Please use the other corresponding constructors that accept java.util.Properties instead. - For more details, see KIP-245. -

      -

      - Kafka 2.0.0 allows to manipulate timestamps of output records using the Processor API (KIP-251). - To enable this new feature, ProcessorContext#forward(...) was modified. - The two existing overloads #forward(Object key, Object value, String childName) and #forward(Object key, Object value, int childIndex) were deprecated and a new overload #forward(Object key, Object value, To to) was added. - The new class To allows you to send records to all or specific downstream processors by name and to set the timestamp for the output record. - Forwarding based on child index is not supported in the new API any longer. -

      -

      - We have added support to allow routing records dynamically to Kafka topics. More specifically, in both the lower-level Topology#addSink and higher-level KStream#to APIs, we have added variants that - take a TopicNameExtractor instance instead of a specific String typed topic name, such that for each received record from the upstream processor, the library will dynamically determine which Kafka topic to write to - based on the record's key and value, as well as record context. Note that all the Kafka topics that may possibly be used are still considered as user topics and hence required to be pre-created. In addition to that, we have modified the - StreamPartitioner interface to add the topic name parameter since the topic name now may not be known beforehand; users who have customized implementations of this interface would need to update their code while upgrading their application - to use Kafka Streams 2.0.0. -

      -

      - KIP-284 changed the retention time for repartition topics by setting its default value to Long.MAX_VALUE. - Instead of relying on data retention Kafka Streams uses the new purge data API to delete consumed data from those topics and to keep used storage small now. -

      -

      - We have modified the ProcessorStateManger#register(...) signature and removed the deprecated loggingEnabled boolean parameter as it is specified in the StoreBuilder. - Users who used this function to register their state stores into the processor topology need to simply update their code and remove this parameter from the caller. -

      -

      - Kafka Streams DSL for Scala is a new Kafka Streams client library available for developers authoring Kafka Streams applications in Scala. It wraps core Kafka Streams DSL types to make it easier to call when - interoperating with Scala code. For example, it includes higher order functions as parameters for transformations avoiding the need anonymous classes in Java 7 or experimental SAM type conversions in Scala 2.11, - automatic conversion between Java and Scala collection types, a way - to implicitly provide Serdes to reduce boilerplate from your application and make it more typesafe, and more! For more information see the - Kafka Streams DSL for Scala documentation and - KIP-270. -

      -

      - We have removed these deprecated APIs: -

      -
        -
      • KafkaStreams#toString no longer returns the topology and runtime metadata; to get topology metadata users can call Topology#describe() and to get thread runtime metadata users can call KafkaStreams#localThreadsMetadata (they are deprecated since 1.0.0). - For detailed guidance on how to update your code please read here
      • -
      • TopologyBuilder and KStreamBuilder are removed and replaced by Topology and StreamsBuidler respectively (they are deprecated since 1.0.0). - For detailed guidance on how to update your code please read here
      • -
      • StateStoreSupplier are removed and replaced with StoreBuilder (they are deprecated since 1.0.0); - and the corresponding Stores#create and KStream, KTable, KGroupedStream overloaded functions that use it have also been removed. - For detailed guidance on how to update your code please read here
      • -
      • KStream, KTable, KGroupedStream overloaded functions that requires serde and other specifications explicitly are removed and replaced with simpler overloaded functions that use Consumed, Produced, Serialized, Materialized, Joined (they are deprecated since 1.0.0). - For detailed guidance on how to update your code please read here
      • -
      • Processor#punctuate, ValueTransformer#punctuate, ValueTransformer#punctuate and ProcessorContext#schedule(long) are removed and replaced by ProcessorContext#schedule(long, PunctuationType, Punctuator) (they are deprecated in 1.0.0).
      • -
      • The second boolean typed parameter "loggingEnabled" in ProcessorContext#register has been removed; users can now use StoreBuilder#withLoggingEnabled, withLoggingDisabled to specify the behavior when they create the state store.
      • -
      • KTable#writeAs, print, foreach, to, through are removed, users can call KTable#tostream()#writeAs instead for the same purpose (they are deprecated since 0.11.0.0). - For detailed list of removed APIs please read here
      • -
      • StreamsConfig#KEY_SERDE_CLASS_CONFIG, VALUE_SERDE_CLASS_CONFIG, TIMESTAMP_EXTRACTOR_CLASS_CONFIG are removed and replaced with StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG, DEFAULT_VALUE_SERDE_CLASS_CONFIG, DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG respectively (they are deprecated since 0.11.0.0).
      • -
      • StreamsConfig#ZOOKEEPER_CONNECT_CONFIG are removed as we do not need ZooKeeper dependency in Streams any more (it is deprecated since 0.10.2.0).
      • -
      - -

      Streams API changes in 1.1.0

      -

      - We have added support for methods in ReadOnlyWindowStore which allows for querying WindowStores without the necessity of providing keys. - For users who have customized window store implementations on the above interface, they'd need to update their code to implement the newly added method as well. - For more details, see KIP-205. -

      - -

      - There is a new artifact kafka-streams-test-utils providing a TopologyTestDriver, ConsumerRecordFactory, and OutputVerifier class. - You can include the new artifact as a regular dependency to your unit tests and use the test driver to test your business logic of your Kafka Streams application. - For more details, see KIP-247.

      -

      - The introduction of KIP-220 - enables you to provide configuration parameters for the embedded admin client created by Kafka Streams, similar to the embedded producer and consumer clients. - You can provide the configs via StreamsConfig by adding the configs with the prefix admin. as defined by StreamsConfig#adminClientPrefix(String) - to distinguish them from configurations of other clients that share the same config names. -

      - -

      - New method in KTable -

      -
        -
      • transformValues methods have been added to KTable. Similar to those on KStream, these methods allow for richer, stateful, value transformation similar to the Processor API.
      • -
      - -

      - New method in GlobalKTable -

      -
        -
      • A method has been provided such that it will return the store name associated with the GlobalKTable or null if the store name is non-queryable.
      • -
      - -

      - New methods in KafkaStreams: -

      -
        -
      • added overload for the constructor that allows overriding the Time object used for tracking system wall-clock time; this is useful for unit testing your application code.
      • -
      - -

      New methods in KafkaClientSupplier:

      -
        -
      • added getAdminClient(config) that allows to override an AdminClient used for administrative requests such as internal topic creations, etc.
      • -
      - -

      New error handling for exceptions during production:

      -
        -
      • added interface ProductionExceptionHandler that allows implementors to decide whether or not Streams should FAIL or CONTINUE when certain exception occur while trying to produce.
      • -
      • provided an implementation, DefaultProductionExceptionHandler that always fails, preserving the existing behavior by default.
      • -
      • changing which implementation is used can be done by settings default.production.exception.handler to the fully qualified name of a class implementing this interface.
      • -
      - -

      Changes in StreamsResetter:

      -
        -
      • added options to specify input topics offsets to reset according to KIP-171
      • -
      - -

      Streams API changes in 1.0.0

      - -

      - With 1.0 a major API refactoring was accomplished and the new API is cleaner and easier to use. - This change includes the five main classes KafkaStreams, KStreamBuilder, - KStream, KTable, and TopologyBuilder (and some more others). - All changes are fully backward compatible as old API is only deprecated but not removed. - We recommend to move to the new API as soon as you can. - We will summarize all API changes in the next paragraphs. -

      - -

      - The two main classes to specify a topology via the DSL (KStreamBuilder) - or the Processor API (TopologyBuilder) were deprecated and replaced by - StreamsBuilder and Topology (both new classes are located in - package org.apache.kafka.streams). - Note, that StreamsBuilder does not extend Topology, i.e., - the class hierarchy is different now. - The new classes have basically the same methods as the old ones to build a topology via DSL or Processor API. - However, some internal methods that were public in KStreamBuilder - and TopologyBuilder but not part of the actual API are not present - in the new classes any longer. - Furthermore, some overloads were simplified compared to the original classes. - See KIP-120 - and KIP-182 - for full details. -

      - -

      - Changing how a topology is specified also affects KafkaStreams constructors, - that now only accept a Topology. - Using the DSL builder class StreamsBuilder one can get the constructed - Topology via StreamsBuilder#build(). - Additionally, a new class org.apache.kafka.streams.TopologyDescription - (and some more dependent classes) were added. - Those can be used to get a detailed description of the specified topology - and can be obtained by calling Topology#describe(). - An example using this new API is shown in the quickstart section. -

      - -

      - New methods in KStream: -

      -
        -
      • With the introduction of KIP-202 - a new method merge() has been created in KStream as the StreamsBuilder class's StreamsBuilder#merge() has been removed. - The method signature was also changed, too: instead of providing multiple KStreams into the method at the once, only a single KStream is accepted. -
      • -
      - -

      - New methods in KafkaStreams: -

      -
        -
      • retrieve the current runtime information about the local threads via localThreadsMetadata()
      • -
      • observe the restoration of all state stores via setGlobalStateRestoreListener(), in which users can provide their customized implementation of the org.apache.kafka.streams.processor.StateRestoreListener interface
      • -
      - -

      - Deprecated / modified methods in KafkaStreams: -

      -
        -
      • - toString(), toString(final String indent) were previously used to return static and runtime information. - They have been deprecated in favor of using the new classes/methods localThreadsMetadata() / ThreadMetadata (returning runtime information) and - TopologyDescription / Topology#describe() (returning static information). -
      • -
      • - With the introduction of KIP-182 - you should no longer pass in Serde to KStream#print operations. - If you can't rely on using toString to print your keys an values, you should instead you provide a custom KeyValueMapper via the Printed#withKeyValueMapper call. -
      • -
      • - setStateListener() now can only be set before the application start running, i.e. before KafkaStreams.start() is called. -
      • -
      - -

      - Deprecated methods in KGroupedStream -

      -
        -
      • - Windowed aggregations have been deprecated from KGroupedStream and moved to WindowedKStream. - You can now perform a windowed aggregation by, for example, using KGroupedStream#windowedBy(Windows)#reduce(Reducer). -
      • -
      - -

      - Modified methods in Processor: -

      -
        -
      • -

        - The Processor API was extended to allow users to schedule punctuate functions either based on data-driven stream time or wall-clock time. - As a result, the original ProcessorContext#schedule is deprecated with a new overloaded function that accepts a user customizable Punctuator callback interface, which triggers its punctuate API method periodically based on the PunctuationType. - The PunctuationType determines what notion of time is used for the punctuation scheduling: either stream time or wall-clock time (by default, stream time is configured to represent event time via TimestampExtractor). - In addition, the punctuate function inside Processor is also deprecated. -

        -

        - Before this, users could only schedule based on stream time (i.e. PunctuationType.STREAM_TIME) and hence the punctuate function was data-driven only because stream time is determined (and advanced forward) by the timestamps derived from the input data. - If there is no data arriving at the processor, the stream time would not advance and hence punctuation will not be triggered. - On the other hand, When wall-clock time (i.e. PunctuationType.WALL_CLOCK_TIME) is used, punctuate will be triggered purely based on wall-clock time. - So for example if the Punctuator function is scheduled based on PunctuationType.WALL_CLOCK_TIME, if these 60 records were processed within 20 seconds, - punctuate would be called 2 times (one time every 10 seconds); - if these 60 records were processed within 5 seconds, then no punctuate would be called at all. - Users can schedule multiple Punctuator callbacks with different PunctuationTypes within the same processor by simply calling ProcessorContext#schedule multiple times inside processor's init() method. -

        -
      • -
      - -

      - If you are monitoring on task level or processor-node / state store level Streams metrics, please note that the metrics sensor name and hierarchy was changed: - The task ids, store names and processor names are no longer in the sensor metrics names, but instead are added as tags of the sensors to achieve consistent metrics hierarchy. - As a result you may need to make corresponding code changes on your metrics reporting and monitoring tools when upgrading to 1.0.0. - Detailed metrics sensor can be found in the Streams Monitoring section. -

      - -

      - The introduction of KIP-161 - enables you to provide a default exception handler for deserialization errors when reading data from Kafka rather than throwing the exception all the way out of your streams application. - You can provide the configs via the StreamsConfig as StreamsConfig#DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG. - The specified handler must implement the org.apache.kafka.streams.errors.DeserializationExceptionHandler interface. -

      - -

      - The introduction of KIP-173 - enables you to provide topic configuration parameters for any topics created by Kafka Streams. - This includes repartition and changelog topics. - You can provide the configs via the StreamsConfig by adding the configs with the prefix as defined by StreamsConfig#topicPrefix(String). - Any properties in the StreamsConfig with the prefix will be applied when creating internal topics. - Any configs that aren't topic configs will be ignored. - If you already use StateStoreSupplier or Materialized to provide configs for changelogs, then they will take precedence over those supplied in the config. -

      - -

      Streams API changes in 0.11.0.0

      - -

      Updates in StreamsConfig:

      -
        -
      • new configuration parameter processing.guarantee is added
      • -
      • configuration parameter key.serde was deprecated and replaced by default.key.serde
      • -
      • configuration parameter value.serde was deprecated and replaced by default.value.serde
      • -
      • configuration parameter timestamp.extractor was deprecated and replaced by default.timestamp.extractor
      • -
      • method keySerde() was deprecated and replaced by defaultKeySerde()
      • -
      • method valueSerde() was deprecated and replaced by defaultValueSerde()
      • -
      • new method defaultTimestampExtractor() was added
      • -
      - -

      New methods in TopologyBuilder:

      -
        -
      • added overloads for addSource() that allow to define a TimestampExtractor per source node
      • -
      • added overloads for addGlobalStore() that allow to define a TimestampExtractor per source node associated with the global store
      • -
      - -

      New methods in KStreamBuilder:

      -
        -
      • added overloads for stream() that allow to define a TimestampExtractor per input stream
      • -
      • added overloads for table() that allow to define a TimestampExtractor per input table
      • -
      • added overloads for globalKTable() that allow to define a TimestampExtractor per global table
      • -
      - -

      Deprecated methods in KTable:

      -
        -
      • void foreach(final ForeachAction<? super K, ? super V> action)
      • -
      • void print()
      • -
      • void print(final String streamName)
      • -
      • void print(final Serde<K> keySerde, final Serde<V> valSerde)
      • -
      • void print(final Serde<K> keySerde, final Serde<V> valSerde, final String streamName)
      • -
      • void writeAsText(final String filePath)
      • -
      • void writeAsText(final String filePath, final String streamName)
      • -
      • void writeAsText(final String filePath, final Serde<K> keySerde, final Serde<V> valSerde)
      • -
      • void writeAsText(final String filePath, final String streamName, final Serde<K> keySerde, final Serde<V> valSerde)
      • -
      - -

      - The above methods have been deprecated in favor of using the Interactive Queries API. - If you want to query the current content of the state store backing the KTable, use the following approach: -

      -
        -
      • Make a call to KafkaStreams.store(final String storeName, final QueryableStoreType<T> queryableStoreType)
      • -
      • Then make a call to ReadOnlyKeyValueStore.all() to iterate over the keys of a KTable.
      • -
      -

      - If you want to view the changelog stream of the KTable then you could call KTable.toStream().print(Printed.toSysOut). -

      - -

      Metrics using exactly-once semantics:

      -

      - If "exactly_once" processing (EOS version 1) is enabled via the processing.guarantee parameter, - internally Streams switches from a producer-per-thread to a producer-per-task runtime model. - Using "exactly_once_beta" (EOS version 2) does use a producer-per-thread, so client.id doesn't change, - compared with "at_least_once" for this case). - In order to distinguish the different producers, the producer's client.id additionally encodes the task-ID for this case. - Because the producer's client.id is used to report JMX metrics, it might be required to update tools that receive those metrics. -

      - -

      Producer's client.id naming schema:

      -
        -
      • at-least-once (default): [client.Id]-StreamThread-[sequence-number]
      • -
      • exactly-once: [client.Id]-StreamThread-[sequence-number]-[taskId]
      • -
      • exactly-once-beta: [client.Id]-StreamThread-[sequence-number]
      • -
      -

      [client.Id] is either set via Streams configuration parameter client.id or defaults to [application.id]-[processId] ([processId] is a random UUID).

      - -

      Notable changes in 0.10.2.1

      - -

      - Parameter updates in StreamsConfig: -

      -
        -
      • The default config values of embedded producer's retries and consumer's max.poll.interval.ms have been changed to improve the resiliency of a Kafka Streams application
      • -
      - -

      Streams API changes in 0.10.2.0

      - -

      - New methods in KafkaStreams: -

      -
        -
      • set a listener to react on application state change via setStateListener(StateListener listener)
      • -
      • retrieve the current application state via state()
      • -
      • retrieve the global metrics registry via metrics()
      • -
      • apply a timeout when closing an application via close(long timeout, TimeUnit timeUnit)
      • -
      • specify a custom indent when retrieving Kafka Streams information via toString(String indent)
      • -
      - -

      - Parameter updates in StreamsConfig: -

      -
        -
      • parameter zookeeper.connect was deprecated; a Kafka Streams application does no longer interact with ZooKeeper for topic management but uses the new broker admin protocol - (cf. KIP-4, Section "Topic Admin Schema")
      • -
      • added many new parameters for metrics, security, and client configurations
      • -
      - -

      Changes in StreamsMetrics interface:

      -
        -
      • removed methods: addLatencySensor()
      • -
      • added methods: addLatencyAndThroughputSensor(), addThroughputSensor(), recordThroughput(), - addSensor(), removeSensor()
      • -
      - -

      New methods in TopologyBuilder:

      -
        -
      • added overloads for addSource() that allow to define a auto.offset.reset policy per source node
      • -
      • added methods addGlobalStore() to add global StateStores
      • -
      - -

      New methods in KStreamBuilder:

      -
        -
      • added overloads for stream() and table() that allow to define a auto.offset.reset policy per input stream/table
      • -
      • added method globalKTable() to create a GlobalKTable
      • -
      - -

      New joins for KStream:

      -
        -
      • added overloads for join() to join with KTable
      • -
      • added overloads for join() and leftJoin() to join with GlobalKTable
      • -
      • note, join semantics in 0.10.2 were improved and thus you might see different result compared to 0.10.0.x and 0.10.1.x - (cf. Kafka Streams Join Semantics in the Apache Kafka wiki) -
      - -

      Aligned null-key handling for KTable joins:

      -
        -
      • like all other KTable operations, KTable-KTable joins do not throw an exception on null key records anymore, but drop those records silently
      • -
      - -

      New window type Session Windows:

      -
        -
      • added class SessionWindows to specify session windows
      • -
      • added overloads for KGroupedStream methods count(), reduce(), and aggregate() - to allow session window aggregations
      • -
      - -

      Changes to TimestampExtractor:

      -
        -
      • method extract() has a second parameter now
      • -
      • new default timestamp extractor class FailOnInvalidTimestamp - (it gives the same behavior as old (and removed) default extractor ConsumerRecordTimestampExtractor)
      • -
      • new alternative timestamp extractor classes LogAndSkipOnInvalidTimestamp and UsePreviousTimeOnInvalidTimestamps
      • -
      - -

      Relaxed type constraints of many DSL interfaces, classes, and methods (cf. KIP-100).

      - -

      Streams API changes in 0.10.1.0

      - -

      Stream grouping and aggregation split into two methods:

      -
        -
      • old: KStream #aggregateByKey(), #reduceByKey(), and #countByKey()
      • -
      • new: KStream#groupByKey() plus KGroupedStream #aggregate(), #reduce(), and #count()
      • -
      • Example: stream.countByKey() changes to stream.groupByKey().count()
      • -
      - -

      Auto Repartitioning:

      -
        -
      • a call to through() after a key-changing operator and before an aggregation/join is no longer required
      • -
      • Example: stream.selectKey(...).through(...).countByKey() changes to stream.selectKey().groupByKey().count()
      • -
      - -

      TopologyBuilder:

      -
        -
      • methods #sourceTopics(String applicationId) and #topicGroups(String applicationId) got simplified to #sourceTopics() and #topicGroups()
      • -
      - -

      DSL: new parameter to specify state store names:

      -
        -
      • The new Interactive Queries feature requires to specify a store name for all source KTables and window aggregation result KTables (previous parameter "operator/window name" is now the storeName)
      • -
      • KStreamBuilder#table(String topic) changes to #topic(String topic, String storeName)
      • -
      • KTable#through(String topic) changes to #through(String topic, String storeName)
      • -
      • KGroupedStream #aggregate(), #reduce(), and #count() require additional parameter "String storeName"
      • -
      • Example: stream.countByKey(TimeWindows.of("windowName", 1000)) changes to stream.groupByKey().count(TimeWindows.of(1000), "countStoreName")
      • -
      - -

      Windowing:

      -
        -
      • Windows are not named anymore: TimeWindows.of("name", 1000) changes to TimeWindows.of(1000) (cf. DSL: new parameter to specify state store names)
      • -
      • JoinWindows has no default size anymore: JoinWindows.of("name").within(1000) changes to JoinWindows.of(1000)
      • -
      +

For Streams API changes in versions older than 2.4.x, please check the 3.9 upgrade document.

      Streams API broker compatibility

      @@ -1747,8 +1139,8 @@

      Kafka Streams API (rows) - 2.1.x and
      2.2.x and
      2.3.x and
      2.4.x and
      2.5.x and
      2.6.x and
      2.7.x and
      2.8.x and
      3.0.x and
      3.1.x and
      3.2.x and
      3.3.x and
      3.4.x and
      3.5.x and
      3.6.x and
      3.7.x and
      3.8.x and
      3.9.x - 4.0.x + 2.4.x and
      2.5.x and
      2.6.x and
      2.7.x and
      2.8.x and
      3.0.x and
      3.1.x and
      3.2.x and
      3.3.x and
      3.4.x and
      3.5.x and
      3.6.x and
      3.7.x and
      3.8.x and
      3.9.x and
      4.0.x + 4.1.x 2.4.x and
      2.5.x @@ -1756,7 +1148,7 @@

      compatible - 2.6.x and
      2.7.x and
      2.8.x and
      3.0.x and
      3.1.x and
      3.2.x and
      3.3.x and
      3.4.x and
      3.5.x and
      3.6.x and
      3.7.x and
      3.8.x and
      3.9.x and
      4.0.x + 2.6.x and
      2.7.x and
      2.8.x and
      3.0.x and
      3.1.x and
      3.2.x and
      3.3.x and
      3.4.x and
      3.5.x and
      3.6.x and
      3.7.x and
      3.8.x and
      3.9.x and
      4.0.x and
      4.1.x compatible; enabling exactly-once v2 requires broker version 2.5.x or higher compatible diff --git a/docs/toc.html b/docs/toc.html index c42961cf7fbcd..304bd1c8a53c1 100644 --- a/docs/toc.html +++ b/docs/toc.html @@ -36,29 +36,31 @@
    • 3. Configuration @@ -148,6 +153,7 @@
    • 6.7 Monitoring
      • Security Considerations for Remote Monitoring using JMX +
      • Group Coordinator Monitoring
      • Tiered Storage Monitoring
      • KRaft Monitoring
      • Selector Monitoring @@ -162,7 +168,9 @@
      • 6.8 KRaft
        • Configuration -
        • Storage Tool +
        • Upgrade +
        • Provisioning Nodes +
        • Controller membership changes
        • Debugging
        • Deploying Considerations
        • ZooKeeper to KRaft Migration diff --git a/docs/upgrade.html b/docs/upgrade.html index 63b3886de24ab..b8a0614456cf2 100644 --- a/docs/upgrade.html +++ b/docs/upgrade.html @@ -19,25 +19,223 @@